diff --git a/.gitmodules b/.gitmodules index 238c8f9..17449c8 100644 --- a/.gitmodules +++ b/.gitmodules @@ -6,3 +6,7 @@ path = mros2-asp3-f767zi url = git@github.com:mROS-base/mros2-asp3-f767zi branch = eval/v0.3.1 +[submodule "micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/micro_ros_stm32cubemx_utils"] + path = micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/micro_ros_stm32cubemx_utils + url = https://github.com/mROS-base/micro_ros_stm32cubemx_utils + branch = galactic diff --git a/micro-ros-stm32_embeddedrtps/main_twist.c b/micro-ros-stm32_embeddedrtps/main_twist.c new file mode 100644 index 0000000..90e1eef --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/main_twist.c @@ -0,0 +1,505 @@ +/* USER CODE BEGIN Header */ +/** + ****************************************************************************** + * @file : main.c + * @brief : Main program body + ****************************************************************************** + * @attention + * + *
+ BaseType_t xCoRoutineCreate(
+                               crCOROUTINE_CODE pxCoRoutineCode,
+                               UBaseType_t uxPriority,
+                               UBaseType_t uxIndex
+                             );
+ *
+ * Create a new co-routine and add it to the list of co-routines that are
+ * ready to run.
+ *
+ * @param pxCoRoutineCode Pointer to the co-routine function. Co-routine
+ * functions require special syntax - see the co-routine section of the WEB
+ * documentation for more information.
+ *
+ * @param uxPriority The priority with respect to other co-routines at which
+ * the co-routine will run.
+ *
+ * @param uxIndex Used to distinguish between different co-routines that
+ * execute the same function. See the example below and the co-routine section
+ * of the WEB documentation for further information.
+ *
+ * @return pdPASS if the co-routine was successfully created and added to a ready
+ * list, otherwise an error code defined within ProjDefs.h.
+ *
+ * Example usage:
+ // Co-routine to be created.
+ void vFlashCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ // This may not be necessary for const variables.
+ static const char cLedToFlash[ 2 ] = { 5, 6 };
+ static const TickType_t uxFlashRates[ 2 ] = { 200, 400 };
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // This co-routine just delays for a fixed period, then toggles
+         // an LED. Two co-routines are created using this function, so
+         // the uxIndex parameter is used to tell the co-routine which
+         // LED to flash and how long to delay.
+         vParTestToggleLED( cLedToFlash[ uxIndex ] );
+         crDELAY( xHandle, uxFlashRates[ uxIndex ] );
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
+
+ // Function that creates two co-routines.
+ void vOtherFunction( void )
+ {
+ UBaseType_t uxIndex;
+
+     // Create two co-routines at priority 0. The first is given index 0
+     // so (from the code above) toggles LED 5 every 200 ticks. The second
+     // is given index 1 so toggles LED 6 every 400 ticks.
+     for( uxIndex = 0; uxIndex < 2; uxIndex++ )
+     {
+         xCoRoutineCreate( vFlashCoRoutine, 0, uxIndex );
+     }
+ }
+ * \defgroup xCoRoutineCreate xCoRoutineCreate
+ * \ingroup Tasks
+ */
+BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, UBaseType_t uxPriority, UBaseType_t uxIndex );
+
+
+/**
+ * croutine.h
+ *
+ void vCoRoutineSchedule( void );
+ *
+ * Run a co-routine.
+ *
+ * vCoRoutineSchedule() executes the highest priority co-routine that is able
+ * to run. The co-routine will execute until it either blocks, yields or is
+ * preempted by a task. Co-routines execute cooperatively so one
+ * co-routine cannot be preempted by another, but can be preempted by a task.
+ *
+ * If an application comprises both tasks and co-routines then
+ * vCoRoutineSchedule should be called from the idle task (in an idle task
+ * hook).
+ *
+ * Example usage:
+ // This idle task hook will schedule a co-routine each time it is called.
+ // The rest of the idle task will execute between co-routine calls.
+ void vApplicationIdleHook( void )
+ {
+     vCoRoutineSchedule();
+ }
+
+ // Alternatively, if you do not require any other part of the idle task to
+ // execute, the idle task hook can call vCoRoutineSchedule() within an
+ // infinite loop.
+ void vApplicationIdleHook( void )
+ {
+     for( ;; )
+     {
+         vCoRoutineSchedule();
+     }
+ }
+ * \defgroup vCoRoutineSchedule vCoRoutineSchedule
+ * \ingroup Tasks
+ */
+void vCoRoutineSchedule( void );
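+
+/*
+ * Configuration note (informal sketch of the build requirements, not part of
+ * the API above): co-routine support is only compiled into the kernel when
+ * configUSE_CO_ROUTINES is set to 1 in FreeRTOSConfig.h, and the number of
+ * distinct co-routine priorities available to xCoRoutineCreate() is set by
+ * configMAX_CO_ROUTINE_PRIORITIES. Valid priorities therefore run from 0 to
+ * ( configMAX_CO_ROUTINE_PRIORITIES - 1 ).
+ */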
+
+/**
+ * croutine.h
+ *
+ crSTART( CoRoutineHandle_t xHandle );
+ *
+ * This macro MUST always be called at the start of a co-routine function.
+ *
+ * Example usage:
+ // Co-routine to be created. + void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + { + // Variables in co-routines must be declared static if they must maintain value across a blocking call. + static int32_t ulAVariable; + + // Must start every co-routine with a call to crSTART(); + crSTART( xHandle ); + + for( ;; ) + { + // Co-routine functionality goes here. + } + + // Must end every co-routine with a call to crEND(); + crEND(); + }+ * \defgroup crSTART crSTART + * \ingroup Tasks + */ +#define crSTART( pxCRCB ) switch( ( ( CRCB_t * )( pxCRCB ) )->uxState ) { case 0: + +/** + * croutine. h + *
+ crEND();+ * + * This macro MUST always be called at the end of a co-routine function. + * + * Example usage: +
+ // Co-routine to be created.
+ void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ static int32_t ulAVariable;
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // Co-routine functionality goes here.
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
+ * \defgroup crEND crEND
+ * \ingroup Tasks
+ */
+#define crEND() }
+
+/*
+ * These macros are intended for internal use by the co-routine implementation
+ * only. The macros should not be used directly by application writers.
+ */
+#define crSET_STATE0( xHandle ) ( ( CRCB_t * )( xHandle ) )->uxState = (__LINE__ * 2); return; case (__LINE__ * 2):
+#define crSET_STATE1( xHandle ) ( ( CRCB_t * )( xHandle ) )->uxState = ((__LINE__ * 2)+1); return; case ((__LINE__ * 2)+1):
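+
+/*
+ * How these macros fit together (an informal sketch, not the literal
+ * preprocessor output): crSTART() opens a switch on the co-routine's saved
+ * uxState, crSET_STATE0()/crSET_STATE1() record a resume point and return,
+ * and crEND() closes the switch. A co-routine body containing one blocking
+ * call therefore expands to approximately:
+ *
+ *    void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ *    {
+ *        switch( ( ( CRCB_t * )( xHandle ) )->uxState ) { case 0:
+ *        for( ;; )
+ *        {
+ *            vCoRoutineAddToDelayedList( xTicksToDelay, NULL );
+ *            ( ( CRCB_t * )( xHandle ) )->uxState = ( __LINE__ * 2 ); return;
+ *            case ( __LINE__ * 2 ):; // execution resumes here next time
+ *        }
+ *        } // crEND()
+ *    }
+ *
+ * Because the function really does return at each resume point, its stack
+ * frame is destroyed and rebuilt on every resume - which is why variables
+ * that must survive a blocking call have to be declared static.
+ */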
+
+/**
+ * croutine.h
+ *
+ crDELAY( CoRoutineHandle_t xHandle, TickType_t xTicksToDelay );
+ *
+ * Delay a co-routine for a fixed period of time.
+ *
+ * crDELAY can only be called from the co-routine function itself - not
+ * from within a function called by the co-routine function. This is because
+ * co-routines do not maintain their own stack.
+ *
+ * @param xHandle The handle of the co-routine to delay. This is the xHandle
+ * parameter of the co-routine function.
+ *
+ * @param xTicksToDelay The number of ticks that the co-routine should delay
+ * for. The actual amount of time this equates to is defined by
+ * configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant portTICK_PERIOD_MS
+ * can be used to convert ticks to milliseconds.
+ *
+ * Example usage:
+ // Co-routine to be created.
+ void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ // This may not be necessary for const variables.
+ // We are to delay for 200ms.
+ static const TickType_t xDelayTime = 200 / portTICK_PERIOD_MS;
+
+     // Must start every co-routine with a call to crSTART();
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // Delay for 200ms.
+         crDELAY( xHandle, xDelayTime );
+
+         // Do something here.
+     }
+
+     // Must end every co-routine with a call to crEND();
+     crEND();
+ }
+ * \defgroup crDELAY crDELAY
+ * \ingroup Tasks
+ */
+#define crDELAY( xHandle, xTicksToDelay ) \
+    if( ( xTicksToDelay ) > 0 ) \
+    { \
+        vCoRoutineAddToDelayedList( ( xTicksToDelay ), NULL ); \
+    } \
+    crSET_STATE0( ( xHandle ) );
+
+/**
+ *
+ crQUEUE_SEND(
+                 CoRoutineHandle_t xHandle,
+                 QueueHandle_t pxQueue,
+                 void *pvItemToQueue,
+                 TickType_t xTicksToWait,
+                 BaseType_t *pxResult
+             )
+ *
+ * The macros crQUEUE_SEND() and crQUEUE_RECEIVE() are the co-routine
+ * equivalents of the xQueueSend() and xQueueReceive() functions used by tasks.
+ *
+ * crQUEUE_SEND and crQUEUE_RECEIVE can only be used from a co-routine whereas
+ * xQueueSend() and xQueueReceive() can only be used from tasks.
+ *
+ * crQUEUE_SEND can only be called from the co-routine function itself - not
+ * from within a function called by the co-routine function. This is because
+ * co-routines do not maintain their own stack.
+ *
+ * See the co-routine section of the WEB documentation for information on
+ * passing data between tasks and co-routines and between ISRs and
+ * co-routines.
+ *
+ * @param xHandle The handle of the calling co-routine. This is the xHandle
+ * parameter of the co-routine function.
+ *
+ * @param pxQueue The handle of the queue on which the data will be posted.
+ * The handle is obtained as the return value when the queue is created using
+ * the xQueueCreate() API function.
+ *
+ * @param pvItemToQueue A pointer to the data being posted onto the queue.
+ * The number of bytes of each queued item is specified when the queue is
+ * created. This number of bytes is copied from pvItemToQueue into the queue
+ * itself.
+ *
+ * @param xTicksToWait The number of ticks that the co-routine should block
+ * to wait for space to become available on the queue, should space not be
+ * available immediately. The actual amount of time this equates to is defined
+ * by configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant
+ * portTICK_PERIOD_MS can be used to convert ticks to milliseconds (see example
+ * below).
+ *
+ * @param pxResult The variable pointed to by pxResult will be set to pdPASS if
+ * data was successfully posted onto the queue, otherwise it will be set to an
+ * error defined within ProjDefs.h.
+ *
+ * Example usage:
+ // Co-routine function that blocks for a fixed period then posts a number onto
+ // a queue.
+ static void prvCoRoutineFlashTask( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // Variables in co-routines must be declared static if they must maintain value across a blocking call.
+ static BaseType_t xNumberToPost = 0;
+ static BaseType_t xResult;
+
+     // Co-routines must begin with a call to crSTART().
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // This assumes the queue has already been created.
+         crQUEUE_SEND( xHandle, xCoRoutineQueue, &xNumberToPost, NO_DELAY, &xResult );
+
+         if( xResult != pdPASS )
+         {
+             // The message was not posted!
+         }
+
+         // Increment the number to be posted onto the queue.
+         xNumberToPost++;
+
+         // Delay for 100 ticks.
+         crDELAY( xHandle, 100 );
+     }
+
+     // Co-routines must end with a call to crEND().
+     crEND();
+ }
+ * \defgroup crQUEUE_SEND crQUEUE_SEND
+ * \ingroup Tasks
+ */
+#define crQUEUE_SEND( xHandle, pxQueue, pvItemToQueue, xTicksToWait, pxResult ) \
+{ \
+    *( pxResult ) = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), ( xTicksToWait ) ); \
+    if( *( pxResult ) == errQUEUE_BLOCKED ) \
+    { \
+        crSET_STATE0( ( xHandle ) ); \
+        *( pxResult ) = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), 0 ); \
+    } \
+    if( *( pxResult ) == errQUEUE_YIELD ) \
+    { \
+        crSET_STATE1( ( xHandle ) ); \
+        *( pxResult ) = pdPASS; \
+    } \
+}
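+
+/*
+ * Reading the expansion above (informal): the first xQueueCRSend() attempt
+ * may return errQUEUE_BLOCKED, in which case the co-routine records a resume
+ * point (crSET_STATE0) and returns to the scheduler; when it is next
+ * scheduled - after the block time expires or space becomes available - it
+ * retries the send with a zero block time. errQUEUE_YIELD means the send
+ * succeeded but unblocked a higher priority co-routine, so a second resume
+ * point (crSET_STATE1) lets that co-routine run first, after which
+ * *( pxResult ) reads as pdPASS. crQUEUE_RECEIVE() below follows exactly the
+ * same pattern on the receive side.
+ */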
+
+/**
+ * croutine.h
+ *
+ crQUEUE_RECEIVE(
+                    CoRoutineHandle_t xHandle,
+                    QueueHandle_t pxQueue,
+                    void *pvBuffer,
+                    TickType_t xTicksToWait,
+                    BaseType_t *pxResult
+                )
+ *
+ * The macros crQUEUE_SEND() and crQUEUE_RECEIVE() are the co-routine
+ * equivalents of the xQueueSend() and xQueueReceive() functions used by tasks.
+ *
+ * crQUEUE_SEND and crQUEUE_RECEIVE can only be used from a co-routine whereas
+ * xQueueSend() and xQueueReceive() can only be used from tasks.
+ *
+ * crQUEUE_RECEIVE can only be called from the co-routine function itself - not
+ * from within a function called by the co-routine function. This is because
+ * co-routines do not maintain their own stack.
+ *
+ * See the co-routine section of the WEB documentation for information on
+ * passing data between tasks and co-routines and between ISRs and
+ * co-routines.
+ *
+ * @param xHandle The handle of the calling co-routine. This is the xHandle
+ * parameter of the co-routine function.
+ *
+ * @param pxQueue The handle of the queue from which the data will be received.
+ * The handle is obtained as the return value when the queue is created using
+ * the xQueueCreate() API function.
+ *
+ * @param pvBuffer The buffer into which the received item is to be copied.
+ * The number of bytes of each queued item is specified when the queue is
+ * created. This number of bytes is copied into pvBuffer.
+ *
+ * @param xTicksToWait The number of ticks that the co-routine should block
+ * to wait for data to become available from the queue, should data not be
+ * available immediately. The actual amount of time this equates to is defined
+ * by configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant
+ * portTICK_PERIOD_MS can be used to convert ticks to milliseconds (see the
+ * crQUEUE_SEND example).
+ *
+ * @param pxResult The variable pointed to by pxResult will be set to pdPASS if
+ * data was successfully retrieved from the queue, otherwise it will be set to
+ * an error code as defined within ProjDefs.h.
+ *
+ * Example usage:
+ // A co-routine receives the number of an LED to flash from a queue. It + // blocks on the queue until the number is received. + static void prvCoRoutineFlashWorkTask( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + { + // Variables in co-routines must be declared static if they must maintain value across a blocking call. + static BaseType_t xResult; + static UBaseType_t uxLEDToFlash; + + // All co-routines must start with a call to crSTART(). + crSTART( xHandle ); + + for( ;; ) + { + // Wait for data to become available on the queue. + crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &uxLEDToFlash, portMAX_DELAY, &xResult ); + + if( xResult == pdPASS ) + { + // We received the LED to flash - flash it! + vParTestToggleLED( uxLEDToFlash ); + } + } + + crEND(); + }+ * \defgroup crQUEUE_RECEIVE crQUEUE_RECEIVE + * \ingroup Tasks + */ +#define crQUEUE_RECEIVE( xHandle, pxQueue, pvBuffer, xTicksToWait, pxResult ) \ +{ \ + *( pxResult ) = xQueueCRReceive( ( pxQueue) , ( pvBuffer ), ( xTicksToWait ) ); \ + if( *( pxResult ) == errQUEUE_BLOCKED ) \ + { \ + crSET_STATE0( ( xHandle ) ); \ + *( pxResult ) = xQueueCRReceive( ( pxQueue) , ( pvBuffer ), 0 ); \ + } \ + if( *( pxResult ) == errQUEUE_YIELD ) \ + { \ + crSET_STATE1( ( xHandle ) ); \ + *( pxResult ) = pdPASS; \ + } \ +} + +/** + * croutine. h + *
+ crQUEUE_SEND_FROM_ISR(
+                         QueueHandle_t pxQueue,
+                         void *pvItemToQueue,
+                         BaseType_t xCoRoutinePreviouslyWoken
+                      )
+ *
+ * The macros crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() are the
+ * co-routine equivalents of the xQueueSendFromISR() and xQueueReceiveFromISR()
+ * functions used by tasks.
+ *
+ * crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() can only be used to
+ * pass data between a co-routine and an ISR, whereas xQueueSendFromISR() and
+ * xQueueReceiveFromISR() can only be used to pass data between a task and an
+ * ISR.
+ *
+ * crQUEUE_SEND_FROM_ISR can only be called from an ISR to send data to a queue
+ * that is being used from within a co-routine.
+ *
+ * See the co-routine section of the WEB documentation for information on
+ * passing data between tasks and co-routines and between ISRs and
+ * co-routines.
+ *
+ * @param pxQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param xCoRoutinePreviouslyWoken This is included so an ISR can post onto
+ * the same queue multiple times from a single interrupt. The first call
+ * should always pass in pdFALSE. Subsequent calls should pass in
+ * the value returned from the previous call.
+ *
+ * @return pdTRUE if a co-routine was woken by posting onto the queue. This is
+ * used by the ISR to determine if a context switch may be required following
+ * the ISR.
+ *
+ * Example usage:
+ // A co-routine that blocks on a queue waiting for characters to be received.
+ static void vReceivingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ char cRxedChar;
+ BaseType_t xResult;
+
+     // All co-routines must start with a call to crSTART().
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // Wait for data to become available on the queue. This assumes the
+         // queue xCommsRxQueue has already been created!
+         crQUEUE_RECEIVE( xHandle, xCommsRxQueue, &cRxedChar, portMAX_DELAY, &xResult );
+
+         // Was a character received?
+         if( xResult == pdPASS )
+         {
+             // Process the character here.
+         }
+     }
+
+     // All co-routines must end with a call to crEND().
+     crEND();
+ }
+
+ // An ISR that uses a queue to send characters received on a serial port to
+ // a co-routine.
+ void vUART_ISR( void )
+ {
+ char cRxedChar;
+ BaseType_t xCRWokenByPost = pdFALSE;
+
+     // We loop around reading characters until there are none left in the UART.
+     while( UART_RX_REG_NOT_EMPTY() )
+     {
+         // Obtain the character from the UART.
+         cRxedChar = UART_RX_REG;
+
+         // Post the character onto a queue. xCRWokenByPost will be pdFALSE
+         // the first time around the loop. If the post causes a co-routine
+         // to be woken (unblocked) then xCRWokenByPost will be set to pdTRUE.
+         // In this manner we can ensure that if more than one co-routine is
+         // blocked on the queue only one is woken by this ISR no matter how
+         // many characters are posted to the queue.
+         xCRWokenByPost = crQUEUE_SEND_FROM_ISR( xCommsRxQueue, &cRxedChar, xCRWokenByPost );
+     }
+ }
+ * \defgroup crQUEUE_SEND_FROM_ISR crQUEUE_SEND_FROM_ISR
+ * \ingroup Tasks
+ */
+#define crQUEUE_SEND_FROM_ISR( pxQueue, pvItemToQueue, xCoRoutinePreviouslyWoken ) xQueueCRSendFromISR( ( pxQueue ), ( pvItemToQueue ), ( xCoRoutinePreviouslyWoken ) )
+
+
+/**
+ * croutine.h
+ *
+ crQUEUE_RECEIVE_FROM_ISR(
+                             QueueHandle_t pxQueue,
+                             void *pvBuffer,
+                             BaseType_t * pxCoRoutineWoken
+                         )
+ *
+ * The macros crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() are the
+ * co-routine equivalents of the xQueueSendFromISR() and xQueueReceiveFromISR()
+ * functions used by tasks.
+ *
+ * crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() can only be used to
+ * pass data between a co-routine and an ISR, whereas xQueueSendFromISR() and
+ * xQueueReceiveFromISR() can only be used to pass data between a task and an
+ * ISR.
+ *
+ * crQUEUE_RECEIVE_FROM_ISR can only be called from an ISR to receive data
+ * from a queue that is being used from within a co-routine (a co-routine
+ * posted to the queue).
+ *
+ * See the co-routine section of the WEB documentation for information on
+ * passing data between tasks and co-routines and between ISRs and
+ * co-routines.
+ *
+ * @param pxQueue The handle to the queue from which the item is to be
+ * received.
+ *
+ * @param pvBuffer A pointer to a buffer into which the received item will be
+ * placed. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from the queue into
+ * pvBuffer.
+ *
+ * @param pxCoRoutineWoken A co-routine may be blocked waiting for space to become
+ * available on the queue. If crQUEUE_RECEIVE_FROM_ISR causes such a
+ * co-routine to unblock *pxCoRoutineWoken will get set to pdTRUE, otherwise
+ * *pxCoRoutineWoken will remain unchanged.
+ *
+ * @return pdTRUE if an item was successfully received from the queue,
+ * otherwise pdFALSE.
+ *
+ * Example usage:
+ // A co-routine that posts a character to a queue then blocks for a fixed
+ // period. The character is incremented each time.
+ static void vSendingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
+ {
+ // cCharToTx holds its value while this co-routine is blocked and must
+ // therefore be declared static.
+ static char cCharToTx = 'a';
+ BaseType_t xResult;
+
+     // All co-routines must start with a call to crSTART().
+     crSTART( xHandle );
+
+     for( ;; )
+     {
+         // Send the next character to the queue.
+         crQUEUE_SEND( xHandle, xCoRoutineQueue, &cCharToTx, NO_DELAY, &xResult );
+
+         if( xResult == pdPASS )
+         {
+             // The character was successfully posted to the queue.
+         }
+         else
+         {
+             // Could not post the character to the queue.
+         }
+
+         // Enable the UART Tx interrupt to cause an interrupt in this
+         // hypothetical UART. The interrupt will obtain the character
+         // from the queue and send it.
+         ENABLE_TX_INTERRUPT();
+
+         // Increment to the next character then block for a fixed period.
+         // cCharToTx will maintain its value across the delay as it is
+         // declared static.
+         cCharToTx++;
+         if( cCharToTx > 'x' )
+         {
+             cCharToTx = 'a';
+         }
+         crDELAY( xHandle, 100 );
+     }
+
+     // All co-routines must end with a call to crEND().
+     crEND();
+ }
+
+ // An ISR that uses a queue to receive characters to send on a UART.
+ void vUART_ISR( void )
+ {
+ char cCharToTx;
+ BaseType_t xCRWokenByPost = pdFALSE;
+
+     while( UART_TX_REG_EMPTY() )
+     {
+         // Are there any characters in the queue waiting to be sent?
+         // xCRWokenByPost will automatically be set to pdTRUE if a co-routine
+         // is woken by the post - ensuring that only a single co-routine is
+         // woken no matter how many times we go around this loop.
+         if( crQUEUE_RECEIVE_FROM_ISR( xCoRoutineQueue, &cCharToTx, &xCRWokenByPost ) )
+         {
+             SEND_CHARACTER( cCharToTx );
+         }
+     }
+ }
+ * \defgroup crQUEUE_RECEIVE_FROM_ISR crQUEUE_RECEIVE_FROM_ISR
+ * \ingroup Tasks
+ */
+#define crQUEUE_RECEIVE_FROM_ISR( pxQueue, pvBuffer, pxCoRoutineWoken ) xQueueCRReceiveFromISR( ( pxQueue ), ( pvBuffer ), ( pxCoRoutineWoken ) )
+
+/*
+ * This function is intended for internal use by the co-routine macros only.
+ * The macro nature of the co-routine implementation requires that the
+ * prototype appears here. The function should not be used by application
+ * writers.
+ *
+ * Removes the current co-routine from its ready list and places it in the
+ * appropriate delayed list.
+ */
+void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, List_t *pxEventList );
+
+/*
+ * This function is intended for internal use by the queue implementation only.
+ * The function should not be used by application writers.
+ *
+ * Removes the highest priority co-routine from the event list and places it in
+ * the pending ready list.
+ */
+BaseType_t xCoRoutineRemoveFromEventList( const List_t *pxEventList );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* CO_ROUTINE_H */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h
new file mode 100644
index 0000000..4d0ce01
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/deprecated_definitions.h
@@ -0,0 +1,279 @@
+/*
+ * FreeRTOS Kernel V10.2.1
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef DEPRECATED_DEFINITIONS_H +#define DEPRECATED_DEFINITIONS_H + + +/* Each FreeRTOS port has a unique portmacro.h header file. Originally a +pre-processor definition was used to ensure the pre-processor found the correct +portmacro.h file for the port being used. That scheme was deprecated in favour +of setting the compiler's include path such that it found the correct +portmacro.h file - removing the need for the constant and allowing the +portmacro.h file to be located anywhere in relation to the port being used. The +definitions below remain in the code for backward compatibility only. New +projects should not use them. 
*/ + +#ifdef OPEN_WATCOM_INDUSTRIAL_PC_PORT + #include "..\..\Source\portable\owatcom\16bitdos\pc\portmacro.h" + typedef void ( __interrupt __far *pxISR )(); +#endif + +#ifdef OPEN_WATCOM_FLASH_LITE_186_PORT + #include "..\..\Source\portable\owatcom\16bitdos\flsh186\portmacro.h" + typedef void ( __interrupt __far *pxISR )(); +#endif + +#ifdef GCC_MEGA_AVR + #include "../portable/GCC/ATMega323/portmacro.h" +#endif + +#ifdef IAR_MEGA_AVR + #include "../portable/IAR/ATMega323/portmacro.h" +#endif + +#ifdef MPLAB_PIC24_PORT + #include "../../Source/portable/MPLAB/PIC24_dsPIC/portmacro.h" +#endif + +#ifdef MPLAB_DSPIC_PORT + #include "../../Source/portable/MPLAB/PIC24_dsPIC/portmacro.h" +#endif + +#ifdef MPLAB_PIC18F_PORT + #include "../../Source/portable/MPLAB/PIC18F/portmacro.h" +#endif + +#ifdef MPLAB_PIC32MX_PORT + #include "../../Source/portable/MPLAB/PIC32MX/portmacro.h" +#endif + +#ifdef _FEDPICC + #include "libFreeRTOS/Include/portmacro.h" +#endif + +#ifdef SDCC_CYGNAL + #include "../../Source/portable/SDCC/Cygnal/portmacro.h" +#endif + +#ifdef GCC_ARM7 + #include "../../Source/portable/GCC/ARM7_LPC2000/portmacro.h" +#endif + +#ifdef GCC_ARM7_ECLIPSE + #include "portmacro.h" +#endif + +#ifdef ROWLEY_LPC23xx + #include "../../Source/portable/GCC/ARM7_LPC23xx/portmacro.h" +#endif + +#ifdef IAR_MSP430 + #include "..\..\Source\portable\IAR\MSP430\portmacro.h" +#endif + +#ifdef GCC_MSP430 + #include "../../Source/portable/GCC/MSP430F449/portmacro.h" +#endif + +#ifdef ROWLEY_MSP430 + #include "../../Source/portable/Rowley/MSP430F449/portmacro.h" +#endif + +#ifdef ARM7_LPC21xx_KEIL_RVDS + #include "..\..\Source\portable\RVDS\ARM7_LPC21xx\portmacro.h" +#endif + +#ifdef SAM7_GCC + #include "../../Source/portable/GCC/ARM7_AT91SAM7S/portmacro.h" +#endif + +#ifdef SAM7_IAR + #include "..\..\Source\portable\IAR\AtmelSAM7S64\portmacro.h" +#endif + +#ifdef SAM9XE_IAR + #include "..\..\Source\portable\IAR\AtmelSAM9XE\portmacro.h" +#endif + +#ifdef LPC2000_IAR + #include "..\..\Source\portable\IAR\LPC2000\portmacro.h" +#endif + +#ifdef STR71X_IAR + #include "..\..\Source\portable\IAR\STR71x\portmacro.h" +#endif + +#ifdef STR75X_IAR + #include "..\..\Source\portable\IAR\STR75x\portmacro.h" +#endif + +#ifdef STR75X_GCC + #include "..\..\Source\portable\GCC\STR75x\portmacro.h" +#endif + +#ifdef STR91X_IAR + #include "..\..\Source\portable\IAR\STR91x\portmacro.h" +#endif + +#ifdef GCC_H8S + #include "../../Source/portable/GCC/H8S2329/portmacro.h" +#endif + +#ifdef GCC_AT91FR40008 + #include "../../Source/portable/GCC/ARM7_AT91FR40008/portmacro.h" +#endif + +#ifdef RVDS_ARMCM3_LM3S102 + #include "../../Source/portable/RVDS/ARM_CM3/portmacro.h" +#endif + +#ifdef GCC_ARMCM3_LM3S102 + #include "../../Source/portable/GCC/ARM_CM3/portmacro.h" +#endif + +#ifdef GCC_ARMCM3 + #include "../../Source/portable/GCC/ARM_CM3/portmacro.h" +#endif + +#ifdef IAR_ARM_CM3 + #include "../../Source/portable/IAR/ARM_CM3/portmacro.h" +#endif + +#ifdef IAR_ARMCM3_LM + #include "../../Source/portable/IAR/ARM_CM3/portmacro.h" +#endif + +#ifdef HCS12_CODE_WARRIOR + #include "../../Source/portable/CodeWarrior/HCS12/portmacro.h" +#endif + +#ifdef MICROBLAZE_GCC + #include "../../Source/portable/GCC/MicroBlaze/portmacro.h" +#endif + +#ifdef TERN_EE + #include "..\..\Source\portable\Paradigm\Tern_EE\small\portmacro.h" +#endif + +#ifdef GCC_HCS12 + #include "../../Source/portable/GCC/HCS12/portmacro.h" +#endif + +#ifdef GCC_MCF5235 + #include "../../Source/portable/GCC/MCF5235/portmacro.h" +#endif + +#ifdef COLDFIRE_V2_GCC + #include 
"../../../Source/portable/GCC/ColdFire_V2/portmacro.h" +#endif + +#ifdef COLDFIRE_V2_CODEWARRIOR + #include "../../Source/portable/CodeWarrior/ColdFire_V2/portmacro.h" +#endif + +#ifdef GCC_PPC405 + #include "../../Source/portable/GCC/PPC405_Xilinx/portmacro.h" +#endif + +#ifdef GCC_PPC440 + #include "../../Source/portable/GCC/PPC440_Xilinx/portmacro.h" +#endif + +#ifdef _16FX_SOFTUNE + #include "..\..\Source\portable\Softune\MB96340\portmacro.h" +#endif + +#ifdef BCC_INDUSTRIAL_PC_PORT + /* A short file name has to be used in place of the normal + FreeRTOSConfig.h when using the Borland compiler. */ + #include "frconfig.h" + #include "..\portable\BCC\16BitDOS\PC\prtmacro.h" + typedef void ( __interrupt __far *pxISR )(); +#endif + +#ifdef BCC_FLASH_LITE_186_PORT + /* A short file name has to be used in place of the normal + FreeRTOSConfig.h when using the Borland compiler. */ + #include "frconfig.h" + #include "..\portable\BCC\16BitDOS\flsh186\prtmacro.h" + typedef void ( __interrupt __far *pxISR )(); +#endif + +#ifdef __GNUC__ + #ifdef __AVR32_AVR32A__ + #include "portmacro.h" + #endif +#endif + +#ifdef __ICCAVR32__ + #ifdef __CORE__ + #if __CORE__ == __AVR32A__ + #include "portmacro.h" + #endif + #endif +#endif + +#ifdef __91467D + #include "portmacro.h" +#endif + +#ifdef __96340 + #include "portmacro.h" +#endif + + +#ifdef __IAR_V850ES_Fx3__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_V850ES_Jx3__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_V850ES_Jx3_L__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_V850ES_Jx2__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_V850ES_Hx2__ + #include "../../Source/portable/IAR/V850ES/portmacro.h" +#endif + +#ifdef __IAR_78K0R_Kx3__ + #include "../../Source/portable/IAR/78K0R/portmacro.h" +#endif + +#ifdef __IAR_78K0R_Kx3L__ + #include "../../Source/portable/IAR/78K0R/portmacro.h" +#endif + +#endif /* DEPRECATED_DEFINITIONS_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h new file mode 100644 index 0000000..522b0b6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/event_groups.h @@ -0,0 +1,757 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#ifndef EVENT_GROUPS_H
+#define EVENT_GROUPS_H
+
+#ifndef INC_FREERTOS_H
+    #error "include FreeRTOS.h" must appear in source files before "include event_groups.h"
+#endif
+
+/* FreeRTOS includes. */
+#include "timers.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * An event group is a collection of bits to which an application can assign a
+ * meaning. For example, an application may create an event group to convey
+ * the status of various CAN bus related events in which bit 0 might mean "A CAN
+ * message has been received and is ready for processing", bit 1 might mean "The
+ * application has queued a message that is ready for sending onto the CAN
+ * network", and bit 2 might mean "It is time to send a SYNC message onto the
+ * CAN network" etc. A task can then test the bit values to see which events
+ * are active, and optionally enter the Blocked state to wait for a specified
+ * bit or a group of specified bits to be active. To continue the CAN bus
+ * example, a CAN controlling task can enter the Blocked state (and therefore
+ * not consume any processing time) until either bit 0, bit 1 or bit 2 are
+ * active, at which time the bit that was actually active would inform the task
+ * which action it had to take (process a received message, send a message, or
+ * send a SYNC).
+ *
+ * The event groups implementation contains intelligence to avoid race
+ * conditions that would otherwise occur were an application to use a simple
+ * variable for the same purpose. This is particularly important with respect
+ * to when a bit within an event group is to be cleared, and when bits have to
+ * be set and then tested atomically - as is the case where event groups are
+ * used to create a synchronisation point between multiple tasks (a
+ * 'rendezvous').
+ *
+ * \defgroup EventGroup
+ */
+
+
+
+/**
+ * event_groups.h
+ *
+ * Type by which event groups are referenced. For example, a call to
+ * xEventGroupCreate() returns an EventGroupHandle_t variable that can then
+ * be used as a parameter to other event group functions.
+ *
+ * \defgroup EventGroupHandle_t EventGroupHandle_t
+ * \ingroup EventGroup
+ */
+struct EventGroupDef_t;
+typedef struct EventGroupDef_t * EventGroupHandle_t;
+
+/*
+ * The type that holds event bits always matches TickType_t - therefore the
+ * number of bits it holds is set by configUSE_16_BIT_TICKS (16 bits if set to 1,
+ * 32 bits if set to 0).
+ *
+ * \defgroup EventBits_t EventBits_t
+ * \ingroup EventGroup
+ */
+typedef TickType_t EventBits_t;
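+
+/* An illustrative sketch (the names below are examples only): application
+   code usually gives individual event bits symbolic names with masks of type
+   EventBits_t:
+
+       #define appRX_COMPLETE_BIT    ( ( EventBits_t ) 1 << 0 )
+       #define appTX_COMPLETE_BIT    ( ( EventBits_t ) 1 << 1 )
+
+   Note that only the low 8 bits (configUSE_16_BIT_TICKS == 1) or low 24 bits
+   (configUSE_16_BIT_TICKS == 0) are available to the application - the top
+   byte of the event group value is reserved for kernel use. */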
+
+/**
+ * event_groups.h
+ *
+ EventGroupHandle_t xEventGroupCreate( void );
+
+ *
+ * Create a new event group.
+ *
+ * Internally, within the FreeRTOS implementation, event groups use a [small]
+ * block of memory, in which the event group's structure is stored. If an event
+ * group is created using xEventGroupCreate() then the required memory is
+ * automatically dynamically allocated inside the xEventGroupCreate() function.
+ * (see http://www.freertos.org/a00111.html). If an event group is created
+ * using xEventGroupCreateStatic() then the application writer must instead
+ * provide the memory that will get used by the event group.
+ * xEventGroupCreateStatic() therefore allows an event group to be created
+ * without using any dynamic memory allocation.
+ *
+ * Although event groups are not related to ticks, for internal implementation
+ * reasons the number of bits available for use in an event group is dependent
+ * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h. If
+ * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit
+ * 0 to bit 7). If configUSE_16_BIT_TICKS is set to 0 then each event group has
+ * 24 usable bits (bit 0 to bit 23). The EventBits_t type is used to store
+ * event bits within an event group.
+ *
+ * @return If the event group was created then a handle to the event group is
+ * returned. If there was insufficient FreeRTOS heap available to create the
+ * event group then NULL is returned. See http://www.freertos.org/a00111.html
+ *
+ * Example usage:
+ // Declare a variable to hold the created event group. + EventGroupHandle_t xCreatedEventGroup; + + // Attempt to create the event group. + xCreatedEventGroup = xEventGroupCreate(); + + // Was the event group created successfully? + if( xCreatedEventGroup == NULL ) + { + // The event group was not created because there was insufficient + // FreeRTOS heap available. + } + else + { + // The event group was created. + } ++ * \defgroup xEventGroupCreate xEventGroupCreate + * \ingroup EventGroup + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + EventGroupHandle_t xEventGroupCreate( void ) PRIVILEGED_FUNCTION; +#endif + +/** + * event_groups.h + *
+ EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer );
+
+ *
+ * Create a new event group.
+ *
+ * Internally, within the FreeRTOS implementation, event groups use a [small]
+ * block of memory, in which the event group's structure is stored. If an event
+ * group is created using xEventGroupCreate() then the required memory is
+ * automatically dynamically allocated inside the xEventGroupCreate() function.
+ * (see http://www.freertos.org/a00111.html). If an event group is created
+ * using xEventGroupCreateStatic() then the application writer must instead
+ * provide the memory that will get used by the event group.
+ * xEventGroupCreateStatic() therefore allows an event group to be created
+ * without using any dynamic memory allocation.
+ *
+ * Although event groups are not related to ticks, for internal implementation
+ * reasons the number of bits available for use in an event group is dependent
+ * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h. If
+ * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit
+ * 0 to bit 7). If configUSE_16_BIT_TICKS is set to 0 then each event group has
+ * 24 usable bits (bit 0 to bit 23). The EventBits_t type is used to store
+ * event bits within an event group.
+ *
+ * @param pxEventGroupBuffer pxEventGroupBuffer must point to a variable of type
+ * StaticEventGroup_t, which will then be used to hold the event group's data
+ * structures, removing the need for the memory to be allocated dynamically.
+ *
+ * @return If the event group was created then a handle to the event group is
+ * returned. If pxEventGroupBuffer was NULL then NULL is returned.
+ *
+ * Example usage:
+ // StaticEventGroup_t is a publicly accessible structure that has the same
+ // size and alignment requirements as the real event group structure. It is
+ // provided as a mechanism for applications to know the size of the event
+ // group (which is dependent on the architecture and configuration file
+ // settings) without breaking the strict data hiding policy by exposing the
+ // real event group internals. This StaticEventGroup_t variable is passed
+ // into the xEventGroupCreateStatic() function and is used to store
+ // the event group's data structures.
+ StaticEventGroup_t xEventGroupBuffer;
+
+ // Create the event group without dynamically allocating any memory.
+ xEventGroup = xEventGroupCreateStatic( &xEventGroupBuffer );
+
+ */
+#if( configSUPPORT_STATIC_ALLOCATION == 1 )
+    EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer ) PRIVILEGED_FUNCTION;
+#endif
+
+/**
+ * event_groups.h
+ *
+ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + const TickType_t xTicksToWait ); ++ * + * [Potentially] block to wait for one or more bits to be set within a + * previously created event group. + * + * This function cannot be called from an interrupt. + * + * @param xEventGroup The event group in which the bits are being tested. The + * event group must have previously been created using a call to + * xEventGroupCreate(). + * + * @param uxBitsToWaitFor A bitwise value that indicates the bit or bits to test + * inside the event group. For example, to wait for bit 0 and/or bit 2 set + * uxBitsToWaitFor to 0x05. To wait for bits 0 and/or bit 1 and/or bit 2 set + * uxBitsToWaitFor to 0x07. Etc. + * + * @param xClearOnExit If xClearOnExit is set to pdTRUE then any bits within + * uxBitsToWaitFor that are set within the event group will be cleared before + * xEventGroupWaitBits() returns if the wait condition was met (if the function + * returns for a reason other than a timeout). If xClearOnExit is set to + * pdFALSE then the bits set in the event group are not altered when the call to + * xEventGroupWaitBits() returns. + * + * @param xWaitForAllBits If xWaitForAllBits is set to pdTRUE then + * xEventGroupWaitBits() will return when either all the bits in uxBitsToWaitFor + * are set or the specified block time expires. If xWaitForAllBits is set to + * pdFALSE then xEventGroupWaitBits() will return when any one of the bits set + * in uxBitsToWaitFor is set or the specified block time expires. The block + * time is specified by the xTicksToWait parameter. + * + * @param xTicksToWait The maximum amount of time (specified in 'ticks') to wait + * for one/all (depending on the xWaitForAllBits value) of the bits specified by + * uxBitsToWaitFor to become set. + * + * @return The value of the event group at the time either the bits being waited + * for became set, or the block time expired. Test the return value to know + * which bits were set. If xEventGroupWaitBits() returned because its timeout + * expired then not all the bits being waited for will be set. If + * xEventGroupWaitBits() returned because the bits it was waiting for were set + * then the returned value is the event group value before any bits were + * automatically cleared in the case that xClearOnExit parameter was set to + * pdTRUE. + * + * Example usage: +
+ #define BIT_0 ( 1 << 0 ) + #define BIT_4 ( 1 << 4 ) + + void aFunction( EventGroupHandle_t xEventGroup ) + { + EventBits_t uxBits; + const TickType_t xTicksToWait = 100 / portTICK_PERIOD_MS; + + // Wait a maximum of 100ms for either bit 0 or bit 4 to be set within + // the event group. Clear the bits before exiting. + uxBits = xEventGroupWaitBits( + xEventGroup, // The event group being tested. + BIT_0 | BIT_4, // The bits within the event group to wait for. + pdTRUE, // BIT_0 and BIT_4 should be cleared before returning. + pdFALSE, // Don't wait for both bits, either bit will do. + xTicksToWait ); // Wait a maximum of 100ms for either bit to be set. + + if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) ) + { + // xEventGroupWaitBits() returned because both bits were set. + } + else if( ( uxBits & BIT_0 ) != 0 ) + { + // xEventGroupWaitBits() returned because just BIT_0 was set. + } + else if( ( uxBits & BIT_4 ) != 0 ) + { + // xEventGroupWaitBits() returned because just BIT_4 was set. + } + else + { + // xEventGroupWaitBits() returned because xTicksToWait ticks passed + // without either BIT_0 or BIT_4 becoming set. + } + } ++ * \defgroup xEventGroupWaitBits xEventGroupWaitBits + * \ingroup EventGroup + */ +EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ); ++ * + * Clear bits within an event group. This function cannot be called from an + * interrupt. + * + * @param xEventGroup The event group in which the bits are to be cleared. + * + * @param uxBitsToClear A bitwise value that indicates the bit or bits to clear + * in the event group. For example, to clear bit 3 only, set uxBitsToClear to + * 0x08. To clear bit 3 and bit 0 set uxBitsToClear to 0x09. + * + * @return The value of the event group before the specified bits were cleared. + * + * Example usage: +
+ #define BIT_0 ( 1 << 0 ) + #define BIT_4 ( 1 << 4 ) + + void aFunction( EventGroupHandle_t xEventGroup ) + { + EventBits_t uxBits; + + // Clear bit 0 and bit 4 in xEventGroup. + uxBits = xEventGroupClearBits( + xEventGroup, // The event group being updated. + BIT_0 | BIT_4 );// The bits being cleared. + + if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) ) + { + // Both bit 0 and bit 4 were set before xEventGroupClearBits() was + // called. Both will now be clear (not set). + } + else if( ( uxBits & BIT_0 ) != 0 ) + { + // Bit 0 was set before xEventGroupClearBits() was called. It will + // now be clear. + } + else if( ( uxBits & BIT_4 ) != 0 ) + { + // Bit 4 was set before xEventGroupClearBits() was called. It will + // now be clear. + } + else + { + // Neither bit 0 nor bit 4 were set in the first place. + } + } ++ * \defgroup xEventGroupClearBits xEventGroupClearBits + * \ingroup EventGroup + */ +EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+ BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear );
+
+ *
+ * A version of xEventGroupClearBits() that can be called from an interrupt.
+ *
+ * Setting bits in an event group is not a deterministic operation because there
+ * are an unknown number of tasks that may be waiting for the bit or bits being
+ * set. FreeRTOS does not allow nondeterministic operations to be performed
+ * while interrupts are disabled, so it protects event groups that are accessed
+ * from tasks by suspending the scheduler rather than disabling interrupts. As
+ * a result event groups cannot be accessed directly from an interrupt service
+ * routine. Therefore xEventGroupClearBitsFromISR() sends a message to the
+ * timer task to have the clear operation performed in the context of the timer
+ * task.
+ *
+ * @param xEventGroup The event group in which the bits are to be cleared.
+ *
+ * @param uxBitsToClear A bitwise value that indicates the bit or bits to clear.
+ * For example, to clear bit 3 only, set uxBitsToClear to 0x08. To clear bit 3
+ * and bit 0 set uxBitsToClear to 0x09.
+ *
+ * @return If the request to execute the function was posted successfully then
+ * pdPASS is returned, otherwise pdFALSE is returned. pdFALSE will be returned
+ * if the timer service queue was full.
+ *
+ * Example usage:
+ #define BIT_0 ( 1 << 0 )
+ #define BIT_4 ( 1 << 4 )
+
+ // An event group which it is assumed has already been created by a call to
+ // xEventGroupCreate().
+ EventGroupHandle_t xEventGroup;
+
+ void anInterruptHandler( void )
+ {
+ BaseType_t xResult;
+
+     // Clear bit 0 and bit 4 in xEventGroup.
+     xResult = xEventGroupClearBitsFromISR(
+                         xEventGroup,      // The event group being updated.
+                         BIT_0 | BIT_4 );  // The bits being cleared.
+
+     if( xResult == pdPASS )
+     {
+         // The message was posted successfully.
+     }
+ }
+ * \defgroup xEventGroupClearBitsFromISR xEventGroupClearBitsFromISR
+ * \ingroup EventGroup
+ */
+#if( configUSE_TRACE_FACILITY == 1 )
+    BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION;
+#else
+    #define xEventGroupClearBitsFromISR( xEventGroup, uxBitsToClear ) xTimerPendFunctionCallFromISR( vEventGroupClearBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToClear, NULL )
+#endif
+
+/**
+ * event_groups.h
+ *
+ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet );
+
+ *
+ * Set bits within an event group.
+ * This function cannot be called from an interrupt. xEventGroupSetBitsFromISR()
+ * is a version that can be called from an interrupt.
+ *
+ * Setting bits in an event group will automatically unblock tasks that are
+ * blocked waiting for the bits.
+ *
+ * @param xEventGroup The event group in which the bits are to be set.
+ *
+ * @param uxBitsToSet A bitwise value that indicates the bit or bits to set.
+ * For example, to set bit 3 only, set uxBitsToSet to 0x08. To set bit 3
+ * and bit 0 set uxBitsToSet to 0x09.
+ *
+ * @return The value of the event group at the time the call to
+ * xEventGroupSetBits() returns. There are two reasons why the returned value
+ * might have the bits specified by the uxBitsToSet parameter cleared. First,
+ * if setting a bit results in a task that was waiting for the bit leaving the
+ * blocked state then it is possible the bit will be cleared automatically
+ * (see the xClearOnExit parameter of xEventGroupWaitBits()). Second, any
+ * unblocked (or otherwise Ready state) task that has a priority above that of
+ * the task that called xEventGroupSetBits() will execute and may change the
+ * event group value before the call to xEventGroupSetBits() returns.
+ *
+ * Example usage:
+ #define BIT_0 ( 1 << 0 ) + #define BIT_4 ( 1 << 4 ) + + void aFunction( EventGroupHandle_t xEventGroup ) + { + EventBits_t uxBits; + + // Set bit 0 and bit 4 in xEventGroup. + uxBits = xEventGroupSetBits( + xEventGroup, // The event group being updated. + BIT_0 | BIT_4 );// The bits being set. + + if( ( uxBits & ( BIT_0 | BIT_4 ) ) == ( BIT_0 | BIT_4 ) ) + { + // Both bit 0 and bit 4 remained set when the function returned. + } + else if( ( uxBits & BIT_0 ) != 0 ) + { + // Bit 0 remained set when the function returned, but bit 4 was + // cleared. It might be that bit 4 was cleared automatically as a + // task that was waiting for bit 4 was removed from the Blocked + // state. + } + else if( ( uxBits & BIT_4 ) != 0 ) + { + // Bit 4 remained set when the function returned, but bit 0 was + // cleared. It might be that bit 0 was cleared automatically as a + // task that was waiting for bit 0 was removed from the Blocked + // state. + } + else + { + // Neither bit 0 nor bit 4 remained set. It might be that a task + // was waiting for both of the bits to be set, and the bits were + // cleared as the task left the Blocked state. + } + } ++ * \defgroup xEventGroupSetBits xEventGroupSetBits + * \ingroup EventGroup + */ +EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+ BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * A version of xEventGroupSetBits() that can be called from an interrupt. + * + * Setting bits in an event group is not a deterministic operation because there + * are an unknown number of tasks that may be waiting for the bit or bits being + * set. FreeRTOS does not allow nondeterministic operations to be performed in + * interrupts or from critical sections. Therefore xEventGroupSetBitsFromISR() + * sends a message to the timer task to have the set operation performed in the + * context of the timer task - where a scheduler lock is used in place of a + * critical section. + * + * @param xEventGroup The event group in which the bits are to be set. + * + * @param uxBitsToSet A bitwise value that indicates the bit or bits to set. + * For example, to set bit 3 only, set uxBitsToSet to 0x08. To set bit 3 + * and bit 0 set uxBitsToSet to 0x09. + * + * @param pxHigherPriorityTaskWoken As mentioned above, calling this function + * will result in a message being sent to the timer daemon task. If the + * priority of the timer daemon task is higher than the priority of the + * currently running task (the task the interrupt interrupted) then + * *pxHigherPriorityTaskWoken will be set to pdTRUE by + * xEventGroupSetBitsFromISR(), indicating that a context switch should be + * requested before the interrupt exits. For that reason + * *pxHigherPriorityTaskWoken must be initialised to pdFALSE. See the + * example code below. + * + * @return If the request to execute the function was posted successfully then + * pdPASS is returned, otherwise pdFALSE is returned. pdFALSE will be returned + * if the timer service queue was full. + * + * Example usage: +
+ #define BIT_0 ( 1 << 0 )
+ #define BIT_4 ( 1 << 4 )
+
+ // An event group which it is assumed has already been created by a call to
+ // xEventGroupCreate().
+ EventGroupHandle_t xEventGroup;
+
+ void anInterruptHandler( void )
+ {
+ BaseType_t xHigherPriorityTaskWoken, xResult;
+
+     // xHigherPriorityTaskWoken must be initialised to pdFALSE.
+     xHigherPriorityTaskWoken = pdFALSE;
+
+     // Set bit 0 and bit 4 in xEventGroup.
+     xResult = xEventGroupSetBitsFromISR(
+                         xEventGroup,       // The event group being updated.
+                         BIT_0 | BIT_4,     // The bits being set.
+                         &xHigherPriorityTaskWoken );
+
+     // Was the message posted successfully?
+     if( xResult == pdPASS )
+     {
+         // If xHigherPriorityTaskWoken is now set to pdTRUE then a context
+         // switch should be requested. The macro used is port specific and
+         // will be either portYIELD_FROM_ISR() or portEND_SWITCHING_ISR() -
+         // refer to the documentation page for the port being used.
+         portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+     }
+ }
+ * \defgroup xEventGroupSetBitsFromISR xEventGroupSetBitsFromISR
+ * \ingroup EventGroup
+ */
+#if( configUSE_TRACE_FACILITY == 1 )
+    BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+#else
+    #define xEventGroupSetBitsFromISR( xEventGroup, uxBitsToSet, pxHigherPriorityTaskWoken ) xTimerPendFunctionCallFromISR( vEventGroupSetBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToSet, pxHigherPriorityTaskWoken )
+#endif
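+
+/* Usage note (informal): because the FromISR set and clear operations above
+   are deferred to the timer daemon task via xTimerPendFunctionCallFromISR(),
+   configUSE_TIMERS and INCLUDE_xTimerPendFunctionCall must both be set to 1
+   in FreeRTOSConfig.h for them to be available - and the bits are not
+   actually modified until the daemon task runs, so the change is not visible
+   "immediately" from the ISR's point of view. */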
+
+/**
+ * event_groups.h
+ *
+ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
+                              const EventBits_t uxBitsToSet,
+                              const EventBits_t uxBitsToWaitFor,
+                              TickType_t xTicksToWait );
+
+ *
+ * Atomically set bits within an event group, then wait for a combination of
+ * bits to be set within the same event group. This functionality is typically
+ * used to synchronise multiple tasks, where each task has to wait for the other
+ * tasks to reach a synchronisation point before proceeding.
+ *
+ * This function cannot be used from an interrupt.
+ *
+ * The function will return before its block time expires if the bits specified
+ * by the uxBitsToWaitFor parameter are set, or become set within that time. In
+ * this case all the bits specified by uxBitsToWaitFor will be automatically
+ * cleared before the function returns.
+ *
+ * @param xEventGroup The event group in which the bits are being tested. The
+ * event group must have previously been created using a call to
+ * xEventGroupCreate().
+ *
+ * @param uxBitsToSet The bits to set in the event group before determining
+ * if, and possibly waiting for, all the bits specified by the uxBitsToWaitFor
+ * parameter are set.
+ *
+ * @param uxBitsToWaitFor A bitwise value that indicates the bit or bits to test
+ * inside the event group. For example, to wait for bit 0 and bit 2 set
+ * uxBitsToWaitFor to 0x05. To wait for bits 0 and bit 1 and bit 2 set
+ * uxBitsToWaitFor to 0x07. Etc.
+ *
+ * @param xTicksToWait The maximum amount of time (specified in 'ticks') to wait
+ * for all of the bits specified by uxBitsToWaitFor to become set.
+ *
+ * @return The value of the event group at the time either the bits being waited
+ * for became set, or the block time expired. Test the return value to know
+ * which bits were set. If xEventGroupSync() returned because its timeout
+ * expired then not all the bits being waited for will be set. If
+ * xEventGroupSync() returned because all the bits it was waiting for were
+ * set then the returned value is the event group value before any bits were
+ * automatically cleared.
+ *
+ * Example usage:
+ // Bits used by the three tasks.
+ #define TASK_0_BIT ( 1 << 0 )
+ #define TASK_1_BIT ( 1 << 1 )
+ #define TASK_2_BIT ( 1 << 2 )
+
+ #define ALL_SYNC_BITS ( TASK_0_BIT | TASK_1_BIT | TASK_2_BIT )
+
+ // Use an event group to synchronise three tasks. It is assumed this event
+ // group has already been created elsewhere.
+ EventGroupHandle_t xEventBits;
+
+ void vTask0( void *pvParameters )
+ {
+ EventBits_t uxReturn;
+ TickType_t xTicksToWait = 100 / portTICK_PERIOD_MS;
+
+     for( ;; )
+     {
+         // Perform task functionality here.
+
+         // Set bit 0 in the event flag to note this task has reached the
+         // sync point. The other two tasks will set the other two bits defined
+         // by ALL_SYNC_BITS. All three tasks have reached the synchronisation
+         // point when all the ALL_SYNC_BITS are set. Wait a maximum of 100ms
+         // for this to happen.
+         uxReturn = xEventGroupSync( xEventBits, TASK_0_BIT, ALL_SYNC_BITS, xTicksToWait );
+
+         if( ( uxReturn & ALL_SYNC_BITS ) == ALL_SYNC_BITS )
+         {
+             // All three tasks reached the synchronisation point before the call
+             // to xEventGroupSync() timed out.
+         }
+     }
+ }
+
+ void vTask1( void *pvParameters )
+ {
+     for( ;; )
+     {
+         // Perform task functionality here.
+
+         // Set bit 1 in the event flag to note this task has reached the
+         // synchronisation point. The other two tasks will set the other two
+         // bits defined by ALL_SYNC_BITS. All three tasks have reached the
+         // synchronisation point when all the ALL_SYNC_BITS are set. Wait
+         // indefinitely for this to happen.
+         xEventGroupSync( xEventBits, TASK_1_BIT, ALL_SYNC_BITS, portMAX_DELAY );
+
+         // xEventGroupSync() was called with an indefinite block time, so
+         // this task will only reach here if the synchronisation was reached
+         // by all three tasks, so there is no need to test the return value.
+     }
+ }
+
+ void vTask2( void *pvParameters )
+ {
+     for( ;; )
+     {
+         // Perform task functionality here.
+
+         // Set bit 2 in the event flag to note this task has reached the
+         // synchronisation point. The other two tasks will set the other two
+         // bits defined by ALL_SYNC_BITS. All three tasks have reached the
+         // synchronisation point when all the ALL_SYNC_BITS are set. Wait
+         // indefinitely for this to happen.
+         xEventGroupSync( xEventBits, TASK_2_BIT, ALL_SYNC_BITS, portMAX_DELAY );
+
+         // xEventGroupSync() was called with an indefinite block time, so
+         // this task will only reach here if the synchronisation was reached
+         // by all three tasks, so there is no need to test the return value.
+     }
+ }
+
+ * \defgroup xEventGroupSync xEventGroupSync
+ * \ingroup EventGroup
+ */
+EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
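+
+/* Why the rendezvous above is race-free (informal note): xEventGroupSync()
+   performs its set and its test of uxBitsToWaitFor atomically with respect
+   to other tasks, so no participant can observe its own bit set and then
+   cleared again before the rendezvous completes - which is what a separate
+   xEventGroupSetBits() followed by xEventGroupWaitBits() could not
+   guarantee. */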
+ EventBits_t xEventGroupGetBits( EventGroupHandle_t xEventGroup ); ++ * + * Returns the current value of the bits in an event group. This function + * cannot be used from an interrupt. + * + * @param xEventGroup The event group being queried. + * + * @return The event group bits at the time xEventGroupGetBits() was called. + * + * \defgroup xEventGroupGetBits xEventGroupGetBits + * \ingroup EventGroup + */ +#define xEventGroupGetBits( xEventGroup ) xEventGroupClearBits( xEventGroup, 0 ) + +/** + * event_groups.h + *
+ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ); ++ * + * A version of xEventGroupGetBits() that can be called from an ISR. + * + * @param xEventGroup The event group being queried. + * + * @return The event group bits at the time xEventGroupGetBitsFromISR() was called. + * + * \defgroup xEventGroupGetBitsFromISR xEventGroupGetBitsFromISR + * \ingroup EventGroup + */ +EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION; + +/** + * event_groups.h + *
+ void vEventGroupDelete( EventGroupHandle_t xEventGroup );
+
+ *
+ * Delete an event group that was previously created by a call to
+ * xEventGroupCreate().  Tasks that are blocked on the event group will be
+ * unblocked and obtain 0 as the event group's value.
+ *
+ * @param xEventGroup The event group being deleted.
+ */
+void vEventGroupDelete( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION;
+
+/* For internal use only. */
+void vEventGroupSetBitsCallback( void *pvEventGroup, const uint32_t ulBitsToSet ) PRIVILEGED_FUNCTION;
+void vEventGroupClearBitsCallback( void *pvEventGroup, const uint32_t ulBitsToClear ) PRIVILEGED_FUNCTION;
+
+
+#if (configUSE_TRACE_FACILITY == 1)
+    UBaseType_t uxEventGroupGetNumber( void* xEventGroup ) PRIVILEGED_FUNCTION;
+    void vEventGroupSetNumber( void* xEventGroup, UBaseType_t uxEventGroupNumber ) PRIVILEGED_FUNCTION;
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT_GROUPS_H */
+
+
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/list.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/list.h
new file mode 100644
index 0000000..b2bb46d
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/list.h
@@ -0,0 +1,412 @@
+/*
+ * FreeRTOS Kernel V10.2.1
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+/*
+ * This is the list implementation used by the scheduler.  While it is tailored
+ * heavily for the scheduler's needs, it is also available for use by
+ * application code.
+ *
+ * list_ts can only store pointers to list_item_ts.  Each ListItem_t contains a
+ * numeric value (xItemValue).  Most of the time the lists are sorted in
+ * ascending item value order.
+ *
+ * Lists are created already containing one list item.  The value of this
+ * item is the maximum possible that can be stored, it is therefore always at
+ * the end of the list and acts as a marker.  The list member pxHead always
+ * points to this marker - even though it is at the tail of the list.  This
+ * is because the tail contains a wrap back pointer to the true head of
+ * the list.
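+ *
+ * Editor's note: the following sketch is illustrative and not part of the
+ * original header.  It shows how the end marker terminates a walk over a
+ * list, using the access macros defined later in this file; pxList is an
+ * assumed List_t pointer and vProcessOwner() is a hypothetical application
+ * function:
+ *
+ *     ListItem_t const *pxIterator;
+ *
+ *     // Visit every real item; the loop stops at the end marker.
+ *     for( pxIterator = listGET_HEAD_ENTRY( pxList );
+ *          pxIterator != listGET_END_MARKER( pxList );
+ *          pxIterator = listGET_NEXT( pxIterator ) )
+ *     {
+ *         // pvOwner points back to the object (normally a TCB) containing the item.
+ *         vProcessOwner( listGET_LIST_ITEM_OWNER( pxIterator ) );
+ *     }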
+ *
+ * In addition to its value, each list item contains a pointer to the next
+ * item in the list (pxNext), a pointer to the list it is in (pxContainer)
+ * and a pointer back to the object that contains it.  These latter two
+ * pointers are included for efficiency of list manipulation.  There is
+ * effectively a two way link between the object containing the list item and
+ * the list item itself.
+ *
+ *
+ * \page ListIntroduction List Implementation
+ * \ingroup FreeRTOSIntro
+ */
+
+#ifndef INC_FREERTOS_H
+    #error FreeRTOS.h must be included before list.h
+#endif
+
+#ifndef LIST_H
+#define LIST_H
+
+/*
+ * The list structure members are modified from within interrupts, and therefore
+ * by rights should be declared volatile.  However, they are only modified in a
+ * functionally atomic way (within critical sections or with the scheduler
+ * suspended) and are either passed by reference into a function or indexed via
+ * a volatile variable.  Therefore, in all use cases tested so far, the volatile
+ * qualifier can be omitted in order to provide a moderate performance
+ * improvement without adversely affecting functional behaviour.  The assembly
+ * instructions generated by the IAR, ARM and GCC compilers when the respective
+ * compiler's options were set for maximum optimisation have been inspected and
+ * deemed to be as intended.  That said, as compiler technology advances, and
+ * especially if aggressive cross module optimisation is used (a use case that
+ * has not been exercised to any great extent) then it is feasible that the
+ * volatile qualifier will be needed for correct optimisation.  If the compiler
+ * ever removes essential code because, without the volatile qualifier on the
+ * list structure members and with aggressive cross module optimisation, it
+ * deemed that code unnecessary, the result will be a complete and obvious
+ * failure of the scheduler.  If this is ever experienced then the volatile
+ * qualifier can be inserted in the relevant places within the list structures
+ * by simply defining configLIST_VOLATILE to volatile in FreeRTOSConfig.h (as
+ * per the example at the bottom of this comment block).  If configLIST_VOLATILE
+ * is not defined then the preprocessor directives below will simply #define
+ * configLIST_VOLATILE away completely.
+ *
+ * To use volatile list structure members then add the following line to
+ * FreeRTOSConfig.h (without the quotes):
+ * "#define configLIST_VOLATILE volatile"
+ */
+#ifndef configLIST_VOLATILE
+    #define configLIST_VOLATILE
+#endif /* configLIST_VOLATILE */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Macros that can be used to place known values within the list structures,
+then check that the known values do not get corrupted during the execution of
+the application.  These may catch the list data structures being overwritten in
+memory.  They will not catch data errors caused by incorrect configuration or
+use of FreeRTOS.*/
+#if( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 0 )
+    /* Define the macros to do nothing.
*/
+    #define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE
+    #define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE
+    #define listFIRST_LIST_INTEGRITY_CHECK_VALUE
+    #define listSECOND_LIST_INTEGRITY_CHECK_VALUE
+    #define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem )
+    #define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem )
+    #define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList )
+    #define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList )
+    #define listTEST_LIST_ITEM_INTEGRITY( pxItem )
+    #define listTEST_LIST_INTEGRITY( pxList )
+#else
+    /* Define macros that add new members into the list structures. */
+    #define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE TickType_t xListItemIntegrityValue1;
+    #define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE TickType_t xListItemIntegrityValue2;
+    #define listFIRST_LIST_INTEGRITY_CHECK_VALUE TickType_t xListIntegrityValue1;
+    #define listSECOND_LIST_INTEGRITY_CHECK_VALUE TickType_t xListIntegrityValue2;
+
+    /* Define macros that set the new structure members to known values. */
+    #define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) ( pxItem )->xListItemIntegrityValue1 = pdINTEGRITY_CHECK_VALUE
+    #define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) ( pxItem )->xListItemIntegrityValue2 = pdINTEGRITY_CHECK_VALUE
+    #define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ) ( pxList )->xListIntegrityValue1 = pdINTEGRITY_CHECK_VALUE
+    #define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ) ( pxList )->xListIntegrityValue2 = pdINTEGRITY_CHECK_VALUE
+
+    /* Define macros that will assert if one of the structure members does not
+    contain its expected value. */
+    #define listTEST_LIST_ITEM_INTEGRITY( pxItem ) configASSERT( ( ( pxItem )->xListItemIntegrityValue1 == pdINTEGRITY_CHECK_VALUE ) && ( ( pxItem )->xListItemIntegrityValue2 == pdINTEGRITY_CHECK_VALUE ) )
+    #define listTEST_LIST_INTEGRITY( pxList ) configASSERT( ( ( pxList )->xListIntegrityValue1 == pdINTEGRITY_CHECK_VALUE ) && ( ( pxList )->xListIntegrityValue2 == pdINTEGRITY_CHECK_VALUE ) )
+#endif /* configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES */
+
+
+/*
+ * Definition of the only type of object that a list can contain.
+ */
+struct xLIST;
+struct xLIST_ITEM
+{
+    listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
+    configLIST_VOLATILE TickType_t xItemValue; /*< The value being listed.  In most cases this is used to sort the list in ascending order. */
+    struct xLIST_ITEM * configLIST_VOLATILE pxNext; /*< Pointer to the next ListItem_t in the list. */
+    struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; /*< Pointer to the previous ListItem_t in the list. */
+    void * pvOwner; /*< Pointer to the object (normally a TCB) that contains the list item.  There is therefore a two way link between the object containing the list item and the list item itself. */
+    struct xLIST * configLIST_VOLATILE pxContainer; /*< Pointer to the list in which this list item is placed (if any). */
+    listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
+};
+typedef struct xLIST_ITEM ListItem_t; /* For some reason lint wants this as two separate definitions. */
+
+struct xMINI_LIST_ITEM
+{
+    listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1.
*/
+    configLIST_VOLATILE TickType_t xItemValue;
+    struct xLIST_ITEM * configLIST_VOLATILE pxNext;
+    struct xLIST_ITEM * configLIST_VOLATILE pxPrevious;
+};
+typedef struct xMINI_LIST_ITEM MiniListItem_t;
+
+/*
+ * Definition of the type of queue used by the scheduler.
+ */
+typedef struct xLIST
+{
+    listFIRST_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
+    volatile UBaseType_t uxNumberOfItems;
+    ListItem_t * configLIST_VOLATILE pxIndex; /*< Used to walk through the list.  Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */
+    MiniListItem_t xListEnd; /*< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */
+    listSECOND_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */
+} List_t;
+
+/*
+ * Access macro to set the owner of a list item.  The owner of a list item
+ * is the object (usually a TCB) that contains the list item.
+ *
+ * \page listSET_LIST_ITEM_OWNER listSET_LIST_ITEM_OWNER
+ * \ingroup LinkedList
+ */
+#define listSET_LIST_ITEM_OWNER( pxListItem, pxOwner ) ( ( pxListItem )->pvOwner = ( void * ) ( pxOwner ) )
+
+/*
+ * Access macro to get the owner of a list item.  The owner of a list item
+ * is the object (usually a TCB) that contains the list item.
+ *
+ * \page listGET_LIST_ITEM_OWNER listGET_LIST_ITEM_OWNER
+ * \ingroup LinkedList
+ */
+#define listGET_LIST_ITEM_OWNER( pxListItem ) ( ( pxListItem )->pvOwner )
+
+/*
+ * Access macro to set the value of the list item.  In most cases the value is
+ * used to sort the list in ascending order.
+ *
+ * \page listSET_LIST_ITEM_VALUE listSET_LIST_ITEM_VALUE
+ * \ingroup LinkedList
+ */
+#define listSET_LIST_ITEM_VALUE( pxListItem, xValue ) ( ( pxListItem )->xItemValue = ( xValue ) )
+
+/*
+ * Access macro to retrieve the value of the list item.  The value can
+ * represent anything - for example the priority of a task, or the time at
+ * which a task should be unblocked.
+ *
+ * \page listGET_LIST_ITEM_VALUE listGET_LIST_ITEM_VALUE
+ * \ingroup LinkedList
+ */
+#define listGET_LIST_ITEM_VALUE( pxListItem ) ( ( pxListItem )->xItemValue )
+
+/*
+ * Access macro to retrieve the value of the list item at the head of a given
+ * list.
+ *
+ * \page listGET_ITEM_VALUE_OF_HEAD_ENTRY listGET_ITEM_VALUE_OF_HEAD_ENTRY
+ * \ingroup LinkedList
+ */
+#define listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxList ) ( ( ( pxList )->xListEnd ).pxNext->xItemValue )
+
+/*
+ * Return the list item at the head of the list.
+ *
+ * \page listGET_HEAD_ENTRY listGET_HEAD_ENTRY
+ * \ingroup LinkedList
+ */
+#define listGET_HEAD_ENTRY( pxList ) ( ( ( pxList )->xListEnd ).pxNext )
+
+/*
+ * Return the next list item after the one passed in.
+ *
+ * \page listGET_NEXT listGET_NEXT
+ * \ingroup LinkedList
+ */
+#define listGET_NEXT( pxListItem ) ( ( pxListItem )->pxNext )
+
+/*
+ * Return the list item that marks the end of the list.
+ *
+ * \page listGET_END_MARKER listGET_END_MARKER
+ * \ingroup LinkedList
+ */
+#define listGET_END_MARKER( pxList ) ( ( ListItem_t const * ) ( &( ( pxList )->xListEnd ) ) )
+
+/*
+ * Access macro to determine if a list contains any items.  The macro will
+ * only have the value true if the list is empty.
+ *
+ * \page listLIST_IS_EMPTY listLIST_IS_EMPTY
+ * \ingroup LinkedList
+ */
+#define listLIST_IS_EMPTY( pxList ) ( ( ( pxList )->uxNumberOfItems == ( UBaseType_t ) 0 ) ?
pdTRUE : pdFALSE ) + +/* + * Access macro to return the number of items in the list. + */ +#define listCURRENT_LIST_LENGTH( pxList ) ( ( pxList )->uxNumberOfItems ) + +/* + * Access function to obtain the owner of the next entry in a list. + * + * The list member pxIndex is used to walk through a list. Calling + * listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list + * and returns that entry's pxOwner parameter. Using multiple calls to this + * function it is therefore possible to move through every item contained in + * a list. + * + * The pxOwner parameter of a list item is a pointer to the object that owns + * the list item. In the scheduler this is normally a task control block. + * The pxOwner parameter effectively creates a two way link between the list + * item and its owner. + * + * @param pxTCB pxTCB is set to the address of the owner of the next list item. + * @param pxList The list from which the next item owner is to be returned. + * + * \page listGET_OWNER_OF_NEXT_ENTRY listGET_OWNER_OF_NEXT_ENTRY + * \ingroup LinkedList + */ +#define listGET_OWNER_OF_NEXT_ENTRY( pxTCB, pxList ) \ +{ \ +List_t * const pxConstList = ( pxList ); \ + /* Increment the index to the next item and return the item, ensuring */ \ + /* we don't return the marker used at the end of the list. */ \ + ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ + if( ( void * ) ( pxConstList )->pxIndex == ( void * ) &( ( pxConstList )->xListEnd ) ) \ + { \ + ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ + } \ + ( pxTCB ) = ( pxConstList )->pxIndex->pvOwner; \ +} + + +/* + * Access function to obtain the owner of the first entry in a list. Lists + * are normally sorted in ascending item value order. + * + * This function returns the pxOwner member of the first item in the list. + * The pxOwner parameter of a list item is a pointer to the object that owns + * the list item. In the scheduler this is normally a task control block. + * The pxOwner parameter effectively creates a two way link between the list + * item and its owner. + * + * @param pxList The list from which the owner of the head item is to be + * returned. + * + * \page listGET_OWNER_OF_HEAD_ENTRY listGET_OWNER_OF_HEAD_ENTRY + * \ingroup LinkedList + */ +#define listGET_OWNER_OF_HEAD_ENTRY( pxList ) ( (&( ( pxList )->xListEnd ))->pxNext->pvOwner ) + +/* + * Check to see if a list item is within a list. The list item maintains a + * "container" pointer that points to the list it is in. All this macro does + * is check to see if the container and the list match. + * + * @param pxList The list we want to know if the list item is within. + * @param pxListItem The list item we want to know if is in the list. + * @return pdTRUE if the list item is in the list, otherwise pdFALSE. + */ +#define listIS_CONTAINED_WITHIN( pxList, pxListItem ) ( ( ( pxListItem )->pxContainer == ( pxList ) ) ? ( pdTRUE ) : ( pdFALSE ) ) + +/* + * Return the list a list item is contained within (referenced from). + * + * @param pxListItem The list item being queried. + * @return A pointer to the List_t object that references the pxListItem + */ +#define listLIST_ITEM_CONTAINER( pxListItem ) ( ( pxListItem )->pxContainer ) + +/* + * This provides a crude means of knowing if a list has been initialised, as + * pxList->xListEnd.xItemValue is set to portMAX_DELAY by the vListInitialise() + * function. 
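+ *
+ * Editor's sketch (illustrative only, not from the original header): code that
+ * cannot guarantee vListInitialise() has already run might guard itself as
+ * follows, where xMyList is a hypothetical List_t instance:
+ *
+ *     if( listLIST_IS_INITIALISED( &xMyList ) == pdFALSE )
+ *     {
+ *         vListInitialise( &xMyList ); // Inserts the end marker and zeroes the item count.
+ *     }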
+ */
+#define listLIST_IS_INITIALISED( pxList ) ( ( pxList )->xListEnd.xItemValue == portMAX_DELAY )
+
+/*
+ * Must be called before a list is used!  This initialises all the members
+ * of the list structure and inserts the xListEnd item into the list as a
+ * marker to the back of the list.
+ *
+ * @param pxList Pointer to the list being initialised.
+ *
+ * \page vListInitialise vListInitialise
+ * \ingroup LinkedList
+ */
+void vListInitialise( List_t * const pxList ) PRIVILEGED_FUNCTION;
+
+/*
+ * Must be called before a list item is used.  This sets the list container to
+ * null so the item does not think that it is already contained in a list.
+ *
+ * @param pxItem Pointer to the list item being initialised.
+ *
+ * \page vListInitialiseItem vListInitialiseItem
+ * \ingroup LinkedList
+ */
+void vListInitialiseItem( ListItem_t * const pxItem ) PRIVILEGED_FUNCTION;
+
+/*
+ * Insert a list item into a list.  The item will be inserted into the list in
+ * a position determined by its item value (ascending item value order).
+ *
+ * @param pxList The list into which the item is to be inserted.
+ *
+ * @param pxNewListItem The item that is to be placed in the list.
+ *
+ * \page vListInsert vListInsert
+ * \ingroup LinkedList
+ */
+void vListInsert( List_t * const pxList, ListItem_t * const pxNewListItem ) PRIVILEGED_FUNCTION;
+
+/*
+ * Insert a list item into a list.  The item will be inserted in a position
+ * such that it will be the last item within the list returned by multiple
+ * calls to listGET_OWNER_OF_NEXT_ENTRY.
+ *
+ * The list member pxIndex is used to walk through a list.  Calling
+ * listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list.
+ * Placing an item in a list using vListInsertEnd effectively places the item
+ * in the list position pointed to by pxIndex.  This means that every other
+ * item within the list will be returned by listGET_OWNER_OF_NEXT_ENTRY before
+ * the pxIndex parameter again points to the item being inserted.
+ *
+ * @param pxList The list into which the item is to be inserted.
+ *
+ * @param pxNewListItem The list item to be inserted into the list.
+ *
+ * \page vListInsertEnd vListInsertEnd
+ * \ingroup LinkedList
+ */
+void vListInsertEnd( List_t * const pxList, ListItem_t * const pxNewListItem ) PRIVILEGED_FUNCTION;
+
+/*
+ * Remove an item from a list.  The list item has a pointer to the list that
+ * it is in, so only the list item need be passed into the function.
+ *
+ * @param pxItemToRemove The item to be removed.  The item will remove itself from
+ * the list pointed to by its pxContainer parameter.
+ *
+ * @return The number of items that remain in the list after the list item has
+ * been removed.
+ *
+ * \page uxListRemove uxListRemove
+ * \ingroup LinkedList
+ */
+UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove ) PRIVILEGED_FUNCTION;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h
new file mode 100644
index 0000000..9ee3f4d
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/message_buffer.h
@@ -0,0 +1,799 @@
+/*
+ * FreeRTOS Kernel V10.2.1
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+
+/*
+ * Message buffers build functionality on top of FreeRTOS stream buffers.
+ * Whereas stream buffers are used to send a continuous stream of data from one
+ * task or interrupt to another, message buffers are used to send variable
+ * length discrete messages from one task or interrupt to another.  Their
+ * implementation is light weight, making them particularly suited for interrupt
+ * to task and core to core communication scenarios.
+ *
+ * ***NOTE***:  Uniquely among FreeRTOS objects, the stream buffer
+ * implementation (so also the message buffer implementation, as message buffers
+ * are built on top of stream buffers) assumes there is only one task or
+ * interrupt that will write to the buffer (the writer), and only one task or
+ * interrupt that will read from the buffer (the reader).  It is safe for the
+ * writer and reader to be different tasks or interrupts, but, unlike other
+ * FreeRTOS objects, it is not safe to have multiple different writers or
+ * multiple different readers.  If there are to be multiple different writers
+ * then the application writer must place each call to a writing API function
+ * (such as xMessageBufferSend()) inside a critical section and set the send
+ * block time to 0.  Likewise, if there are to be multiple different readers
+ * then the application writer must place each call to a reading API function
+ * (such as xMessageBufferReceive()) inside a critical section and set the receive
+ * timeout to 0.
+ *
+ * Message buffers hold variable length messages.  To enable that, when a
+ * message is written to the message buffer an additional sizeof( size_t ) bytes
+ * are also written to store the message's length (that happens internally, with
+ * the API function).  sizeof( size_t ) is typically 4 bytes on a 32-bit
+ * architecture, so writing a 10 byte message to a message buffer on a 32-bit
+ * architecture will actually reduce the available space in the message buffer
+ * by 14 bytes (10 bytes are used by the message, and 4 bytes to hold the length
+ * of the message).
+ */
+
+#ifndef FREERTOS_MESSAGE_BUFFER_H
+#define FREERTOS_MESSAGE_BUFFER_H
+
+/* Message buffers are built on top of stream buffers. */
+#include "stream_buffer.h"
+
+#if defined( __cplusplus )
+extern "C" {
+#endif
+
+/**
+ * Type by which message buffers are referenced.
For example, a call to
+ * xMessageBufferCreate() returns a MessageBufferHandle_t variable that can
+ * then be used as a parameter to xMessageBufferSend(), xMessageBufferReceive(),
+ * etc.
+ */
+typedef void * MessageBufferHandle_t;
+
+/*-----------------------------------------------------------*/
+
+/**
+ * message_buffer.h
+ *
+
+MessageBufferHandle_t xMessageBufferCreate( size_t xBufferSizeBytes ); ++ * + * Creates a new message buffer using dynamically allocated memory. See + * xMessageBufferCreateStatic() for a version that uses statically allocated + * memory (memory that is allocated at compile time). + * + * configSUPPORT_DYNAMIC_ALLOCATION must be set to 1 or left undefined in + * FreeRTOSConfig.h for xMessageBufferCreate() to be available. + * + * @param xBufferSizeBytes The total number of bytes (not messages) the message + * buffer will be able to hold at any one time. When a message is written to + * the message buffer an additional sizeof( size_t ) bytes are also written to + * store the message's length. sizeof( size_t ) is typically 4 bytes on a + * 32-bit architecture, so on most 32-bit architectures a 10 byte message will + * take up 14 bytes of message buffer space. + * + * @return If NULL is returned, then the message buffer cannot be created + * because there is insufficient heap memory available for FreeRTOS to allocate + * the message buffer data structures and storage area. A non-NULL value being + * returned indicates that the message buffer has been created successfully - + * the returned value should be stored as the handle to the created message + * buffer. + * + * Example use: +
+
+void vAFunction( void )
+{
+MessageBufferHandle_t xMessageBuffer;
+const size_t xMessageBufferSizeBytes = 100;
+
+    // Create a message buffer that can hold 100 bytes.  The memory used to hold
+    // both the message buffer structure and the messages themselves is allocated
+    // dynamically.  Each message added to the buffer consumes an additional 4
+    // bytes which are used to hold the length of the message.
+    xMessageBuffer = xMessageBufferCreate( xMessageBufferSizeBytes );
+
+    if( xMessageBuffer == NULL )
+    {
+        // There was not enough heap memory space available to create the
+        // message buffer.
+    }
+    else
+    {
+        // The message buffer was created successfully and can now be used.
+    }
+
+
+ * \defgroup xMessageBufferCreate xMessageBufferCreate
+ * \ingroup MessageBufferManagement
+ */
+#define xMessageBufferCreate( xBufferSizeBytes ) ( MessageBufferHandle_t ) xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE )
+
+/**
+ * message_buffer.h
+ *
+
+MessageBufferHandle_t xMessageBufferCreateStatic( size_t xBufferSizeBytes,
+                                                  uint8_t *pucMessageBufferStorageArea,
+                                                  StaticMessageBuffer_t *pxStaticMessageBuffer );
+
+ * Creates a new message buffer using statically allocated memory.  See
+ * xMessageBufferCreate() for a version that uses dynamically allocated memory.
+ *
+ * @param xBufferSizeBytes The size, in bytes, of the buffer pointed to by the
+ * pucMessageBufferStorageArea parameter.  When a message is written to the
+ * message buffer an additional sizeof( size_t ) bytes are also written to store
+ * the message's length.  sizeof( size_t ) is typically 4 bytes on a 32-bit
+ * architecture, so on most 32-bit architectures a 10 byte message will take up
+ * 14 bytes of message buffer space.  The maximum number of bytes that can be
+ * stored in the message buffer is actually (xBufferSizeBytes - 1).
+ *
+ * @param pucMessageBufferStorageArea Must point to a uint8_t array that is at
+ * least xBufferSizeBytes + 1 big.  This is the array to which messages are
+ * copied when they are written to the message buffer.
+ *
+ * @param pxStaticMessageBuffer Must point to a variable of type
+ * StaticMessageBuffer_t, which will be used to hold the message buffer's data
+ * structure.
+ *
+ * @return If the message buffer is created successfully then a handle to the
+ * created message buffer is returned.  If either pucMessageBufferStorageArea or
+ * pxStaticMessageBuffer are NULL then NULL is returned.
+ *
+ * Example use:
+
+
+// Used to dimension the array used to hold the messages.  The available space
+// will actually be one less than this, so 999.
+#define STORAGE_SIZE_BYTES 1000
+
+// Defines the memory that will actually hold the messages within the message
+// buffer.
+static uint8_t ucStorageBuffer[ STORAGE_SIZE_BYTES ];
+
+// The variable used to hold the message buffer structure.
+StaticMessageBuffer_t xMessageBufferStruct;
+
+void MyFunction( void )
+{
+MessageBufferHandle_t xMessageBuffer;
+
+    xMessageBuffer = xMessageBufferCreateStatic( sizeof( ucStorageBuffer ),
+                                                 ucStorageBuffer,
+                                                 &xMessageBufferStruct );
+
+    // As neither the pucMessageBufferStorageArea nor pxStaticMessageBuffer
+    // parameters were NULL, xMessageBuffer will not be NULL, and can be used to
+    // reference the created message buffer in other message buffer API calls.
+
+    // Other code that uses the message buffer can go here.
+}
+
+
+ * \defgroup xMessageBufferCreateStatic xMessageBufferCreateStatic
+ * \ingroup MessageBufferManagement
+ */
+#define xMessageBufferCreateStatic( xBufferSizeBytes, pucMessageBufferStorageArea, pxStaticMessageBuffer ) ( MessageBufferHandle_t ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer )
+
+/**
+ * message_buffer.h
+ *
+
+size_t xMessageBufferSend( MessageBufferHandle_t xMessageBuffer,
+                           const void *pvTxData,
+                           size_t xDataLengthBytes,
+                           TickType_t xTicksToWait );
+
+ *
+ * Sends a discrete message to the message buffer.  The message can be any
+ * length that fits within the buffer's free space, and is copied into the
+ * buffer.
+ *
+ * ***NOTE***:  Uniquely among FreeRTOS objects, the stream buffer
+ * implementation (so also the message buffer implementation, as message buffers
+ * are built on top of stream buffers) assumes there is only one task or
+ * interrupt that will write to the buffer (the writer), and only one task or
+ * interrupt that will read from the buffer (the reader).  It is safe for the
+ * writer and reader to be different tasks or interrupts, but, unlike other
+ * FreeRTOS objects, it is not safe to have multiple different writers or
+ * multiple different readers.  If there are to be multiple different writers
+ * then the application writer must place each call to a writing API function
+ * (such as xMessageBufferSend()) inside a critical section and set the send
+ * block time to 0.  Likewise, if there are to be multiple different readers
+ * then the application writer must place each call to a reading API function
+ * (such as xMessageBufferReceive()) inside a critical section and set the receive
+ * block time to 0.
+ *
+ * Use xMessageBufferSend() to write to a message buffer from a task.  Use
+ * xMessageBufferSendFromISR() to write to a message buffer from an interrupt
+ * service routine (ISR).
+ *
+ * @param xMessageBuffer The handle of the message buffer to which a message is
+ * being sent.
+ *
+ * @param pvTxData A pointer to the message that is to be copied into the
+ * message buffer.
+ *
+ * @param xDataLengthBytes The length of the message.  That is, the number of
+ * bytes to copy from pvTxData into the message buffer.  When a message is
+ * written to the message buffer an additional sizeof( size_t ) bytes are also
+ * written to store the message's length.  sizeof( size_t ) is typically 4 bytes
+ * on a 32-bit architecture, so on most 32-bit architectures setting
+ * xDataLengthBytes to 20 will reduce the free space in the message buffer by 24
+ * bytes (20 bytes of message data and 4 bytes to hold the message length).
+ *
+ * @param xTicksToWait The maximum amount of time the calling task should remain
+ * in the Blocked state to wait for enough space to become available in the
+ * message buffer, should the message buffer have insufficient space when
+ * xMessageBufferSend() is called.  The calling task will never block if
+ * xTicksToWait is zero.  The block time is specified in tick periods, so the
+ * absolute time it represents is dependent on the tick frequency.  The macro
+ * pdMS_TO_TICKS() can be used to convert a time specified in milliseconds into
+ * a time specified in ticks.  Setting xTicksToWait to portMAX_DELAY will cause
+ * the task to wait indefinitely (without timing out), provided
+ * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h.  Tasks do not use any
+ * CPU time when they are in the Blocked state.
+ *
+ * @return The number of bytes written to the message buffer.  If the call to
+ * xMessageBufferSend() times out before there was enough space to write the
+ * message into the message buffer then zero is returned.  If the call did not
+ * time out then xDataLengthBytes is returned.
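+ *
+ * Editor's note (worked example, not part of the original header): because
+ * each message carries a sizeof( size_t ) length prefix, a 100 byte buffer on
+ * a typical 32-bit port can hold at most eight 8-byte messages, since
+ * 8 x ( 8 + 4 ) = 96 bytes, and no single message can be larger than
+ * 100 - 4 = 96 bytes.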
+ *
+ * Example use:
+
+void vAFunction( MessageBufferHandle_t xMessageBuffer )
+{
+size_t xBytesSent;
+uint8_t ucArrayToSend[] = { 0, 1, 2, 3 };
+char *pcStringToSend = "String to send";
+const TickType_t x100ms = pdMS_TO_TICKS( 100 );
+
+    // Send an array to the message buffer, blocking for a maximum of 100ms to
+    // wait for enough space to be available in the message buffer.
+    xBytesSent = xMessageBufferSend( xMessageBuffer, ( void * ) ucArrayToSend, sizeof( ucArrayToSend ), x100ms );
+
+    if( xBytesSent != sizeof( ucArrayToSend ) )
+    {
+        // The call to xMessageBufferSend() timed out before there was enough
+        // space in the buffer for the data to be written.
+    }
+
+    // Send the string to the message buffer.  Return immediately if there is
+    // not enough space in the buffer.
+    xBytesSent = xMessageBufferSend( xMessageBuffer, ( void * ) pcStringToSend, strlen( pcStringToSend ), 0 );
+
+    if( xBytesSent != strlen( pcStringToSend ) )
+    {
+        // The string could not be added to the message buffer because there was
+        // not enough free space in the buffer.
+    }
+}
+
+ * \defgroup xMessageBufferSend xMessageBufferSend
+ * \ingroup MessageBufferManagement
+ */
+#define xMessageBufferSend( xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait ) xStreamBufferSend( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait )
+
+/**
+ * message_buffer.h
+ *
+
+size_t xMessageBufferSendFromISR( MessageBufferHandle_t xMessageBuffer,
+                                  const void *pvTxData,
+                                  size_t xDataLengthBytes,
+                                  BaseType_t *pxHigherPriorityTaskWoken );
+
+ *
+ * Interrupt safe version of the API function that sends a discrete message to
+ * the message buffer.  The message can be any length that fits within the
+ * buffer's free space, and is copied into the buffer.
+ *
+ * ***NOTE***:  Uniquely among FreeRTOS objects, the stream buffer
+ * implementation (so also the message buffer implementation, as message buffers
+ * are built on top of stream buffers) assumes there is only one task or
+ * interrupt that will write to the buffer (the writer), and only one task or
+ * interrupt that will read from the buffer (the reader).  It is safe for the
+ * writer and reader to be different tasks or interrupts, but, unlike other
+ * FreeRTOS objects, it is not safe to have multiple different writers or
+ * multiple different readers.  If there are to be multiple different writers
+ * then the application writer must place each call to a writing API function
+ * (such as xMessageBufferSend()) inside a critical section and set the send
+ * block time to 0.  Likewise, if there are to be multiple different readers
+ * then the application writer must place each call to a reading API function
+ * (such as xMessageBufferReceive()) inside a critical section and set the receive
+ * block time to 0.
+ *
+ * Use xMessageBufferSend() to write to a message buffer from a task.  Use
+ * xMessageBufferSendFromISR() to write to a message buffer from an interrupt
+ * service routine (ISR).
+ *
+ * @param xMessageBuffer The handle of the message buffer to which a message is
+ * being sent.
+ *
+ * @param pvTxData A pointer to the message that is to be copied into the
+ * message buffer.
+ *
+ * @param xDataLengthBytes The length of the message.  That is, the number of
+ * bytes to copy from pvTxData into the message buffer.  When a message is
+ * written to the message buffer an additional sizeof( size_t ) bytes are also
+ * written to store the message's length.
sizeof( size_t ) is typically 4 bytes
+ * on a 32-bit architecture, so on most 32-bit architectures setting
+ * xDataLengthBytes to 20 will reduce the free space in the message buffer by 24
+ * bytes (20 bytes of message data and 4 bytes to hold the message length).
+ *
+ * @param pxHigherPriorityTaskWoken It is possible that a message buffer will
+ * have a task blocked on it waiting for data.  Calling
+ * xMessageBufferSendFromISR() can make data available, and so cause a task that
+ * was waiting for data to leave the Blocked state.  If calling
+ * xMessageBufferSendFromISR() causes a task to leave the Blocked state, and the
+ * unblocked task has a priority higher than the currently executing task (the
+ * task that was interrupted), then, internally, xMessageBufferSendFromISR()
+ * will set *pxHigherPriorityTaskWoken to pdTRUE.  If
+ * xMessageBufferSendFromISR() sets this value to pdTRUE, then normally a
+ * context switch should be performed before the interrupt is exited.  This will
+ * ensure that the interrupt returns directly to the highest priority Ready
+ * state task.  *pxHigherPriorityTaskWoken should be set to pdFALSE before it
+ * is passed into the function.  See the code example below for an example.
+ *
+ * @return The number of bytes actually written to the message buffer.  If the
+ * message buffer didn't have enough free space for the message to be stored
+ * then 0 is returned, otherwise xDataLengthBytes is returned.
+ *
+ * Example use:
+
+// A message buffer that has already been created.
+MessageBufferHandle_t xMessageBuffer;
+
+void vAnInterruptServiceRoutine( void )
+{
+size_t xBytesSent;
+char *pcStringToSend = "String to send";
+BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE.
+
+    // Attempt to send the string to the message buffer.
+    xBytesSent = xMessageBufferSendFromISR( xMessageBuffer,
+                                            ( void * ) pcStringToSend,
+                                            strlen( pcStringToSend ),
+                                            &xHigherPriorityTaskWoken );
+
+    if( xBytesSent != strlen( pcStringToSend ) )
+    {
+        // The string could not be added to the message buffer because there was
+        // not enough free space in the buffer.
+    }
+
+    // If xHigherPriorityTaskWoken was set to pdTRUE inside
+    // xMessageBufferSendFromISR() then a task that has a priority above the
+    // priority of the currently executing task was unblocked and a context
+    // switch should be performed to ensure the ISR returns to the unblocked
+    // task.  In most FreeRTOS ports this is done by simply passing
+    // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the
+    // variable's value, and perform the context switch if necessary.  Check the
+    // documentation for the port in use for port specific instructions.
+    taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+}
+
+ * \defgroup xMessageBufferSendFromISR xMessageBufferSendFromISR
+ * \ingroup MessageBufferManagement
+ */
+#define xMessageBufferSendFromISR( xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ) xStreamBufferSendFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken )
+
+/**
+ * message_buffer.h
+ *
+
+size_t xMessageBufferReceive( MessageBufferHandle_t xMessageBuffer,
+                              void *pvRxData,
+                              size_t xBufferLengthBytes,
+                              TickType_t xTicksToWait );
+
+ *
+ * Receives a discrete message from a message buffer.  Messages can be of
+ * variable length and are copied out of the buffer.
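+ *
+ * Editor's note (illustrative sketch, not part of the original header): if the
+ * receive buffer might be too small for the next message, its size can be
+ * queried first with xMessageBufferNextLengthBytes(), defined later in this
+ * header.  Here ucRxData is a local receive array as in the examples below:
+ *
+ *     size_t xNextLen = xMessageBufferNextLengthBytes( xMessageBuffer );
+ *
+ *     // Only attempt the read when the whole message will fit in ucRxData.
+ *     if( ( xNextLen > 0 ) && ( xNextLen <= sizeof( ucRxData ) ) )
+ *     {
+ *         xMessageBufferReceive( xMessageBuffer, ( void * ) ucRxData, sizeof( ucRxData ), 0 );
+ *     }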
+ *
+ * ***NOTE***:  Uniquely among FreeRTOS objects, the stream buffer
+ * implementation (so also the message buffer implementation, as message buffers
+ * are built on top of stream buffers) assumes there is only one task or
+ * interrupt that will write to the buffer (the writer), and only one task or
+ * interrupt that will read from the buffer (the reader).  It is safe for the
+ * writer and reader to be different tasks or interrupts, but, unlike other
+ * FreeRTOS objects, it is not safe to have multiple different writers or
+ * multiple different readers.  If there are to be multiple different writers
+ * then the application writer must place each call to a writing API function
+ * (such as xMessageBufferSend()) inside a critical section and set the send
+ * block time to 0.  Likewise, if there are to be multiple different readers
+ * then the application writer must place each call to a reading API function
+ * (such as xMessageBufferReceive()) inside a critical section and set the receive
+ * block time to 0.
+ *
+ * Use xMessageBufferReceive() to read from a message buffer from a task.  Use
+ * xMessageBufferReceiveFromISR() to read from a message buffer from an
+ * interrupt service routine (ISR).
+ *
+ * @param xMessageBuffer The handle of the message buffer from which a message
+ * is being received.
+ *
+ * @param pvRxData A pointer to the buffer into which the received message is
+ * to be copied.
+ *
+ * @param xBufferLengthBytes The length of the buffer pointed to by the pvRxData
+ * parameter.  This sets the maximum length of the message that can be received.
+ * If xBufferLengthBytes is too small to hold the next message then the message
+ * will be left in the message buffer and 0 will be returned.
+ *
+ * @param xTicksToWait The maximum amount of time the task should remain in the
+ * Blocked state to wait for a message, should the message buffer be empty.
+ * xMessageBufferReceive() will return immediately if xTicksToWait is zero and
+ * the message buffer is empty.  The block time is specified in tick periods, so
+ * the absolute time it represents is dependent on the tick frequency.  The
+ * macro pdMS_TO_TICKS() can be used to convert a time specified in milliseconds
+ * into a time specified in ticks.  Setting xTicksToWait to portMAX_DELAY will
+ * cause the task to wait indefinitely (without timing out), provided
+ * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h.  Tasks do not use any
+ * CPU time when they are in the Blocked state.
+ *
+ * @return The length, in bytes, of the message read from the message buffer, if
+ * any.  If xMessageBufferReceive() times out before a message became available
+ * then zero is returned.  If the length of the message is greater than
+ * xBufferLengthBytes then the message will be left in the message buffer and
+ * zero is returned.
+ *
+ * Example use:
+
+void vAFunction( MessageBufferHandle_t xMessageBuffer )
+{
+uint8_t ucRxData[ 20 ];
+size_t xReceivedBytes;
+const TickType_t xBlockTime = pdMS_TO_TICKS( 20 );
+
+    // Receive the next message from the message buffer.  Wait in the Blocked
+    // state (so not using any CPU processing time) for a maximum of 20ms for
+    // a message to become available.
+    xReceivedBytes = xMessageBufferReceive( xMessageBuffer,
+                                            ( void * ) ucRxData,
+                                            sizeof( ucRxData ),
+                                            xBlockTime );
+
+    if( xReceivedBytes > 0 )
+    {
+        // ucRxData contains a message that is xReceivedBytes long.  Process
+        // the message here....
+    }
+}
+
+ * \defgroup xMessageBufferReceive xMessageBufferReceive
+ * \ingroup MessageBufferManagement
+ */
+#define xMessageBufferReceive( xMessageBuffer, pvRxData, xBufferLengthBytes, xTicksToWait ) xStreamBufferReceive( ( StreamBufferHandle_t ) xMessageBuffer, pvRxData, xBufferLengthBytes, xTicksToWait )
+
+
+/**
+ * message_buffer.h
+ *
+
+size_t xMessageBufferReceiveFromISR( MessageBufferHandle_t xMessageBuffer,
+                                     void *pvRxData,
+                                     size_t xBufferLengthBytes,
+                                     BaseType_t *pxHigherPriorityTaskWoken );
+
+ *
+ * An interrupt safe version of the API function that receives a discrete
+ * message from a message buffer.  Messages can be of variable length and are
+ * copied out of the buffer.
+ *
+ * ***NOTE***:  Uniquely among FreeRTOS objects, the stream buffer
+ * implementation (so also the message buffer implementation, as message buffers
+ * are built on top of stream buffers) assumes there is only one task or
+ * interrupt that will write to the buffer (the writer), and only one task or
+ * interrupt that will read from the buffer (the reader).  It is safe for the
+ * writer and reader to be different tasks or interrupts, but, unlike other
+ * FreeRTOS objects, it is not safe to have multiple different writers or
+ * multiple different readers.  If there are to be multiple different writers
+ * then the application writer must place each call to a writing API function
+ * (such as xMessageBufferSend()) inside a critical section and set the send
+ * block time to 0.  Likewise, if there are to be multiple different readers
+ * then the application writer must place each call to a reading API function
+ * (such as xMessageBufferReceive()) inside a critical section and set the receive
+ * block time to 0.
+ *
+ * Use xMessageBufferReceive() to read from a message buffer from a task.  Use
+ * xMessageBufferReceiveFromISR() to read from a message buffer from an
+ * interrupt service routine (ISR).
+ *
+ * @param xMessageBuffer The handle of the message buffer from which a message
+ * is being received.
+ *
+ * @param pvRxData A pointer to the buffer into which the received message is
+ * to be copied.
+ *
+ * @param xBufferLengthBytes The length of the buffer pointed to by the pvRxData
+ * parameter.  This sets the maximum length of the message that can be received.
+ * If xBufferLengthBytes is too small to hold the next message then the message
+ * will be left in the message buffer and 0 will be returned.
+ *
+ * @param pxHigherPriorityTaskWoken It is possible that a message buffer will
+ * have a task blocked on it waiting for space to become available.  Calling
+ * xMessageBufferReceiveFromISR() can make space available, and so cause a task
+ * that is waiting for space to leave the Blocked state.  If calling
+ * xMessageBufferReceiveFromISR() causes a task to leave the Blocked state, and
+ * the unblocked task has a priority higher than the currently executing task
+ * (the task that was interrupted), then, internally,
+ * xMessageBufferReceiveFromISR() will set *pxHigherPriorityTaskWoken to pdTRUE.
+ * If xMessageBufferReceiveFromISR() sets this value to pdTRUE, then normally a
+ * context switch should be performed before the interrupt is exited.  That will
+ * ensure the interrupt returns directly to the highest priority Ready state
+ * task.  *pxHigherPriorityTaskWoken should be set to pdFALSE before it is
+ * passed into the function.  See the code example below for an example.
+ *
+ * @return The length, in bytes, of the message read from the message buffer, if
+ * any.
+ *
+ * Example use:
+
+// A message buffer that has already been created.
+MessageBufferHandle_t xMessageBuffer;
+
+void vAnInterruptServiceRoutine( void )
+{
+uint8_t ucRxData[ 20 ];
+size_t xReceivedBytes;
+BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE.
+
+    // Receive the next message from the message buffer.
+    xReceivedBytes = xMessageBufferReceiveFromISR( xMessageBuffer,
+                                                   ( void * ) ucRxData,
+                                                   sizeof( ucRxData ),
+                                                   &xHigherPriorityTaskWoken );
+
+    if( xReceivedBytes > 0 )
+    {
+        // ucRxData contains a message that is xReceivedBytes long.  Process
+        // the message here....
+    }
+
+    // If xHigherPriorityTaskWoken was set to pdTRUE inside
+    // xMessageBufferReceiveFromISR() then a task that has a priority above the
+    // priority of the currently executing task was unblocked and a context
+    // switch should be performed to ensure the ISR returns to the unblocked
+    // task.  In most FreeRTOS ports this is done by simply passing
+    // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the
+    // variable's value, and perform the context switch if necessary.  Check the
+    // documentation for the port in use for port specific instructions.
+    taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+}
+
+ * \defgroup xMessageBufferReceiveFromISR xMessageBufferReceiveFromISR
+ * \ingroup MessageBufferManagement
+ */
+#define xMessageBufferReceiveFromISR( xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ) xStreamBufferReceiveFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken )
+
+/**
+ * message_buffer.h
+ *
+
+void vMessageBufferDelete( MessageBufferHandle_t xMessageBuffer );
+
+ *
+ * Deletes a message buffer that was previously created using a call to
+ * xMessageBufferCreate() or xMessageBufferCreateStatic().  If the message
+ * buffer was created using dynamic memory (that is, by xMessageBufferCreate()),
+ * then the allocated memory is freed.
+ *
+ * A message buffer handle must not be used after the message buffer has been
+ * deleted.
+ *
+ * @param xMessageBuffer The handle of the message buffer to be deleted.
+ *
+ */
+#define vMessageBufferDelete( xMessageBuffer ) vStreamBufferDelete( ( StreamBufferHandle_t ) xMessageBuffer )
+
+/**
+ * message_buffer.h
+
+BaseType_t xMessageBufferIsFull( MessageBufferHandle_t xMessageBuffer );
+
+ *
+ * Tests to see if a message buffer is full.  A message buffer is full if it
+ * cannot accept any more messages, of any size, until space is made available
+ * by a message being removed from the message buffer.
+ *
+ * @param xMessageBuffer The handle of the message buffer being queried.
+ *
+ * @return If the message buffer referenced by xMessageBuffer is full then
+ * pdTRUE is returned.  Otherwise pdFALSE is returned.
+ */
+#define xMessageBufferIsFull( xMessageBuffer ) xStreamBufferIsFull( ( StreamBufferHandle_t ) xMessageBuffer )
+
+/**
+ * message_buffer.h
+
+BaseType_t xMessageBufferIsEmpty( MessageBufferHandle_t xMessageBuffer );
+
+ *
+ * Tests to see if a message buffer is empty (does not contain any messages).
+ *
+ * @param xMessageBuffer The handle of the message buffer being queried.
+ *
+ * @return If the message buffer referenced by xMessageBuffer is empty then
+ * pdTRUE is returned.  Otherwise pdFALSE is returned.
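+ *
+ * Editor's sketch (illustrative only, not part of the original header): a
+ * single reading task that must never block might drain the buffer by polling
+ * with this macro, where ucRxData and xReceivedBytes are locals as in the
+ * examples above:
+ *
+ *     while( xMessageBufferIsEmpty( xMessageBuffer ) == pdFALSE )
+ *     {
+ *         xReceivedBytes = xMessageBufferReceive( xMessageBuffer, ( void * ) ucRxData, sizeof( ucRxData ), 0 );
+ *
+ *         if( xReceivedBytes == 0 )
+ *         {
+ *             break; // The next message is larger than ucRxData - handle it separately.
+ *         }
+ *
+ *         // Process xReceivedBytes bytes of ucRxData here....
+ *     }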
+ *
+ */
+#define xMessageBufferIsEmpty( xMessageBuffer ) xStreamBufferIsEmpty( ( StreamBufferHandle_t ) xMessageBuffer )
+
+/**
+ * message_buffer.h
+
+BaseType_t xMessageBufferReset( MessageBufferHandle_t xMessageBuffer );
+
+ *
+ * Resets a message buffer to its initial empty state, discarding any messages
+ * it contained.
+ *
+ * A message buffer can only be reset if there are no tasks blocked on it.
+ *
+ * @param xMessageBuffer The handle of the message buffer being reset.
+ *
+ * @return If the message buffer was reset then pdPASS is returned.  If the
+ * message buffer could not be reset because either there was a task blocked on
+ * the message buffer to wait for space to become available, or to wait for a
+ * message to be available, then pdFAIL is returned.
+ *
+ * \defgroup xMessageBufferReset xMessageBufferReset
+ * \ingroup MessageBufferManagement
+ */
+#define xMessageBufferReset( xMessageBuffer ) xStreamBufferReset( ( StreamBufferHandle_t ) xMessageBuffer )
+
+
+/**
+ * message_buffer.h
+
+size_t xMessageBufferSpaceAvailable( MessageBufferHandle_t xMessageBuffer );
+
+ * Returns the number of bytes of free space in the message buffer.
+ *
+ * @param xMessageBuffer The handle of the message buffer being queried.
+ *
+ * @return The number of bytes that can be written to the message buffer before
+ * the message buffer would be full.  When a message is written to the message
+ * buffer an additional sizeof( size_t ) bytes are also written to store the
+ * message's length.  sizeof( size_t ) is typically 4 bytes on a 32-bit
+ * architecture, so if xMessageBufferSpacesAvailable() returns 10, then the size
+ * of the largest message that can be written to the message buffer is 6 bytes.
+ *
+ * \defgroup xMessageBufferSpaceAvailable xMessageBufferSpaceAvailable
+ * \ingroup MessageBufferManagement
+ */
+#define xMessageBufferSpaceAvailable( xMessageBuffer ) xStreamBufferSpacesAvailable( ( StreamBufferHandle_t ) xMessageBuffer )
+#define xMessageBufferSpacesAvailable( xMessageBuffer ) xStreamBufferSpacesAvailable( ( StreamBufferHandle_t ) xMessageBuffer ) /* Corrects typo in original macro name. */
+
+/**
+ * message_buffer.h
+
+ size_t xMessageBufferNextLengthBytes( MessageBufferHandle_t xMessageBuffer );
+
+ * Returns the length (in bytes) of the next message in a message buffer.
+ * Useful if xMessageBufferReceive() returned 0 because the size of the buffer
+ * passed into xMessageBufferReceive() was too small to hold the next message.
+ *
+ * @param xMessageBuffer The handle of the message buffer being queried.
+ *
+ * @return The length (in bytes) of the next message in the message buffer, or 0
+ * if the message buffer is empty.
+ *
+ * \defgroup xMessageBufferNextLengthBytes xMessageBufferNextLengthBytes
+ * \ingroup MessageBufferManagement
+ */
+#define xMessageBufferNextLengthBytes( xMessageBuffer ) xStreamBufferNextMessageLengthBytes( ( StreamBufferHandle_t ) xMessageBuffer )
+
+/**
+ * message_buffer.h
+ *
+
+BaseType_t xMessageBufferSendCompletedFromISR( MessageBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken );
+
+ *
+ * For advanced users only.
+ *
+ * The sbSEND_COMPLETED() macro is called from within the FreeRTOS APIs when
+ * data is sent to a message buffer or stream buffer.  If there was a task that
+ * was blocked on the message or stream buffer waiting for data to arrive then
+ * the sbSEND_COMPLETED() macro sends a notification to the task to remove it
+ * from the Blocked state.
xMessageBufferSendCompletedFromISR() does the same + * thing. It is provided to enable application writers to implement their own + * version of sbSEND_COMPLETED(), and MUST NOT BE USED AT ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer to which data was + * written. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xMessageBufferSendCompletedFromISR(). If calling + * xMessageBufferSendCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. + * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. + * + * \defgroup xMessageBufferSendCompletedFromISR xMessageBufferSendCompletedFromISR + * \ingroup StreamBufferManagement + */ +#define xMessageBufferSendCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) xStreamBufferSendCompletedFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pxHigherPriorityTaskWoken ) + +/** + * message_buffer.h + * ++BaseType_t xMessageBufferReceiveCompletedFromISR( MessageBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * For advanced users only. + * + * The sbRECEIVE_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is read out of a message buffer or stream buffer. If there was a task + * that was blocked on the message or stream buffer waiting for data to arrive + * then the sbRECEIVE_COMPLETED() macro sends a notification to the task to + * remove it from the Blocked state. xMessageBufferReceiveCompletedFromISR() + * does the same thing. It is provided to enable application writers to + * implement their own version of sbRECEIVE_COMPLETED(), and MUST NOT BE USED AT + * ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer from which data was + * read. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xMessageBufferReceiveCompletedFromISR(). If calling + * xMessageBufferReceiveCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. + * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. 
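+ *
+ * Editor's sketch (illustrative only; the names are hypothetical and the
+ * pattern follows the MessageBufferAMP.c demo referenced above): on a dual
+ * core device, the core that reads a buffer in shared memory might redefine
+ * sbRECEIVE_COMPLETED() in FreeRTOSConfig.h to raise an interrupt in the
+ * writing core, whose handler then unblocks any task waiting for space:
+ *
+ *     void vWritingCoreInterruptHandler( void )
+ *     {
+ *     BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+ *
+ *         // xSharedMessageBuffer is a hypothetical buffer in shared memory.
+ *         xMessageBufferReceiveCompletedFromISR( xSharedMessageBuffer, &xHigherPriorityTaskWoken );
+ *         portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+ *     }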
+ *
+ * \defgroup xMessageBufferReceiveCompletedFromISR xMessageBufferReceiveCompletedFromISR
+ * \ingroup StreamBufferManagement
+ */
+#define xMessageBufferReceiveCompletedFromISR( xMessageBuffer, pxHigherPriorityTaskWoken ) xStreamBufferReceiveCompletedFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pxHigherPriorityTaskWoken )
+
+#if defined( __cplusplus )
+} /* extern "C" */
+#endif
+
+#endif /* !defined( FREERTOS_MESSAGE_BUFFER_H ) */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h
new file mode 100644
index 0000000..f0b3337
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_prototypes.h
@@ -0,0 +1,157 @@
+/*
+ * FreeRTOS Kernel V10.2.1
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+/*
+ * When the MPU is used the standard (non MPU) API functions are mapped to
+ * equivalents that start "MPU_", the prototypes for which are defined in this
+ * header file.  This will cause the application code to call the MPU_ version,
+ * which wraps the non-MPU version with privilege promoting then demoting code,
+ * so the kernel code always runs with full privileges.
+ */
+
+
+#ifndef MPU_PROTOTYPES_H
+#define MPU_PROTOTYPES_H
+
+/* MPU versions of tasks.h API functions. 
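+   As an illustrative sketch of the calling pattern (vMyTask is a hypothetical
+   task function, and the wrapper bodies live in the port layer, so this is a
+   sketch rather than the exact mechanism): with portUSING_MPU_WRAPPERS set to
+   1, application code such as
+
+       xTaskCreate( vMyTask, "MyTask", 128, NULL, 1, NULL );
+
+   is remapped by mpu_wrappers.h onto MPU_xTaskCreate() with the same
+   arguments; the MPU_ wrapper promotes privilege, calls the kernel
+   implementation, then demotes privilege again before returning to the
+   caller.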
*/ +BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode, const char * const pcName, const uint16_t usStackDepth, void * const pvParameters, UBaseType_t uxPriority, TaskHandle_t * const pxCreatedTask ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode, const char * const pcName, const uint32_t ulStackDepth, void * const pvParameters, UBaseType_t uxPriority, StackType_t * const puxStackBuffer, StaticTask_t * const pxTaskBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskDelete( TaskHandle_t xTaskToDelete ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskStartScheduler( void ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSuspendAll( void ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskResumeAll( void ) FREERTOS_SYSTEM_CALL; +TickType_t MPU_xTaskGetTickCount( void ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) FREERTOS_SYSTEM_CALL; +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTaskGetHandle( const char *pcNameToQuery ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction ) FREERTOS_SYSTEM_CALL; +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue ) FREERTOS_SYSTEM_CALL; +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime ) FREERTOS_SYSTEM_CALL; +TickType_t MPU_xTaskGetIdleRunTimeCounter( void ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskList( char * pcWriteBuffer ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskGetRunTimeStats( char *pcWriteBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, 
eNotifyAction eAction, uint32_t *pulPreviousNotificationValue ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +uint32_t MPU_ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskNotifyStateClear( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskIncrementTick( void ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) FREERTOS_SYSTEM_CALL; +void MPU_vTaskMissedYield( void ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTaskGetSchedulerState( void ) FREERTOS_SYSTEM_CALL; + +/* MPU versions of queue.h API functions. */ +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +void MPU_vQueueDelete( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) FREERTOS_SYSTEM_CALL; +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcName ) FREERTOS_SYSTEM_CALL; +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL; +QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL; +QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL; 
+QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) FREERTOS_SYSTEM_CALL; +void MPU_vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxQueueGetQueueNumber( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +uint8_t MPU_ucQueueGetQueueType( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; + +/* MPU versions of timers.h API functions. */ +TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName, const TickType_t xTimerPeriodInTicks, const UBaseType_t uxAutoReload, void * const pvTimerID, TimerCallbackFunction_t pxCallbackFunction ) FREERTOS_SYSTEM_CALL; +TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName, const TickType_t xTimerPeriodInTicks, const UBaseType_t uxAutoReload, void * const pvTimerID, TimerCallbackFunction_t pxCallbackFunction, StaticTimer_t *pxTimerBuffer ) FREERTOS_SYSTEM_CALL; +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL; +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerCreateTimerTask( void ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, const BaseType_t xCommandID, const TickType_t xOptionalValue, BaseType_t * const pxHigherPriorityTaskWoken, const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +/* MPU versions of event_group.h API functions. */ +EventGroupHandle_t MPU_xEventGroupCreate( void ) FREERTOS_SYSTEM_CALL; +EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer ) FREERTOS_SYSTEM_CALL; +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear ) FREERTOS_SYSTEM_CALL; +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ) FREERTOS_SYSTEM_CALL; +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) FREERTOS_SYSTEM_CALL; +UBaseType_t MPU_uxEventGroupGetNumber( void* xEventGroup ) FREERTOS_SYSTEM_CALL; + +/* MPU versions of message/stream_buffer.h API functions. 
*/ +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, const void *pvTxData, size_t xDataLengthBytes, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, void *pvRxData, size_t xBufferLengthBytes, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) FREERTOS_SYSTEM_CALL; +StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer ) FREERTOS_SYSTEM_CALL; +StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer, uint8_t * const pucStreamBufferStorageArea, StaticStreamBuffer_t * const pxStaticStreamBuffer ) FREERTOS_SYSTEM_CALL; + + + +#endif /* MPU_PROTOTYPES_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h new file mode 100644 index 0000000..56d9c7e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/mpu_wrappers.h @@ -0,0 +1,186 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef MPU_WRAPPERS_H +#define MPU_WRAPPERS_H + +/* This file redefines API functions to be called through a wrapper macro, but +only for ports that are using the MPU. 
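+A port opts in by defining portUSING_MPU_WRAPPERS to 1 (normally in its
+portmacro.h); when it is left undefined, the #else branch at the bottom of
+this file defines the privilege macros away to nothing and forces
+portUSING_MPU_WRAPPERS to 0.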
*/
+#ifdef portUSING_MPU_WRAPPERS
+
+	/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE will be defined when this file is
+	included from queue.c or task.c to prevent it from having an effect within
+	those files. */
+	#ifndef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+		/*
+		 * Map standard (non MPU) API functions to equivalents that start
+		 * "MPU_".  This will cause the application code to call the MPU_
+		 * version, which wraps the non-MPU version with privilege promoting
+		 * then demoting code, so the kernel code always runs with full
+		 * privileges.
+		 */
+
+		/* Map standard tasks.h API functions to the MPU equivalents. */
+		#define xTaskCreate								MPU_xTaskCreate
+		#define xTaskCreateStatic						MPU_xTaskCreateStatic
+		#define xTaskCreateRestricted					MPU_xTaskCreateRestricted
+		#define vTaskAllocateMPURegions					MPU_vTaskAllocateMPURegions
+		#define vTaskDelete								MPU_vTaskDelete
+		#define vTaskDelay								MPU_vTaskDelay
+		#define vTaskDelayUntil							MPU_vTaskDelayUntil
+		#define xTaskAbortDelay							MPU_xTaskAbortDelay
+		#define uxTaskPriorityGet						MPU_uxTaskPriorityGet
+		#define eTaskGetState							MPU_eTaskGetState
+		#define vTaskGetInfo							MPU_vTaskGetInfo
+		#define vTaskPrioritySet						MPU_vTaskPrioritySet
+		#define vTaskSuspend							MPU_vTaskSuspend
+		#define vTaskResume								MPU_vTaskResume
+		#define vTaskSuspendAll							MPU_vTaskSuspendAll
+		#define xTaskResumeAll							MPU_xTaskResumeAll
+		#define xTaskGetTickCount						MPU_xTaskGetTickCount
+		#define uxTaskGetNumberOfTasks					MPU_uxTaskGetNumberOfTasks
+		#define pcTaskGetName							MPU_pcTaskGetName
+		#define xTaskGetHandle							MPU_xTaskGetHandle
+		#define uxTaskGetStackHighWaterMark				MPU_uxTaskGetStackHighWaterMark
+		#define uxTaskGetStackHighWaterMark2			MPU_uxTaskGetStackHighWaterMark2
+		#define vTaskSetApplicationTaskTag				MPU_vTaskSetApplicationTaskTag
+		#define xTaskGetApplicationTaskTag				MPU_xTaskGetApplicationTaskTag
+		#define vTaskSetThreadLocalStoragePointer		MPU_vTaskSetThreadLocalStoragePointer
+		#define pvTaskGetThreadLocalStoragePointer		MPU_pvTaskGetThreadLocalStoragePointer
+		#define xTaskCallApplicationTaskHook			MPU_xTaskCallApplicationTaskHook
+		#define xTaskGetIdleTaskHandle					MPU_xTaskGetIdleTaskHandle
+		#define uxTaskGetSystemState					MPU_uxTaskGetSystemState
+		#define vTaskList								MPU_vTaskList
+		#define vTaskGetRunTimeStats					MPU_vTaskGetRunTimeStats
+		#define xTaskGetIdleRunTimeCounter				MPU_xTaskGetIdleRunTimeCounter
+		#define xTaskGenericNotify						MPU_xTaskGenericNotify
+		#define xTaskNotifyWait							MPU_xTaskNotifyWait
+		#define ulTaskNotifyTake						MPU_ulTaskNotifyTake
+		#define xTaskNotifyStateClear					MPU_xTaskNotifyStateClear
+
+		#define xTaskGetCurrentTaskHandle				MPU_xTaskGetCurrentTaskHandle
+		#define vTaskSetTimeOutState					MPU_vTaskSetTimeOutState
+		#define xTaskCheckForTimeOut					MPU_xTaskCheckForTimeOut
+		#define xTaskGetSchedulerState					MPU_xTaskGetSchedulerState
+
+		/* Map standard queue.h API functions to the MPU equivalents. 
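+		   For example, once these mappings are in effect a call to
+		   xQueueSend() expands first to xQueueGenericSend() (see queue.h later
+		   in this patch) and then, via the define below, to
+		   MPU_xQueueGenericSend(), so the application enters the kernel
+		   through the privileged wrapper without any source changes.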
*/ + #define xQueueGenericSend MPU_xQueueGenericSend + #define xQueueReceive MPU_xQueueReceive + #define xQueuePeek MPU_xQueuePeek + #define xQueueSemaphoreTake MPU_xQueueSemaphoreTake + #define uxQueueMessagesWaiting MPU_uxQueueMessagesWaiting + #define uxQueueSpacesAvailable MPU_uxQueueSpacesAvailable + #define vQueueDelete MPU_vQueueDelete + #define xQueueCreateMutex MPU_xQueueCreateMutex + #define xQueueCreateMutexStatic MPU_xQueueCreateMutexStatic + #define xQueueCreateCountingSemaphore MPU_xQueueCreateCountingSemaphore + #define xQueueCreateCountingSemaphoreStatic MPU_xQueueCreateCountingSemaphoreStatic + #define xQueueGetMutexHolder MPU_xQueueGetMutexHolder + #define xQueueTakeMutexRecursive MPU_xQueueTakeMutexRecursive + #define xQueueGiveMutexRecursive MPU_xQueueGiveMutexRecursive + #define xQueueGenericCreate MPU_xQueueGenericCreate + #define xQueueGenericCreateStatic MPU_xQueueGenericCreateStatic + #define xQueueCreateSet MPU_xQueueCreateSet + #define xQueueAddToSet MPU_xQueueAddToSet + #define xQueueRemoveFromSet MPU_xQueueRemoveFromSet + #define xQueueSelectFromSet MPU_xQueueSelectFromSet + #define xQueueGenericReset MPU_xQueueGenericReset + + #if( configQUEUE_REGISTRY_SIZE > 0 ) + #define vQueueAddToRegistry MPU_vQueueAddToRegistry + #define vQueueUnregisterQueue MPU_vQueueUnregisterQueue + #define pcQueueGetName MPU_pcQueueGetName + #endif + + /* Map standard timer.h API functions to the MPU equivalents. */ + #define xTimerCreate MPU_xTimerCreate + #define xTimerCreateStatic MPU_xTimerCreateStatic + #define pvTimerGetTimerID MPU_pvTimerGetTimerID + #define vTimerSetTimerID MPU_vTimerSetTimerID + #define xTimerIsTimerActive MPU_xTimerIsTimerActive + #define xTimerGetTimerDaemonTaskHandle MPU_xTimerGetTimerDaemonTaskHandle + #define xTimerPendFunctionCall MPU_xTimerPendFunctionCall + #define pcTimerGetName MPU_pcTimerGetName + #define vTimerSetReloadMode MPU_vTimerSetReloadMode + #define xTimerGetPeriod MPU_xTimerGetPeriod + #define xTimerGetExpiryTime MPU_xTimerGetExpiryTime + #define xTimerGenericCommand MPU_xTimerGenericCommand + + /* Map standard event_group.h API functions to the MPU equivalents. */ + #define xEventGroupCreate MPU_xEventGroupCreate + #define xEventGroupCreateStatic MPU_xEventGroupCreateStatic + #define xEventGroupWaitBits MPU_xEventGroupWaitBits + #define xEventGroupClearBits MPU_xEventGroupClearBits + #define xEventGroupSetBits MPU_xEventGroupSetBits + #define xEventGroupSync MPU_xEventGroupSync + #define vEventGroupDelete MPU_vEventGroupDelete + + /* Map standard message/stream_buffer.h API functions to the MPU + equivalents. 
*/ + #define xStreamBufferSend MPU_xStreamBufferSend + #define xStreamBufferReceive MPU_xStreamBufferReceive + #define xStreamBufferNextMessageLengthBytes MPU_xStreamBufferNextMessageLengthBytes + #define vStreamBufferDelete MPU_vStreamBufferDelete + #define xStreamBufferIsFull MPU_xStreamBufferIsFull + #define xStreamBufferIsEmpty MPU_xStreamBufferIsEmpty + #define xStreamBufferReset MPU_xStreamBufferReset + #define xStreamBufferSpacesAvailable MPU_xStreamBufferSpacesAvailable + #define xStreamBufferBytesAvailable MPU_xStreamBufferBytesAvailable + #define xStreamBufferSetTriggerLevel MPU_xStreamBufferSetTriggerLevel + #define xStreamBufferGenericCreate MPU_xStreamBufferGenericCreate + #define xStreamBufferGenericCreateStatic MPU_xStreamBufferGenericCreateStatic + + + /* Remove the privileged function macro, but keep the PRIVILEGED_DATA + macro so applications can place data in privileged access sections + (useful when using statically allocated objects). */ + #define PRIVILEGED_FUNCTION + #define PRIVILEGED_DATA __attribute__((section("privileged_data"))) + #define FREERTOS_SYSTEM_CALL + + #else /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE */ + + /* Ensure API functions go in the privileged execution section. */ + #define PRIVILEGED_FUNCTION __attribute__((section("privileged_functions"))) + #define PRIVILEGED_DATA __attribute__((section("privileged_data"))) + #define FREERTOS_SYSTEM_CALL __attribute__((section( "freertos_system_calls"))) + + #endif /* MPU_WRAPPERS_INCLUDED_FROM_API_FILE */ + +#else /* portUSING_MPU_WRAPPERS */ + + #define PRIVILEGED_FUNCTION + #define PRIVILEGED_DATA + #define FREERTOS_SYSTEM_CALL + #define portUSING_MPU_WRAPPERS 0 + +#endif /* portUSING_MPU_WRAPPERS */ + + +#endif /* MPU_WRAPPERS_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/portable.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/portable.h new file mode 100644 index 0000000..a1bb449 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/portable.h @@ -0,0 +1,181 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! 
+ */ + +/*----------------------------------------------------------- + * Portable layer API. Each function must be defined for each port. + *----------------------------------------------------------*/ + +#ifndef PORTABLE_H +#define PORTABLE_H + +/* Each FreeRTOS port has a unique portmacro.h header file. Originally a +pre-processor definition was used to ensure the pre-processor found the correct +portmacro.h file for the port being used. That scheme was deprecated in favour +of setting the compiler's include path such that it found the correct +portmacro.h file - removing the need for the constant and allowing the +portmacro.h file to be located anywhere in relation to the port being used. +Purely for reasons of backward compatibility the old method is still valid, but +to make it clear that new projects should not use it, support for the port +specific constants has been moved into the deprecated_definitions.h header +file. */ +#include "deprecated_definitions.h" + +/* If portENTER_CRITICAL is not defined then including deprecated_definitions.h +did not result in a portmacro.h header file being included - and it should be +included here. In this case the path to the correct portmacro.h header file +must be set in the compiler's include path. */ +#ifndef portENTER_CRITICAL + #include "portmacro.h" +#endif + +#if portBYTE_ALIGNMENT == 32 + #define portBYTE_ALIGNMENT_MASK ( 0x001f ) +#endif + +#if portBYTE_ALIGNMENT == 16 + #define portBYTE_ALIGNMENT_MASK ( 0x000f ) +#endif + +#if portBYTE_ALIGNMENT == 8 + #define portBYTE_ALIGNMENT_MASK ( 0x0007 ) +#endif + +#if portBYTE_ALIGNMENT == 4 + #define portBYTE_ALIGNMENT_MASK ( 0x0003 ) +#endif + +#if portBYTE_ALIGNMENT == 2 + #define portBYTE_ALIGNMENT_MASK ( 0x0001 ) +#endif + +#if portBYTE_ALIGNMENT == 1 + #define portBYTE_ALIGNMENT_MASK ( 0x0000 ) +#endif + +#ifndef portBYTE_ALIGNMENT_MASK + #error "Invalid portBYTE_ALIGNMENT definition" +#endif + +#ifndef portNUM_CONFIGURABLE_REGIONS + #define portNUM_CONFIGURABLE_REGIONS 1 +#endif + +#ifndef portHAS_STACK_OVERFLOW_CHECKING + #define portHAS_STACK_OVERFLOW_CHECKING 0 +#endif + +#ifndef portARCH_NAME + #define portARCH_NAME NULL +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#include "mpu_wrappers.h" + +/* + * Setup the stack of a new task so it is ready to be placed under the + * scheduler control. The registers have to be placed on the stack in + * the order that the port expects to find them. + * + */ +#if( portUSING_MPU_WRAPPERS == 1 ) + #if( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, StackType_t *pxEndOfStack, TaskFunction_t pxCode, void *pvParameters, BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION; + #else + StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters, BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION; + #endif +#else + #if( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, StackType_t *pxEndOfStack, TaskFunction_t pxCode, void *pvParameters ) PRIVILEGED_FUNCTION; + #else + StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) PRIVILEGED_FUNCTION; + #endif +#endif + +/* Used by heap_5.c. */ +typedef struct HeapRegion +{ + uint8_t *pucStartAddress; + size_t xSizeInBytes; +} HeapRegion_t; + +/* + * Used to define multiple heap regions for use by heap_5.c. 
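+ * As a sketch (the start addresses and sizes below are illustrative only,
+ * not taken from this port), a two-region heap would be described and
+ * installed like this:
+ *
+ *	const HeapRegion_t xHeapRegions[] =
+ *	{
+ *		{ ( uint8_t * ) 0x80000000UL, 0x10000 },	// 64K bytes at 0x80000000.
+ *		{ ( uint8_t * ) 0x90000000UL, 0xA0000 },	// 640K bytes at 0x90000000.
+ *		{ NULL, 0 }									// Terminates the array.
+ *	};
+ *	vPortDefineHeapRegions( xHeapRegions );
+ *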
This function
+ * must be called before any calls to pvPortMalloc() - note that creating a
+ * task, queue, semaphore, mutex, software timer, event group, etc. will
+ * result in pvPortMalloc being called.
+ *
+ * pxHeapRegions passes in an array of HeapRegion_t structures - each of which
+ * defines a region of memory that can be used as the heap.  The array is
+ * terminated by a HeapRegion_t structure that has a size of 0.  The region
+ * with the lowest start address must appear first in the array.
+ */
+void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) PRIVILEGED_FUNCTION;
+
+
+/*
+ * Map to the memory management routines required for the port.
+ */
+void *pvPortMalloc( size_t xSize ) PRIVILEGED_FUNCTION;
+void vPortFree( void *pv ) PRIVILEGED_FUNCTION;
+void vPortInitialiseBlocks( void ) PRIVILEGED_FUNCTION;
+size_t xPortGetFreeHeapSize( void ) PRIVILEGED_FUNCTION;
+size_t xPortGetMinimumEverFreeHeapSize( void ) PRIVILEGED_FUNCTION;
+
+/*
+ * Set up the hardware ready for the scheduler to take control.  This generally
+ * sets up a tick interrupt and sets timers for the correct tick frequency.
+ */
+BaseType_t xPortStartScheduler( void ) PRIVILEGED_FUNCTION;
+
+/*
+ * Undo any hardware/ISR setup that was performed by xPortStartScheduler() so
+ * the hardware is left in its original condition after the scheduler stops
+ * executing.
+ */
+void vPortEndScheduler( void ) PRIVILEGED_FUNCTION;
+
+/*
+ * The structures and methods of manipulating the MPU are contained within the
+ * port layer.
+ *
+ * Fills the xMPUSettings structure with the memory region information
+ * contained in xRegions.
+ */
+#if( portUSING_MPU_WRAPPERS == 1 )
+	struct xMEMORY_REGION;
+	void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMORY_REGION * const xRegions, StackType_t *pxBottomOfStack, uint32_t ulStackDepth ) PRIVILEGED_FUNCTION;
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PORTABLE_H */
+
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h
new file mode 100644
index 0000000..dbcf9bb
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/projdefs.h
@@ -0,0 +1,124 @@
+/*
+ * FreeRTOS Kernel V10.2.1
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef PROJDEFS_H +#define PROJDEFS_H + +/* + * Defines the prototype to which task functions must conform. Defined in this + * file to ensure the type is known before portable.h is included. + */ +typedef void (*TaskFunction_t)( void * ); + +/* Converts a time in milliseconds to a time in ticks. This macro can be +overridden by a macro of the same name defined in FreeRTOSConfig.h in case the +definition here is not suitable for your application. */ +#ifndef pdMS_TO_TICKS + #define pdMS_TO_TICKS( xTimeInMs ) ( ( TickType_t ) ( ( ( TickType_t ) ( xTimeInMs ) * ( TickType_t ) configTICK_RATE_HZ ) / ( TickType_t ) 1000 ) ) +#endif + +#define pdFALSE ( ( BaseType_t ) 0 ) +#define pdTRUE ( ( BaseType_t ) 1 ) + +#define pdPASS ( pdTRUE ) +#define pdFAIL ( pdFALSE ) +#define errQUEUE_EMPTY ( ( BaseType_t ) 0 ) +#define errQUEUE_FULL ( ( BaseType_t ) 0 ) + +/* FreeRTOS error definitions. */ +#define errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY ( -1 ) +#define errQUEUE_BLOCKED ( -4 ) +#define errQUEUE_YIELD ( -5 ) + +/* Macros used for basic data corruption checks. */ +#ifndef configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES + #define configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES 0 +#endif + +#if( configUSE_16_BIT_TICKS == 1 ) + #define pdINTEGRITY_CHECK_VALUE 0x5a5a +#else + #define pdINTEGRITY_CHECK_VALUE 0x5a5a5a5aUL +#endif + +/* The following errno values are used by FreeRTOS+ components, not FreeRTOS +itself. 
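+The values track the conventional newlib errno numbering, which is why
+pdFREERTOS_ERRNO_EAGAIN and pdFREERTOS_ERRNO_EWOULDBLOCK below share the
+value 11, just as EAGAIN and EWOULDBLOCK do in most C libraries.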
*/ +#define pdFREERTOS_ERRNO_NONE 0 /* No errors */ +#define pdFREERTOS_ERRNO_ENOENT 2 /* No such file or directory */ +#define pdFREERTOS_ERRNO_EINTR 4 /* Interrupted system call */ +#define pdFREERTOS_ERRNO_EIO 5 /* I/O error */ +#define pdFREERTOS_ERRNO_ENXIO 6 /* No such device or address */ +#define pdFREERTOS_ERRNO_EBADF 9 /* Bad file number */ +#define pdFREERTOS_ERRNO_EAGAIN 11 /* No more processes */ +#define pdFREERTOS_ERRNO_EWOULDBLOCK 11 /* Operation would block */ +#define pdFREERTOS_ERRNO_ENOMEM 12 /* Not enough memory */ +#define pdFREERTOS_ERRNO_EACCES 13 /* Permission denied */ +#define pdFREERTOS_ERRNO_EFAULT 14 /* Bad address */ +#define pdFREERTOS_ERRNO_EBUSY 16 /* Mount device busy */ +#define pdFREERTOS_ERRNO_EEXIST 17 /* File exists */ +#define pdFREERTOS_ERRNO_EXDEV 18 /* Cross-device link */ +#define pdFREERTOS_ERRNO_ENODEV 19 /* No such device */ +#define pdFREERTOS_ERRNO_ENOTDIR 20 /* Not a directory */ +#define pdFREERTOS_ERRNO_EISDIR 21 /* Is a directory */ +#define pdFREERTOS_ERRNO_EINVAL 22 /* Invalid argument */ +#define pdFREERTOS_ERRNO_ENOSPC 28 /* No space left on device */ +#define pdFREERTOS_ERRNO_ESPIPE 29 /* Illegal seek */ +#define pdFREERTOS_ERRNO_EROFS 30 /* Read only file system */ +#define pdFREERTOS_ERRNO_EUNATCH 42 /* Protocol driver not attached */ +#define pdFREERTOS_ERRNO_EBADE 50 /* Invalid exchange */ +#define pdFREERTOS_ERRNO_EFTYPE 79 /* Inappropriate file type or format */ +#define pdFREERTOS_ERRNO_ENMFILE 89 /* No more files */ +#define pdFREERTOS_ERRNO_ENOTEMPTY 90 /* Directory not empty */ +#define pdFREERTOS_ERRNO_ENAMETOOLONG 91 /* File or path name too long */ +#define pdFREERTOS_ERRNO_EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ +#define pdFREERTOS_ERRNO_ENOBUFS 105 /* No buffer space available */ +#define pdFREERTOS_ERRNO_ENOPROTOOPT 109 /* Protocol not available */ +#define pdFREERTOS_ERRNO_EADDRINUSE 112 /* Address already in use */ +#define pdFREERTOS_ERRNO_ETIMEDOUT 116 /* Connection timed out */ +#define pdFREERTOS_ERRNO_EINPROGRESS 119 /* Connection already in progress */ +#define pdFREERTOS_ERRNO_EALREADY 120 /* Socket already connected */ +#define pdFREERTOS_ERRNO_EADDRNOTAVAIL 125 /* Address not available */ +#define pdFREERTOS_ERRNO_EISCONN 127 /* Socket is already connected */ +#define pdFREERTOS_ERRNO_ENOTCONN 128 /* Socket is not connected */ +#define pdFREERTOS_ERRNO_ENOMEDIUM 135 /* No medium inserted */ +#define pdFREERTOS_ERRNO_EILSEQ 138 /* An invalid UTF-16 sequence was encountered. */ +#define pdFREERTOS_ERRNO_ECANCELED 140 /* Operation canceled. */ + +/* The following endian values are used by FreeRTOS+ components, not FreeRTOS +itself. */ +#define pdFREERTOS_LITTLE_ENDIAN 0 +#define pdFREERTOS_BIG_ENDIAN 1 + +/* Re-defining endian values for generic naming. */ +#define pdLITTLE_ENDIAN pdFREERTOS_LITTLE_ENDIAN +#define pdBIG_ENDIAN pdFREERTOS_BIG_ENDIAN + + +#endif /* PROJDEFS_H */ + + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/queue.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/queue.h new file mode 100644 index 0000000..5072c45 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/queue.h @@ -0,0 +1,1655 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#ifndef QUEUE_H +#define QUEUE_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h" must appear in source files before "include queue.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#include "task.h" + +/** + * Type by which queues are referenced. For example, a call to xQueueCreate() + * returns an QueueHandle_t variable that can then be used as a parameter to + * xQueueSend(), xQueueReceive(), etc. + */ +struct QueueDefinition; /* Using old naming convention so as not to break kernel aware debuggers. */ +typedef struct QueueDefinition * QueueHandle_t; + +/** + * Type by which queue sets are referenced. For example, a call to + * xQueueCreateSet() returns an xQueueSet variable that can then be used as a + * parameter to xQueueSelectFromSet(), xQueueAddToSet(), etc. + */ +typedef struct QueueDefinition * QueueSetHandle_t; + +/** + * Queue sets can contain both queues and semaphores, so the + * QueueSetMemberHandle_t is defined as a type to be used where a parameter or + * return value can be either an QueueHandle_t or an SemaphoreHandle_t. + */ +typedef struct QueueDefinition * QueueSetMemberHandle_t; + +/* For internal use only. */ +#define queueSEND_TO_BACK ( ( BaseType_t ) 0 ) +#define queueSEND_TO_FRONT ( ( BaseType_t ) 1 ) +#define queueOVERWRITE ( ( BaseType_t ) 2 ) + +/* For internal use only. These definitions *must* match those in queue.c. */ +#define queueQUEUE_TYPE_BASE ( ( uint8_t ) 0U ) +#define queueQUEUE_TYPE_SET ( ( uint8_t ) 0U ) +#define queueQUEUE_TYPE_MUTEX ( ( uint8_t ) 1U ) +#define queueQUEUE_TYPE_COUNTING_SEMAPHORE ( ( uint8_t ) 2U ) +#define queueQUEUE_TYPE_BINARY_SEMAPHORE ( ( uint8_t ) 3U ) +#define queueQUEUE_TYPE_RECURSIVE_MUTEX ( ( uint8_t ) 4U ) + +/** + * queue. h + *+ QueueHandle_t xQueueCreate( + UBaseType_t uxQueueLength, + UBaseType_t uxItemSize + ); + *+ * + * Creates a new queue instance, and returns a handle by which the new queue + * can be referenced. + * + * Internally, within the FreeRTOS implementation, queues use two blocks of + * memory. The first block is used to hold the queue's data structures. The + * second block is used to hold items placed into the queue. If a queue is + * created using xQueueCreate() then both blocks of memory are automatically + * dynamically allocated inside the xQueueCreate() function. (see + * http://www.freertos.org/a00111.html). 
If a queue is created using
+ * xQueueCreateStatic() then the application writer must provide the memory that
+ * will get used by the queue.  xQueueCreateStatic() therefore allows a queue to
+ * be created without using any dynamic memory allocation.
+ *
+ * http://www.FreeRTOS.org/Embedded-RTOS-Queues.html
+ *
+ * @param uxQueueLength The maximum number of items that the queue can contain.
+ *
+ * @param uxItemSize The number of bytes each item in the queue will require.
+ * Items are queued by copy, not by reference, so this is the number of bytes
+ * that will be copied for each posted item.  Each item on the queue must be
+ * the same size.
+ *
+ * @return If the queue is successfully created then a handle to the newly
+ * created queue is returned.  If the queue cannot be created then 0 is
+ * returned.
+ *
+ * Example usage:
+
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ };
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+	if( xQueue1 == 0 )
+	{
+		// Queue was not created and must not be used.
+	}
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+	if( xQueue2 == 0 )
+	{
+		// Queue was not created and must not be used.
+	}
+
+	// ... Rest of task code.
+ }
+
+ * \defgroup xQueueCreate xQueueCreate
+ * \ingroup QueueManagement
+ */
+#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+	#define xQueueCreate( uxQueueLength, uxItemSize ) xQueueGenericCreate( ( uxQueueLength ), ( uxItemSize ), ( queueQUEUE_TYPE_BASE ) )
+#endif
+
+/**
+ * queue. h
+ *
+ QueueHandle_t xQueueCreateStatic(
+							  UBaseType_t uxQueueLength,
+							  UBaseType_t uxItemSize,
+							  uint8_t *pucQueueStorageBuffer,
+							  StaticQueue_t *pxQueueBuffer
+						  );
+ *
+ *
+ * Creates a new queue instance, and returns a handle by which the new queue
+ * can be referenced.
+ *
+ * Internally, within the FreeRTOS implementation, queues use two blocks of
+ * memory.  The first block is used to hold the queue's data structures.  The
+ * second block is used to hold items placed into the queue.  If a queue is
+ * created using xQueueCreate() then both blocks of memory are automatically
+ * dynamically allocated inside the xQueueCreate() function.  (see
+ * http://www.freertos.org/a00111.html).  If a queue is created using
+ * xQueueCreateStatic() then the application writer must provide the memory that
+ * will get used by the queue.  xQueueCreateStatic() therefore allows a queue to
+ * be created without using any dynamic memory allocation.
+ *
+ * http://www.FreeRTOS.org/Embedded-RTOS-Queues.html
+ *
+ * @param uxQueueLength The maximum number of items that the queue can contain.
+ *
+ * @param uxItemSize The number of bytes each item in the queue will require.
+ * Items are queued by copy, not by reference, so this is the number of bytes
+ * that will be copied for each posted item.  Each item on the queue must be
+ * the same size.
+ *
+ * @param pucQueueStorageBuffer If uxItemSize is not zero then
+ * pucQueueStorageBuffer must point to a uint8_t array that is at least large
+ * enough to hold the maximum number of items that can be in the queue at any
+ * one time - which is ( uxQueueLength * uxItemSize ) bytes.  If uxItemSize is
+ * zero then pucQueueStorageBuffer can be NULL. 
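+ * (A uxItemSize of zero is a legitimate case: semaphores, for example, are
+ * implemented as queues that carry no data and therefore need no storage
+ * area.)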
+ *
+ * @param pxQueueBuffer Must point to a variable of type StaticQueue_t, which
+ * will be used to hold the queue's data structure.
+ *
+ * @return If the queue is created then a handle to the created queue is
+ * returned.  If pxQueueBuffer is NULL then NULL is returned.
+ *
+ * Example usage:
+
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ };
+
+ #define QUEUE_LENGTH 10
+ #define ITEM_SIZE sizeof( uint32_t )
+
+ // xQueueBuffer will hold the queue structure.
+ StaticQueue_t xQueueBuffer;
+
+ // ucQueueStorage will hold the items posted to the queue.  Must be at least
+ // [(queue length) * ( queue item size)] bytes long.
+ uint8_t ucQueueStorage[ QUEUE_LENGTH * ITEM_SIZE ];
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreateStatic( QUEUE_LENGTH, // The number of items the queue can hold.
+							ITEM_SIZE,	  // The size of each item in the queue.
+							&( ucQueueStorage[ 0 ] ), // The buffer that will hold the items in the queue.
+							&xQueueBuffer ); // The buffer that will hold the queue structure.
+
+	// The queue is guaranteed to be created successfully as no dynamic memory
+	// allocation is used.  Therefore xQueue1 is now a handle to a valid queue.
+
+	// ... Rest of task code.
+ }
+
+ * \defgroup xQueueCreateStatic xQueueCreateStatic
+ * \ingroup QueueManagement
+ */
+#if( configSUPPORT_STATIC_ALLOCATION == 1 )
+	#define xQueueCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxQueueBuffer ) xQueueGenericCreateStatic( ( uxQueueLength ), ( uxItemSize ), ( pucQueueStorage ), ( pxQueueBuffer ), ( queueQUEUE_TYPE_BASE ) )
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueueSendToFront(
+								   QueueHandle_t xQueue,
+								   const void *pvItemToQueue,
+								   TickType_t xTicksToWait
+							   );
+ *
+ *
+ * Post an item to the front of a queue.  The item is queued by copy, not by
+ * reference.  This function must not be called from an interrupt service
+ * routine.  See xQueueSendFromISR () for an alternative which may be used
+ * in an ISR.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue.  The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param xTicksToWait The maximum amount of time the task should block
+ * waiting for space to become available on the queue, should it already
+ * be full.  The call will return immediately if this is set to 0 and the
+ * queue is full.  The time is defined in tick periods so the constant
+ * portTICK_PERIOD_MS should be used to convert to real time if this is required.
+ *
+ * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL.
+ *
+ * Example usage:
+
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ } xMessage;
+
+ uint32_t ulVar = 10UL;
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+ struct AMessage *pxMessage;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+
+	// Create a queue capable of containing 10 pointers to AMessage structures.
+	// These should be passed by pointer as they contain a lot of data.
+	xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+
+	// ...
+
+	if( xQueue1 != 0 )
+	{
+		// Send an uint32_t. 
Wait for 10 ticks for space to become + // available if necessary. + if( xQueueSendToFront( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS ) + { + // Failed to post the message, even after 10 ticks. + } + } + + if( xQueue2 != 0 ) + { + // Send a pointer to a struct AMessage object. Don't block if the + // queue is already full. + pxMessage = & xMessage; + xQueueSendToFront( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 ); + } + + // ... Rest of task code. + } ++ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +#define xQueueSendToFront( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_FRONT ) + +/** + * queue. h + *+ BaseType_t xQueueSendToBack( + QueueHandle_t xQueue, + const void *pvItemToQueue, + TickType_t xTicksToWait + ); + *+ * + * This is a macro that calls xQueueGenericSend(). + * + * Post an item to the back of a queue. The item is queued by copy, not by + * reference. This function must not be called from an interrupt service + * routine. See xQueueSendFromISR () for an alternative which may be used + * in an ISR. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for space to become available on the queue, should it already + * be full. The call will return immediately if this is set to 0 and the queue + * is full. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * + * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. + * + * Example usage: ++ struct AMessage + { + char ucMessageID; + char ucData[ 20 ]; + } xMessage; + + uint32_t ulVar = 10UL; + + void vATask( void *pvParameters ) + { + QueueHandle_t xQueue1, xQueue2; + struct AMessage *pxMessage; + + // Create a queue capable of containing 10 uint32_t values. + xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) ); + + // Create a queue capable of containing 10 pointers to AMessage structures. + // These should be passed by pointer as they contain a lot of data. + xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) ); + + // ... + + if( xQueue1 != 0 ) + { + // Send an uint32_t. Wait for 10 ticks for space to become + // available if necessary. + if( xQueueSendToBack( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS ) + { + // Failed to post the message, even after 10 ticks. + } + } + + if( xQueue2 != 0 ) + { + // Send a pointer to a struct AMessage object. Don't block if the + // queue is already full. + pxMessage = & xMessage; + xQueueSendToBack( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 ); + } + + // ... Rest of task code. + } ++ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +#define xQueueSendToBack( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK ) + +/** + * queue. h + *+ BaseType_t xQueueSend( + QueueHandle_t xQueue, + const void * pvItemToQueue, + TickType_t xTicksToWait + ); + *+ * + * This is a macro that calls xQueueGenericSend(). 
It is included for + * backward compatibility with versions of FreeRTOS.org that did not + * include the xQueueSendToFront() and xQueueSendToBack() macros. It is + * equivalent to xQueueSendToBack(). + * + * Post an item on a queue. The item is queued by copy, not by reference. + * This function must not be called from an interrupt service routine. + * See xQueueSendFromISR () for an alternative which may be used in an ISR. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. + * + * @param xTicksToWait The maximum amount of time the task should block + * waiting for space to become available on the queue, should it already + * be full. The call will return immediately if this is set to 0 and the + * queue is full. The time is defined in tick periods so the constant + * portTICK_PERIOD_MS should be used to convert to real time if this is required. + * + * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. + * + * Example usage: ++ struct AMessage + { + char ucMessageID; + char ucData[ 20 ]; + } xMessage; + + uint32_t ulVar = 10UL; + + void vATask( void *pvParameters ) + { + QueueHandle_t xQueue1, xQueue2; + struct AMessage *pxMessage; + + // Create a queue capable of containing 10 uint32_t values. + xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) ); + + // Create a queue capable of containing 10 pointers to AMessage structures. + // These should be passed by pointer as they contain a lot of data. + xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) ); + + // ... + + if( xQueue1 != 0 ) + { + // Send an uint32_t. Wait for 10 ticks for space to become + // available if necessary. + if( xQueueSend( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10 ) != pdPASS ) + { + // Failed to post the message, even after 10 ticks. + } + } + + if( xQueue2 != 0 ) + { + // Send a pointer to a struct AMessage object. Don't block if the + // queue is already full. + pxMessage = & xMessage; + xQueueSend( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0 ); + } + + // ... Rest of task code. + } ++ * \defgroup xQueueSend xQueueSend + * \ingroup QueueManagement + */ +#define xQueueSend( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK ) + +/** + * queue. h + *+ BaseType_t xQueueOverwrite( + QueueHandle_t xQueue, + const void * pvItemToQueue + ); + *+ * + * Only for use with queues that have a length of one - so the queue is either + * empty or full. + * + * Post an item on a queue. If the queue is already full then overwrite the + * value held in the queue. The item is queued by copy, not by reference. + * + * This function must not be called from an interrupt service routine. + * See xQueueOverwriteFromISR () for an alternative which may be used in an ISR. + * + * @param xQueue The handle of the queue to which the data is being sent. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. 
+ *
+ * @return xQueueOverwrite() is a macro that calls xQueueGenericSend(), and
+ * therefore has the same return values as xQueueSendToFront().  However, pdPASS
+ * is the only value that can be returned because xQueueOverwrite() will write
+ * to the queue even when the queue is already full.
+ *
+ * Example usage:
+
+
+ void vFunction( void *pvParameters )
+ {
+ QueueHandle_t xQueue;
+ uint32_t ulVarToSend, ulValReceived;
+
+	// Create a queue to hold one uint32_t value.  It is strongly
+	// recommended *not* to use xQueueOverwrite() on queues that can
+	// contain more than one value, and doing so will trigger an assertion
+	// if configASSERT() is defined.
+	xQueue = xQueueCreate( 1, sizeof( uint32_t ) );
+
+	// Write the value 10 to the queue using xQueueOverwrite().
+	ulVarToSend = 10;
+	xQueueOverwrite( xQueue, &ulVarToSend );
+
+	// Peeking the queue should now return 10, but leave the value 10 in
+	// the queue.  A block time of zero is used as it is known that the
+	// queue holds a value.
+	ulValReceived = 0;
+	xQueuePeek( xQueue, &ulValReceived, 0 );
+
+	if( ulValReceived != 10 )
+	{
+		// Error unless the item was removed by a different task.
+	}
+
+	// The queue is still full.  Use xQueueOverwrite() to overwrite the
+	// value held in the queue with 100.
+	ulVarToSend = 100;
+	xQueueOverwrite( xQueue, &ulVarToSend );
+
+	// This time read from the queue, leaving the queue empty once more.
+	// A block time of 0 is used again.
+	xQueueReceive( xQueue, &ulValReceived, 0 );
+
+	// The value read should be the last value written, even though the
+	// queue was already full when the value was written.
+	if( ulValReceived != 100 )
+	{
+		// Error!
+	}
+
+	// ...
+}
+
+ * \defgroup xQueueOverwrite xQueueOverwrite
+ * \ingroup QueueManagement
+ */
+#define xQueueOverwrite( xQueue, pvItemToQueue ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), 0, queueOVERWRITE )
+
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueueGenericSend(
+								   QueueHandle_t xQueue,
+								   const void * pvItemToQueue,
+								   TickType_t xTicksToWait,
+								   BaseType_t xCopyPosition
+							   );
+ *
+ *
+ * It is preferred that the macros xQueueSend(), xQueueSendToFront() and
+ * xQueueSendToBack() are used in place of calling this function directly.
+ *
+ * Post an item on a queue.  The item is queued by copy, not by reference.
+ * This function must not be called from an interrupt service routine.
+ * See xQueueSendFromISR () for an alternative which may be used in an ISR.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue.  The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param xTicksToWait The maximum amount of time the task should block
+ * waiting for space to become available on the queue, should it already
+ * be full.  The call will return immediately if this is set to 0 and the
+ * queue is full.  The time is defined in tick periods so the constant
+ * portTICK_PERIOD_MS should be used to convert to real time if this is required.
+ *
+ * @param xCopyPosition Can take the value queueSEND_TO_BACK to place the
+ * item at the back of the queue, or queueSEND_TO_FRONT to place the item
+ * at the front of the queue (for high priority messages).
+ *
+ * @return pdTRUE if the item was successfully posted, otherwise errQUEUE_FULL. 
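+ * Note that errQUEUE_FULL and pdFAIL are both defined as 0 (see projdefs.h
+ * earlier in this patch), so the return value can equally be treated as a
+ * simple pass/fail.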
+ *
+ * Example usage:
+
+ struct AMessage
+ {
+ char ucMessageID;
+ char ucData[ 20 ];
+ } xMessage;
+
+ uint32_t ulVar = 10UL;
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1, xQueue2;
+ struct AMessage *pxMessage;
+
+ // Create a queue capable of containing 10 uint32_t values.
+ xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+
+ // Create a queue capable of containing 10 pointers to AMessage structures.
+ // These should be passed by pointer as they contain a lot of data.
+ xQueue2 = xQueueCreate( 10, sizeof( struct AMessage * ) );
+
+ // ...
+
+ if( xQueue1 != 0 )
+ {
+ // Send a uint32_t. Wait for 10 ticks for space to become
+ // available if necessary.
+ if( xQueueGenericSend( xQueue1, ( void * ) &ulVar, ( TickType_t ) 10, queueSEND_TO_BACK ) != pdPASS )
+ {
+ // Failed to post the message, even after 10 ticks.
+ }
+ }
+
+ if( xQueue2 != 0 )
+ {
+ // Send a pointer to a struct AMessage object. Don't block if the
+ // queue is already full.
+ pxMessage = & xMessage;
+ xQueueGenericSend( xQueue2, ( void * ) &pxMessage, ( TickType_t ) 0, queueSEND_TO_BACK );
+ }
+
+ // ... Rest of task code.
+ }
+
+ * \defgroup xQueueSend xQueueSend
+ * \ingroup QueueManagement
+ */
+BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueuePeek(
+ QueueHandle_t xQueue,
+ void * const pvBuffer,
+ TickType_t xTicksToWait
+ );
+ *
+ * Receive an item from a queue without removing the item from the queue.
+ * The item is received by copy so a buffer of adequate size must be
+ * provided. The number of bytes copied into the buffer was defined when
+ * the queue was created.
+ *
+ * Successfully received items remain on the queue so will be returned again
+ * by the next call, or a call to xQueueReceive().
+ *
+ * This macro must not be used in an interrupt service routine. See
+ * xQueuePeekFromISR() for an alternative that can be called from an interrupt
+ * service routine.
+ *
+ * @param xQueue The handle to the queue from which the item is to be
+ * received.
+ *
+ * @param pvBuffer Pointer to the buffer into which the received item will
+ * be copied.
+ *
+ * @param xTicksToWait The maximum amount of time the task should block
+ * waiting for an item to receive should the queue be empty at the time
+ * of the call. The time is defined in tick periods so the constant
+ * portTICK_PERIOD_MS should be used to convert to real time if this is required.
+ * xQueuePeek() will return immediately if xTicksToWait is 0 and the queue
+ * is empty.
+ *
+ * @return pdTRUE if an item was successfully received from the queue,
+ * otherwise pdFALSE.
+ *
+ * Example usage:
+
+ struct AMessage
+ {
+ char ucMessageID;
+ char ucData[ 20 ];
+ } xMessage;
+
+ QueueHandle_t xQueue;
+
+ // Task to create a queue and post a value.
+ void vATask( void *pvParameters )
+ {
+ struct AMessage *pxMessage;
+
+ // Create a queue capable of containing 10 pointers to AMessage structures.
+ // These should be passed by pointer as they contain a lot of data.
+ xQueue = xQueueCreate( 10, sizeof( struct AMessage * ) );
+ if( xQueue == 0 )
+ {
+ // Failed to create the queue.
+ }
+
+ // ...
+
+ // Send a pointer to a struct AMessage object. Don't block if the
+ // queue is already full.
+ pxMessage = & xMessage;
+ xQueueSend( xQueue, ( void * ) &pxMessage, ( TickType_t ) 0 );
+
+ // ... Rest of task code.
+ }
+
+ // Task to peek the data from the queue.
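+ // Note that, unlike xQueueReceive(), peeking leaves the item on the queue.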
+ void vADifferentTask( void *pvParameters )
+ {
+ struct AMessage *pxRxedMessage;
+
+ if( xQueue != 0 )
+ {
+ // Peek a message on the created queue. Block for 10 ticks if a
+ // message is not immediately available.
+ if( xQueuePeek( xQueue, &( pxRxedMessage ), ( TickType_t ) 10 ) )
+ {
+ // pxRxedMessage now points to the struct AMessage variable posted
+ // by vATask, but the item still remains on the queue.
+ }
+ }
+
+ // ... Rest of task code.
+ }
+
+ * \defgroup xQueuePeek xQueuePeek
+ * \ingroup QueueManagement
+ */
+BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueuePeekFromISR(
+ QueueHandle_t xQueue,
+ void *pvBuffer
+ );
+ *
+ * A version of xQueuePeek() that can be called from an interrupt service
+ * routine (ISR).
+ *
+ * Receive an item from a queue without removing the item from the queue.
+ * The item is received by copy so a buffer of adequate size must be
+ * provided. The number of bytes copied into the buffer was defined when
+ * the queue was created.
+ *
+ * Successfully received items remain on the queue so will be returned again
+ * by the next call, or a call to xQueueReceive().
+ *
+ * @param xQueue The handle to the queue from which the item is to be
+ * received.
+ *
+ * @param pvBuffer Pointer to the buffer into which the received item will
+ * be copied.
+ *
+ * @return pdTRUE if an item was successfully received from the queue,
+ * otherwise pdFALSE.
+ *
+ * \defgroup xQueuePeekFromISR xQueuePeekFromISR
+ * \ingroup QueueManagement
+ */
+BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueueReceive(
+ QueueHandle_t xQueue,
+ void *pvBuffer,
+ TickType_t xTicksToWait
+ );
+ *
+ * Receive an item from a queue. The item is received by copy so a buffer of
+ * adequate size must be provided. The number of bytes copied into the buffer
+ * was defined when the queue was created.
+ *
+ * Successfully received items are removed from the queue.
+ *
+ * This function must not be used in an interrupt service routine. See
+ * xQueueReceiveFromISR for an alternative that can be used from an ISR.
+ *
+ * @param xQueue The handle to the queue from which the item is to be
+ * received.
+ *
+ * @param pvBuffer Pointer to the buffer into which the received item will
+ * be copied.
+ *
+ * @param xTicksToWait The maximum amount of time the task should block
+ * waiting for an item to receive should the queue be empty at the time
+ * of the call. xQueueReceive() will return immediately if xTicksToWait
+ * is zero and the queue is empty. The time is defined in tick periods so the
+ * constant portTICK_PERIOD_MS should be used to convert to real time if this is
+ * required.
+ *
+ * @return pdTRUE if an item was successfully received from the queue,
+ * otherwise pdFALSE.
+ *
+ * Example usage:
+
+ struct AMessage
+ {
+ char ucMessageID;
+ char ucData[ 20 ];
+ } xMessage;
+
+ QueueHandle_t xQueue;
+
+ // Task to create a queue and post a value.
+ void vATask( void *pvParameters )
+ {
+ struct AMessage *pxMessage;
+
+ // Create a queue capable of containing 10 pointers to AMessage structures.
+ // These should be passed by pointer as they contain a lot of data.
+ xQueue = xQueueCreate( 10, sizeof( struct AMessage * ) );
+ if( xQueue == 0 )
+ {
+ // Failed to create the queue.
+ }
+
+ // ...
+
+ // Send a pointer to a struct AMessage object. Don't block if the
+ // queue is already full.
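+ // (A block time of zero is used; a non-zero value would let the send
+ // wait for space to become available instead.)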
+ pxMessage = & xMessage;
+ xQueueSend( xQueue, ( void * ) &pxMessage, ( TickType_t ) 0 );
+
+ // ... Rest of task code.
+ }
+
+ // Task to receive from the queue.
+ void vADifferentTask( void *pvParameters )
+ {
+ struct AMessage *pxRxedMessage;
+
+ if( xQueue != 0 )
+ {
+ // Receive a message on the created queue. Block for 10 ticks if a
+ // message is not immediately available.
+ if( xQueueReceive( xQueue, &( pxRxedMessage ), ( TickType_t ) 10 ) )
+ {
+ // pxRxedMessage now points to the struct AMessage variable posted
+ // by vATask.
+ }
+ }
+
+ // ... Rest of task code.
+ }
+
+ * \defgroup xQueueReceive xQueueReceive
+ * \ingroup QueueManagement
+ */
+BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ * UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue );
+ *
+ * Return the number of messages stored in a queue.
+ *
+ * @param xQueue A handle to the queue being queried.
+ *
+ * @return The number of messages available in the queue.
+ *
+ * \defgroup uxQueueMessagesWaiting uxQueueMessagesWaiting
+ * \ingroup QueueManagement
+ */
+UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ * UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue );
+ *
+ * Return the number of free spaces available in a queue. This is equal to the
+ * number of items that can be sent to the queue before the queue becomes full
+ * if no items are removed.
+ *
+ * @param xQueue A handle to the queue being queried.
+ *
+ * @return The number of spaces available in the queue.
+ *
+ * \defgroup uxQueueSpacesAvailable uxQueueSpacesAvailable
+ * \ingroup QueueManagement
+ */
+UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ * void vQueueDelete( QueueHandle_t xQueue );
+ *
+ * Delete a queue - freeing all the memory allocated for storing of items
+ * placed on the queue.
+ *
+ * @param xQueue A handle to the queue to be deleted.
+ *
+ * \defgroup vQueueDelete vQueueDelete
+ * \ingroup QueueManagement
+ */
+void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueueSendToFrontFromISR(
+ QueueHandle_t xQueue,
+ const void *pvItemToQueue,
+ BaseType_t *pxHigherPriorityTaskWoken
+ );
+
+ *
+ * This is a macro that calls xQueueGenericSendFromISR().
+ *
+ * Post an item to the front of a queue. It is safe to use this macro from
+ * within an interrupt service routine.
+ *
+ * Items are queued by copy not reference so it is preferable to only
+ * queue small items, especially when called from an ISR. In most cases
+ * it would be preferable to store a pointer to the item being queued.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param pxHigherPriorityTaskWoken xQueueSendToFrontFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
+ * to unblock, and the unblocked task has a priority higher than the currently
+ * running task. If xQueueSendToFrontFromISR() sets this value to pdTRUE then
+ * a context switch should be requested before the interrupt is exited.
+ *
+ * @return pdTRUE if the data was successfully sent to the queue, otherwise
+ * errQUEUE_FULL.
+ *
+ * Example usage for buffered IO (where the ISR can obtain more than one value
+ * per call):
+
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWoken;
+
+ // We have not woken a task at the start of the ISR.
+ xHigherPriorityTaskWoken = pdFALSE;
+
+ // Loop until the buffer is empty.
+ do
+ {
+ // Obtain a byte from the buffer.
+ cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+ // Post the byte.
+ xQueueSendToFrontFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
+
+ } while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+ // Now the buffer is empty, we can switch context if necessary.
+ if( xHigherPriorityTaskWoken )
+ {
+ taskYIELD ();
+ }
+ }
+
+ *
+ * \defgroup xQueueSendFromISR xQueueSendFromISR
+ * \ingroup QueueManagement
+ */
+#define xQueueSendToFrontFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_FRONT )
+
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueueSendToBackFromISR(
+ QueueHandle_t xQueue,
+ const void *pvItemToQueue,
+ BaseType_t *pxHigherPriorityTaskWoken
+ );
+
+ *
+ * This is a macro that calls xQueueGenericSendFromISR().
+ *
+ * Post an item to the back of a queue. It is safe to use this macro from
+ * within an interrupt service routine.
+ *
+ * Items are queued by copy not reference so it is preferable to only
+ * queue small items, especially when called from an ISR. In most cases
+ * it would be preferable to store a pointer to the item being queued.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param pxHigherPriorityTaskWoken xQueueSendToBackFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
+ * to unblock, and the unblocked task has a priority higher than the currently
+ * running task. If xQueueSendToBackFromISR() sets this value to pdTRUE then
+ * a context switch should be requested before the interrupt is exited.
+ *
+ * @return pdTRUE if the data was successfully sent to the queue, otherwise
+ * errQUEUE_FULL.
+ *
+ * Example usage for buffered IO (where the ISR can obtain more than one value
+ * per call):
+
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWoken;
+
+ // We have not woken a task at the start of the ISR.
+ xHigherPriorityTaskWoken = pdFALSE;
+
+ // Loop until the buffer is empty.
+ do
+ {
+ // Obtain a byte from the buffer.
+ cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+ // Post the byte.
+ xQueueSendToBackFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
+
+ } while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+ // Now the buffer is empty, we can switch context if necessary.
+ if( xHigherPriorityTaskWoken )
+ {
+ taskYIELD ();
+ }
+ }
+
+ *
+ * \defgroup xQueueSendFromISR xQueueSendFromISR
+ * \ingroup QueueManagement
+ */
+#define xQueueSendToBackFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK )
+
+/**
+ * queue.
h
+ *
+ BaseType_t xQueueOverwriteFromISR(
+ QueueHandle_t xQueue,
+ const void * pvItemToQueue,
+ BaseType_t *pxHigherPriorityTaskWoken
+ );
+ *
+ *
+ * A version of xQueueOverwrite() that can be used in an interrupt service
+ * routine (ISR).
+ *
+ * Only for use with queues that can hold a single item - so the queue is either
+ * empty or full.
+ *
+ * Post an item on a queue. If the queue is already full then overwrite the
+ * value held in the queue. The item is queued by copy, not by reference.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param pxHigherPriorityTaskWoken xQueueOverwriteFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
+ * to unblock, and the unblocked task has a priority higher than the currently
+ * running task. If xQueueOverwriteFromISR() sets this value to pdTRUE then
+ * a context switch should be requested before the interrupt is exited.
+ *
+ * @return xQueueOverwriteFromISR() is a macro that calls
+ * xQueueGenericSendFromISR(), and therefore has the same return values as
+ * xQueueSendToFrontFromISR(). However, pdPASS is the only value that can be
+ * returned because xQueueOverwriteFromISR() will write to the queue even when
+ * the queue is already full.
+ *
+ * Example usage:
+
+ QueueHandle_t xQueue;
+
+ void vFunction( void *pvParameters )
+ {
+ // Create a queue to hold one uint32_t value. It is strongly
+ // recommended *not* to use xQueueOverwriteFromISR() on queues that can
+ // contain more than one value, and doing so will trigger an assertion
+ // if configASSERT() is defined.
+ xQueue = xQueueCreate( 1, sizeof( uint32_t ) );
+ }
+
+ void vAnInterruptHandler( void )
+ {
+ // xHigherPriorityTaskWoken must be set to pdFALSE before it is used.
+ BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+ uint32_t ulVarToSend, ulValReceived;
+
+ // Write the value 10 to the queue using xQueueOverwriteFromISR().
+ ulVarToSend = 10;
+ xQueueOverwriteFromISR( xQueue, &ulVarToSend, &xHigherPriorityTaskWoken );
+
+ // The queue is full, but calling xQueueOverwriteFromISR() again will still
+ // pass because the value held in the queue will be overwritten with the
+ // new value.
+ ulVarToSend = 100;
+ xQueueOverwriteFromISR( xQueue, &ulVarToSend, &xHigherPriorityTaskWoken );
+
+ // Reading from the queue will now return 100.
+
+ // ...
+
+ if( xHigherPriorityTaskWoken == pdTRUE )
+ {
+ // Writing to the queue caused a task to unblock and the unblocked task
+ // has a priority higher than or equal to the priority of the currently
+ // executing task (the task this interrupt interrupted). Perform a context
+ // switch so this interrupt returns directly to the unblocked task.
+ portYIELD_FROM_ISR(); // or portEND_SWITCHING_ISR() depending on the port.
+ }
+ }
+
+ * \defgroup xQueueOverwriteFromISR xQueueOverwriteFromISR
+ * \ingroup QueueManagement
+ */
+#define xQueueOverwriteFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueOVERWRITE )
+
+/**
+ * queue.
h
+ *
+ BaseType_t xQueueSendFromISR(
+ QueueHandle_t xQueue,
+ const void *pvItemToQueue,
+ BaseType_t *pxHigherPriorityTaskWoken
+ );
+
+ *
+ * This is a macro that calls xQueueGenericSendFromISR(). It is included
+ * for backward compatibility with versions of FreeRTOS.org that did not
+ * include the xQueueSendToBackFromISR() and xQueueSendToFrontFromISR()
+ * macros.
+ *
+ * Post an item to the back of a queue. It is safe to use this function from
+ * within an interrupt service routine.
+ *
+ * Items are queued by copy not reference so it is preferable to only
+ * queue small items, especially when called from an ISR. In most cases
+ * it would be preferable to store a pointer to the item being queued.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue. The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param pxHigherPriorityTaskWoken xQueueSendFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
+ * to unblock, and the unblocked task has a priority higher than the currently
+ * running task. If xQueueSendFromISR() sets this value to pdTRUE then
+ * a context switch should be requested before the interrupt is exited.
+ *
+ * @return pdTRUE if the data was successfully sent to the queue, otherwise
+ * errQUEUE_FULL.
+ *
+ * Example usage for buffered IO (where the ISR can obtain more than one value
+ * per call):
+
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWoken;
+
+ // We have not woken a task at the start of the ISR.
+ xHigherPriorityTaskWoken = pdFALSE;
+
+ // Loop until the buffer is empty.
+ do
+ {
+ // Obtain a byte from the buffer.
+ cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+ // Post the byte.
+ xQueueSendFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
+
+ } while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+ // Now the buffer is empty, we can switch context if necessary.
+ if( xHigherPriorityTaskWoken )
+ {
+ // Actual macro used here is port specific.
+ portYIELD_FROM_ISR ();
+ }
+ }
+
+ *
+ * \defgroup xQueueSendFromISR xQueueSendFromISR
+ * \ingroup QueueManagement
+ */
+#define xQueueSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK )
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueueGenericSendFromISR(
+ QueueHandle_t xQueue,
+ const void *pvItemToQueue,
+ BaseType_t *pxHigherPriorityTaskWoken,
+ BaseType_t xCopyPosition
+ );
+
+ *
+ * It is preferred that the macros xQueueSendFromISR(),
+ * xQueueSendToFrontFromISR() and xQueueSendToBackFromISR() be used in place
+ * of calling this function directly. xQueueGiveFromISR() is an
+ * equivalent for use by semaphores that don't actually copy any data.
+ *
+ * Post an item on a queue. It is safe to use this function from within an
+ * interrupt service routine.
+ *
+ * Items are queued by copy not reference so it is preferable to only
+ * queue small items, especially when called from an ISR. In most cases
+ * it would be preferable to store a pointer to the item being queued.
+ *
+ * @param xQueue The handle to the queue on which the item is to be posted.
+ *
+ * @param pvItemToQueue A pointer to the item that is to be placed on the
+ * queue.
The size of the items the queue will hold was defined when the
+ * queue was created, so this many bytes will be copied from pvItemToQueue
+ * into the queue storage area.
+ *
+ * @param pxHigherPriorityTaskWoken xQueueGenericSendFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending to the queue caused a task
+ * to unblock, and the unblocked task has a priority higher than the currently
+ * running task. If xQueueGenericSendFromISR() sets this value to pdTRUE then
+ * a context switch should be requested before the interrupt is exited.
+ *
+ * @param xCopyPosition Can take the value queueSEND_TO_BACK to place the
+ * item at the back of the queue, or queueSEND_TO_FRONT to place the item
+ * at the front of the queue (for high priority messages).
+ *
+ * @return pdTRUE if the data was successfully sent to the queue, otherwise
+ * errQUEUE_FULL.
+ *
+ * Example usage for buffered IO (where the ISR can obtain more than one value
+ * per call):
+
+ void vBufferISR( void )
+ {
+ char cIn;
+ BaseType_t xHigherPriorityTaskWokenByPost;
+
+ // We have not woken a task at the start of the ISR.
+ xHigherPriorityTaskWokenByPost = pdFALSE;
+
+ // Loop until the buffer is empty.
+ do
+ {
+ // Obtain a byte from the buffer.
+ cIn = portINPUT_BYTE( RX_REGISTER_ADDRESS );
+
+ // Post each byte.
+ xQueueGenericSendFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWokenByPost, queueSEND_TO_BACK );
+
+ } while( portINPUT_BYTE( BUFFER_COUNT ) );
+
+ // Now the buffer is empty, we can switch context if necessary. Note that the
+ // name of the yield function required is port specific.
+ if( xHigherPriorityTaskWokenByPost )
+ {
+ portYIELD_FROM_ISR();
+ }
+ }
+
+ *
+ * \defgroup xQueueSendFromISR xQueueSendFromISR
+ * \ingroup QueueManagement
+ */
+BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
+BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+
+/**
+ * queue. h
+ *
+ BaseType_t xQueueReceiveFromISR(
+ QueueHandle_t xQueue,
+ void *pvBuffer,
+ BaseType_t *pxTaskWoken
+ );
+ *
+ *
+ * Receive an item from a queue. It is safe to use this function from within an
+ * interrupt service routine.
+ *
+ * @param xQueue The handle to the queue from which the item is to be
+ * received.
+ *
+ * @param pvBuffer Pointer to the buffer into which the received item will
+ * be copied.
+ *
+ * @param pxTaskWoken A task may be blocked waiting for space to become
+ * available on the queue. If xQueueReceiveFromISR causes such a task to
+ * unblock *pxTaskWoken will get set to pdTRUE, otherwise *pxTaskWoken will
+ * remain unchanged.
+ *
+ * @return pdTRUE if an item was successfully received from the queue,
+ * otherwise pdFALSE.
+ *
+ * Example usage:
+
+ QueueHandle_t xQueue;
+
+ // Function to create a queue and post some values.
+ void vAFunction( void *pvParameters )
+ {
+ char cValueToPost;
+ const TickType_t xTicksToWait = ( TickType_t )0xff;
+
+ // Create a queue capable of containing 10 characters.
+ xQueue = xQueueCreate( 10, sizeof( char ) );
+ if( xQueue == 0 )
+ {
+ // Failed to create the queue.
+ }
+
+ // ...
+
+ // Post some characters that will be used within an ISR. If the queue
+ // is full then this task will block for xTicksToWait ticks.
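+ // (xTicksToWait was set to 0xff ticks when it was declared above.)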
+ cValueToPost = 'a';
+ xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
+ cValueToPost = 'b';
+ xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
+
+ // ... keep posting characters ... this task may block when the queue
+ // becomes full.
+
+ cValueToPost = 'c';
+ xQueueSend( xQueue, ( void * ) &cValueToPost, xTicksToWait );
+ }
+
+ // ISR that outputs all the characters received on the queue.
+ void vISR_Routine( void )
+ {
+ BaseType_t xTaskWokenByReceive = pdFALSE;
+ char cRxedChar;
+
+ while( xQueueReceiveFromISR( xQueue, ( void * ) &cRxedChar, &xTaskWokenByReceive) )
+ {
+ // A character was received. Output the character now.
+ vOutputCharacter( cRxedChar );
+
+ // If removing the character from the queue woke the task that was
+ // posting onto the queue, xTaskWokenByReceive will have been set to
+ // pdTRUE. No matter how many times this loop iterates only one
+ // task will be woken.
+ }
+
+ if( xTaskWokenByReceive != pdFALSE )
+ {
+ taskYIELD ();
+ }
+ }
+
+ * \defgroup xQueueReceiveFromISR xQueueReceiveFromISR
+ * \ingroup QueueManagement
+ */
+BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+
+/*
+ * Utilities to query queues that are safe to use from an ISR. These utilities
+ * should be used only from within an ISR, or within a critical section.
+ */
+BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+/*
+ * The functions defined above are for passing data to and from tasks. The
+ * functions below are the equivalents for passing data to and from
+ * co-routines.
+ *
+ * These functions are called from the co-routine macro implementation and
+ * should not be called directly from application code. Instead use the macro
+ * wrappers defined within croutine.h.
+ */
+BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken );
+BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxTaskWoken );
+BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait );
+BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait );
+
+/*
+ * For internal use only. Use xSemaphoreCreateMutex(),
+ * xSemaphoreCreateCounting() or xSemaphoreGetMutexHolder() instead of calling
+ * these functions directly.
+ */
+QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType ) PRIVILEGED_FUNCTION;
+QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ) PRIVILEGED_FUNCTION;
+QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) PRIVILEGED_FUNCTION;
+QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ) PRIVILEGED_FUNCTION;
+BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION;
+TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION;
+
+/*
+ * For internal use only.
Use xSemaphoreTakeMutexRecursive() or
+ * xSemaphoreGiveMutexRecursive() instead of calling these functions directly.
+ */
+BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex ) PRIVILEGED_FUNCTION;
+
+/*
+ * Reset a queue back to its original empty state. The return value is now
+ * obsolete and is always set to pdPASS.
+ */
+#define xQueueReset( xQueue ) xQueueGenericReset( xQueue, pdFALSE )
+
+/*
+ * The registry is provided as a means for kernel aware debuggers to
+ * locate queues, semaphores and mutexes. Call vQueueAddToRegistry() to add
+ * a queue, semaphore or mutex handle to the registry if you want the handle
+ * to be available to a kernel aware debugger. If you are not using a kernel
+ * aware debugger then this function can be ignored.
+ *
+ * configQUEUE_REGISTRY_SIZE defines the maximum number of handles the
+ * registry can hold. configQUEUE_REGISTRY_SIZE must be greater than 0
+ * within FreeRTOSConfig.h for the registry to be available. Its value
+ * does not affect the number of queues, semaphores and mutexes that can be
+ * created - just the number that the registry can hold.
+ *
+ * @param xQueue The handle of the queue being added to the registry. This
+ * is the handle returned by a call to xQueueCreate(). Semaphore and mutex
+ * handles can also be passed in here.
+ *
+ * @param pcName The name to be associated with the handle. This is the
+ * name that the kernel aware debugger will display. The queue registry only
+ * stores a pointer to the string - so the string must be persistent (global or
+ * preferably in ROM/Flash), not on the stack.
+ */
+#if( configQUEUE_REGISTRY_SIZE > 0 )
+ void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+#endif
+
+/*
+ * The registry is provided as a means for kernel aware debuggers to
+ * locate queues, semaphores and mutexes. Call vQueueAddToRegistry() to add
+ * a queue, semaphore or mutex handle to the registry if you want the handle
+ * to be available to a kernel aware debugger, and vQueueUnregisterQueue() to
+ * remove the queue, semaphore or mutex from the register. If you are not using
+ * a kernel aware debugger then this function can be ignored.
+ *
+ * @param xQueue The handle of the queue being removed from the registry.
+ */
+#if( configQUEUE_REGISTRY_SIZE > 0 )
+ void vQueueUnregisterQueue( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+#endif
+
+/*
+ * The queue registry is provided as a means for kernel aware debuggers to
+ * locate queues, semaphores and mutexes. Call pcQueueGetName() to look
+ * up and return the name of a queue in the queue registry from the queue's
+ * handle.
+ *
+ * @param xQueue The handle of the queue the name of which will be returned.
+ * @return If the queue is in the registry then a pointer to the name of the
+ * queue is returned. If the queue is not in the registry then NULL is
+ * returned.
+ */
+#if( configQUEUE_REGISTRY_SIZE > 0 )
+ const char *pcQueueGetName( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+#endif
+
+/*
+ * Generic version of the function used to create a queue using dynamic memory
+ * allocation. This is called by other functions and macros that create other
+ * RTOS objects that use the queue structure as their base.
+ */
+#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+ QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) PRIVILEGED_FUNCTION;
+#endif
+
+/*
+ * Generic version of the function used to create a queue using static memory
+ * allocation. This is called by other functions and macros that create other
+ * RTOS objects that use the queue structure as their base.
+ */
+#if( configSUPPORT_STATIC_ALLOCATION == 1 )
+ QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ) PRIVILEGED_FUNCTION;
+#endif
+
+/*
+ * Queue sets provide a mechanism to allow a task to block (pend) on a read
+ * operation from multiple queues or semaphores simultaneously.
+ *
+ * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this
+ * function.
+ *
+ * A queue set must be explicitly created using a call to xQueueCreateSet()
+ * before it can be used. Once created, standard FreeRTOS queues and semaphores
+ * can be added to the set using calls to xQueueAddToSet().
+ * xQueueSelectFromSet() is then used to determine which, if any, of the queues
+ * or semaphores contained in the set is in a state where a queue read or
+ * semaphore take operation would be successful.
+ *
+ * Note 1: See the documentation on http://www.FreeRTOS.org/RTOS-queue-sets.html
+ * for reasons why queue sets are very rarely needed in practice as there are
+ * simpler methods of blocking on multiple objects.
+ *
+ * Note 2: Blocking on a queue set that contains a mutex will not cause the
+ * mutex holder to inherit the priority of the blocked task.
+ *
+ * Note 3: An additional 4 bytes of RAM is required for each space in every
+ * queue added to a queue set. Therefore counting semaphores that have a high
+ * maximum count value should not be added to a queue set.
+ *
+ * Note 4: A receive (in the case of a queue) or take (in the case of a
+ * semaphore) operation must not be performed on a member of a queue set unless
+ * a call to xQueueSelectFromSet() has first returned a handle to that set member.
+ *
+ * @param uxEventQueueLength Queue sets store events that occur on
+ * the queues and semaphores contained in the set. uxEventQueueLength specifies
+ * the maximum number of events that can be queued at once. To be absolutely
+ * certain that events are not lost uxEventQueueLength should be set to the
+ * total sum of the length of the queues added to the set, where binary
+ * semaphores and mutexes have a length of 1, and counting semaphores have a
+ * length set by their maximum count value. Examples:
+ * + If a queue set is to hold a queue of length 5, another queue of length 12,
+ * and a binary semaphore, then uxEventQueueLength should be set to
+ * (5 + 12 + 1), or 18.
+ * + If a queue set is to hold three binary semaphores then uxEventQueueLength
+ * should be set to (1 + 1 + 1), or 3.
+ * + If a queue set is to hold a counting semaphore that has a maximum count of
+ * 5, and a counting semaphore that has a maximum count of 3, then
+ * uxEventQueueLength should be set to (5 + 3), or 8.
+ *
+ * @return If the queue set is created successfully then a handle to the created
+ * queue set is returned. Otherwise NULL is returned.
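+ *
+ * Example usage (a minimal sketch added here for illustration only - it is
+ * not taken from the FreeRTOS demo sources, the task and handle names are
+ * hypothetical, and configUSE_QUEUE_SETS is assumed to be set to 1 in
+ * FreeRTOSConfig.h):
+
+ void vQueueSetReaderTask( void *pvParameters )
+ {
+ QueueSetHandle_t xQueueSet;
+ QueueHandle_t xQueue1, xQueue2;
+ QueueSetMemberHandle_t xActivatedMember;
+ uint32_t ulReceived;
+
+ // Create two queues, each holding up to 10 uint32_t values, so per the
+ // guidance above the set must be able to hold 10 + 10 = 20 events.
+ xQueue1 = xQueueCreate( 10, sizeof( uint32_t ) );
+ xQueue2 = xQueueCreate( 10, sizeof( uint32_t ) );
+ xQueueSet = xQueueCreateSet( 10 + 10 );
+
+ // Add the queues to the set before anything is sent to them.
+ xQueueAddToSet( xQueue1, xQueueSet );
+ xQueueAddToSet( xQueue2, xQueueSet );
+
+ for( ;; )
+ {
+ // Block until one of the queues in the set contains data.
+ xActivatedMember = xQueueSelectFromSet( xQueueSet, portMAX_DELAY );
+
+ // Per Note 4 above, only read from the member that was returned.
+ if( ( xActivatedMember == xQueue1 ) || ( xActivatedMember == xQueue2 ) )
+ {
+ xQueueReceive( ( QueueHandle_t ) xActivatedMember, &ulReceived, 0 );
+ }
+ }
+ }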
+ */
+QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength ) PRIVILEGED_FUNCTION;
+
+/*
+ * Adds a queue or semaphore to a queue set that was previously created by a
+ * call to xQueueCreateSet().
+ *
+ * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this
+ * function.
+ *
+ * Note 1: A receive (in the case of a queue) or take (in the case of a
+ * semaphore) operation must not be performed on a member of a queue set unless
+ * a call to xQueueSelectFromSet() has first returned a handle to that set member.
+ *
+ * @param xQueueOrSemaphore The handle of the queue or semaphore being added to
+ * the queue set (cast to a QueueSetMemberHandle_t type).
+ *
+ * @param xQueueSet The handle of the queue set to which the queue or semaphore
+ * is being added.
+ *
+ * @return If the queue or semaphore was successfully added to the queue set
+ * then pdPASS is returned. If the queue could not be successfully added to the
+ * queue set because it is already a member of a different queue set then pdFAIL
+ * is returned.
+ */
+BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION;
+
+/*
+ * Removes a queue or semaphore from a queue set. A queue or semaphore can only
+ * be removed from a set if the queue or semaphore is empty.
+ *
+ * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this
+ * function.
+ *
+ * @param xQueueOrSemaphore The handle of the queue or semaphore being removed
+ * from the queue set (cast to a QueueSetMemberHandle_t type).
+ *
+ * @param xQueueSet The handle of the queue set in which the queue or semaphore
+ * is included.
+ *
+ * @return If the queue or semaphore was successfully removed from the queue set
+ * then pdPASS is returned. If the queue was not in the queue set, or the
+ * queue (or semaphore) was not empty, then pdFAIL is returned.
+ */
+BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION;
+
+/*
+ * xQueueSelectFromSet() selects from the members of a queue set a queue or
+ * semaphore that either contains data (in the case of a queue) or is available
+ * to take (in the case of a semaphore). xQueueSelectFromSet() effectively
+ * allows a task to block (pend) on a read operation on all the queues and
+ * semaphores in a queue set simultaneously.
+ *
+ * See FreeRTOS/Source/Demo/Common/Minimal/QueueSet.c for an example using this
+ * function.
+ *
+ * Note 1: See the documentation on http://www.FreeRTOS.org/RTOS-queue-sets.html
+ * for reasons why queue sets are very rarely needed in practice as there are
+ * simpler methods of blocking on multiple objects.
+ *
+ * Note 2: Blocking on a queue set that contains a mutex will not cause the
+ * mutex holder to inherit the priority of the blocked task.
+ *
+ * Note 3: A receive (in the case of a queue) or take (in the case of a
+ * semaphore) operation must not be performed on a member of a queue set unless
+ * a call to xQueueSelectFromSet() has first returned a handle to that set member.
+ *
+ * @param xQueueSet The queue set on which the task will (potentially) block.
+ *
+ * @param xTicksToWait The maximum time, in ticks, that the calling task will
+ * remain in the Blocked state (with other tasks executing) to wait for a member
+ * of the queue set to be ready for a successful queue read or semaphore take
+ * operation.
+ *
+ * @return xQueueSelectFromSet() will return the handle of a queue (cast to
+ * a QueueSetMemberHandle_t type) contained in the queue set that contains data,
+ * or the handle of a semaphore (cast to a QueueSetMemberHandle_t type) contained
+ * in the queue set that is available, or NULL if no such queue or semaphore
+ * exists before the specified block time expires.
+ */
+QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/*
+ * A version of xQueueSelectFromSet() that can be used from an ISR.
+ */
+QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION;
+
+/* Not public API functions. */
+void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION;
+BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) PRIVILEGED_FUNCTION;
+void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) PRIVILEGED_FUNCTION;
+UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* QUEUE_H */
+
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h
new file mode 100644
index 0000000..32b4e5e
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/semphr.h
@@ -0,0 +1,1140 @@
+/*
+ * FreeRTOS Kernel V10.2.1
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#ifndef SEMAPHORE_H
+#define SEMAPHORE_H
+
+#ifndef INC_FREERTOS_H
+ #error "include FreeRTOS.h" must appear in source files before "include semphr.h"
+#endif
+
+#include "queue.h"
+
+typedef QueueHandle_t SemaphoreHandle_t;
+
+#define semBINARY_SEMAPHORE_QUEUE_LENGTH ( ( uint8_t ) 1U )
+#define semSEMAPHORE_QUEUE_ITEM_LENGTH ( ( uint8_t ) 0U )
+#define semGIVE_BLOCK_TIME ( ( TickType_t ) 0U )
+
+
+/**
+ * semphr.
h
+ * vSemaphoreCreateBinary( SemaphoreHandle_t xSemaphore )
+ *
+ * In many usage scenarios it is faster and more memory efficient to use a
+ * direct to task notification in place of a binary semaphore!
+ * http://www.freertos.org/RTOS-task-notifications.html
+ *
+ * This old vSemaphoreCreateBinary() macro is now deprecated in favour of the
+ * xSemaphoreCreateBinary() function. Note that binary semaphores created using
+ * the vSemaphoreCreateBinary() macro are created in a state such that the
+ * first call to 'take' the semaphore would pass, whereas binary semaphores
+ * created using xSemaphoreCreateBinary() are created in a state such that the
+ * semaphore must first be 'given' before it can be 'taken'.
+ *
+ * Macro that implements a semaphore by using the existing queue mechanism.
+ * The queue length is 1 as this is a binary semaphore. The data size is 0
+ * as we don't want to actually store any data - we just want to know if the
+ * queue is empty or full.
+ *
+ * This type of semaphore can be used for pure synchronisation between tasks or
+ * between an interrupt and a task. The semaphore need not be given back once
+ * obtained, so one task/interrupt can continuously 'give' the semaphore while
+ * another continuously 'takes' the semaphore. For this reason this type of
+ * semaphore does not use a priority inheritance mechanism. For an alternative
+ * that does use priority inheritance see xSemaphoreCreateMutex().
+ *
+ * @param xSemaphore Handle to the created semaphore. Should be of type SemaphoreHandle_t.
+ *
+ * Example usage:
+
+ SemaphoreHandle_t xSemaphore = NULL;
+
+ void vATask( void * pvParameters )
+ {
+ // Semaphore cannot be used before a call to vSemaphoreCreateBinary ().
+ // This is a macro so pass the variable in directly.
+ vSemaphoreCreateBinary( xSemaphore );
+
+ if( xSemaphore != NULL )
+ {
+ // The semaphore was created successfully.
+ // The semaphore can now be used.
+ }
+ }
+
+ * \defgroup vSemaphoreCreateBinary vSemaphoreCreateBinary
+ * \ingroup Semaphores
+ */
+#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+ #define vSemaphoreCreateBinary( xSemaphore ) \
+ { \
+ ( xSemaphore ) = xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ); \
+ if( ( xSemaphore ) != NULL ) \
+ { \
+ ( void ) xSemaphoreGive( ( xSemaphore ) ); \
+ } \
+ }
+#endif
+
+/**
+ * semphr. h
+ * SemaphoreHandle_t xSemaphoreCreateBinary( void )
+ *
+ * Creates a new binary semaphore instance, and returns a handle by which the
+ * new semaphore can be referenced.
+ *
+ * In many usage scenarios it is faster and more memory efficient to use a
+ * direct to task notification in place of a binary semaphore!
+ * http://www.freertos.org/RTOS-task-notifications.html
+ *
+ * Internally, within the FreeRTOS implementation, binary semaphores use a block
+ * of memory, in which the semaphore structure is stored. If a binary semaphore
+ * is created using xSemaphoreCreateBinary() then the required memory is
+ * automatically dynamically allocated inside the xSemaphoreCreateBinary()
+ * function. (see http://www.freertos.org/a00111.html). If a binary semaphore
+ * is created using xSemaphoreCreateBinaryStatic() then the application writer
+ * must provide the memory. xSemaphoreCreateBinaryStatic() therefore allows a
+ * binary semaphore to be created without using any dynamic memory allocation.
+ *
+ * The old vSemaphoreCreateBinary() macro is now deprecated in favour of this
+ * xSemaphoreCreateBinary() function.
Note that binary semaphores created using
+ * the vSemaphoreCreateBinary() macro are created in a state such that the
+ * first call to 'take' the semaphore would pass, whereas binary semaphores
+ * created using xSemaphoreCreateBinary() are created in a state such that the
+ * semaphore must first be 'given' before it can be 'taken'.
+ *
+ * This type of semaphore can be used for pure synchronisation between tasks or
+ * between an interrupt and a task. The semaphore need not be given back once
+ * obtained, so one task/interrupt can continuously 'give' the semaphore while
+ * another continuously 'takes' the semaphore. For this reason this type of
+ * semaphore does not use a priority inheritance mechanism. For an alternative
+ * that does use priority inheritance see xSemaphoreCreateMutex().
+ *
+ * @return Handle to the created semaphore, or NULL if the memory required to
+ * hold the semaphore's data structures could not be allocated.
+ *
+ * Example usage:
+
+ SemaphoreHandle_t xSemaphore = NULL;
+
+ void vATask( void * pvParameters )
+ {
+ // Semaphore cannot be used before a call to xSemaphoreCreateBinary().
+ xSemaphore = xSemaphoreCreateBinary();
+
+ if( xSemaphore != NULL )
+ {
+ // The semaphore was created successfully.
+ // The semaphore can now be used.
+ }
+ }
+
+ * \defgroup xSemaphoreCreateBinary xSemaphoreCreateBinary
+ * \ingroup Semaphores
+ */
+#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+ #define xSemaphoreCreateBinary() xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE )
+#endif
+
+/**
+ * semphr. h
+ * SemaphoreHandle_t xSemaphoreCreateBinaryStatic( StaticSemaphore_t *pxSemaphoreBuffer )
+ *
+ * Creates a new binary semaphore instance, and returns a handle by which the
+ * new semaphore can be referenced.
+ *
+ * NOTE: In many usage scenarios it is faster and more memory efficient to use a
+ * direct to task notification in place of a binary semaphore!
+ * http://www.freertos.org/RTOS-task-notifications.html
+ *
+ * Internally, within the FreeRTOS implementation, binary semaphores use a block
+ * of memory, in which the semaphore structure is stored. If a binary semaphore
+ * is created using xSemaphoreCreateBinary() then the required memory is
+ * automatically dynamically allocated inside the xSemaphoreCreateBinary()
+ * function. (see http://www.freertos.org/a00111.html). If a binary semaphore
+ * is created using xSemaphoreCreateBinaryStatic() then the application writer
+ * must provide the memory. xSemaphoreCreateBinaryStatic() therefore allows a
+ * binary semaphore to be created without using any dynamic memory allocation.
+ *
+ * This type of semaphore can be used for pure synchronisation between tasks or
+ * between an interrupt and a task. The semaphore need not be given back once
+ * obtained, so one task/interrupt can continuously 'give' the semaphore while
+ * another continuously 'takes' the semaphore. For this reason this type of
+ * semaphore does not use a priority inheritance mechanism. For an alternative
+ * that does use priority inheritance see xSemaphoreCreateMutex().
+ *
+ * @param pxSemaphoreBuffer Must point to a variable of type StaticSemaphore_t,
+ * which will then be used to hold the semaphore's data structure, removing the
+ * need for the memory to be allocated dynamically.
+ *
+ * @return If the semaphore is created then a handle to the created semaphore is
+ * returned. If pxSemaphoreBuffer is NULL then NULL is returned.
+ *
+ * Example usage:
+
+ SemaphoreHandle_t xSemaphore = NULL;
+ StaticSemaphore_t xSemaphoreBuffer;
+
+ void vATask( void * pvParameters )
+ {
+ // Semaphore cannot be used before a call to xSemaphoreCreateBinaryStatic().
+ // The semaphore's data structures will be placed in the xSemaphoreBuffer
+ // variable, the address of which is passed into the function. The
+ // function's parameter is not NULL, so the function will not attempt any
+ // dynamic memory allocation, and therefore the function will not
+ // return NULL.
+ xSemaphore = xSemaphoreCreateBinaryStatic( &xSemaphoreBuffer );
+
+ // Rest of task code goes here.
+ }
+
+ * \defgroup xSemaphoreCreateBinaryStatic xSemaphoreCreateBinaryStatic
+ * \ingroup Semaphores
+ */
+#if( configSUPPORT_STATIC_ALLOCATION == 1 )
+ #define xSemaphoreCreateBinaryStatic( pxStaticSemaphore ) xQueueGenericCreateStatic( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticSemaphore, queueQUEUE_TYPE_BINARY_SEMAPHORE )
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
+ * semphr. h
+ * xSemaphoreTake(
+ * SemaphoreHandle_t xSemaphore,
+ * TickType_t xBlockTime
+ * )
+ *
+ * Macro to obtain a semaphore. The semaphore must have previously been
+ * created with a call to xSemaphoreCreateBinary(), xSemaphoreCreateMutex() or
+ * xSemaphoreCreateCounting().
+ *
+ * @param xSemaphore A handle to the semaphore being taken - obtained when
+ * the semaphore was created.
+ *
+ * @param xBlockTime The time in ticks to wait for the semaphore to become
+ * available. The macro portTICK_PERIOD_MS can be used to convert this to a
+ * real time. A block time of zero can be used to poll the semaphore. A block
+ * time of portMAX_DELAY can be used to block indefinitely (provided
+ * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h).
+ *
+ * @return pdTRUE if the semaphore was obtained. pdFALSE
+ * if xBlockTime expired without the semaphore becoming available.
+ *
+ * Example usage:
+
+ SemaphoreHandle_t xSemaphore = NULL;
+
+ // A task that creates a semaphore.
+ void vATask( void * pvParameters )
+ {
+ // Create the semaphore to guard a shared resource.
+ xSemaphore = xSemaphoreCreateBinary();
+ }
+
+ // A task that uses the semaphore.
+ void vAnotherTask( void * pvParameters )
+ {
+ // ... Do other things.
+
+ if( xSemaphore != NULL )
+ {
+ // See if we can obtain the semaphore. If the semaphore is not available
+ // wait 10 ticks to see if it becomes free.
+ if( xSemaphoreTake( xSemaphore, ( TickType_t ) 10 ) == pdTRUE )
+ {
+ // We were able to obtain the semaphore and can now access the
+ // shared resource.
+
+ // ...
+
+ // We have finished accessing the shared resource. Release the
+ // semaphore.
+ xSemaphoreGive( xSemaphore );
+ }
+ else
+ {
+ // We could not obtain the semaphore and can therefore not access
+ // the shared resource safely.
+ }
+ }
+ }
+
+ * \defgroup xSemaphoreTake xSemaphoreTake
+ * \ingroup Semaphores
+ */
+#define xSemaphoreTake( xSemaphore, xBlockTime ) xQueueSemaphoreTake( ( xSemaphore ), ( xBlockTime ) )
+
+/**
+ * semphr. h
+ * xSemaphoreTakeRecursive(
+ * SemaphoreHandle_t xMutex,
+ * TickType_t xBlockTime
+ * )
+ *
+ * Macro to recursively obtain, or 'take', a mutex type semaphore.
+ * The mutex must have previously been created using a call to
+ * xSemaphoreCreateRecursiveMutex().
+ *
+ * configUSE_RECURSIVE_MUTEXES must be set to 1 in FreeRTOSConfig.h for this
+ * macro to be available.
+ *
+ * This macro must not be used on mutexes created using xSemaphoreCreateMutex().
+ *
+ * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex
+ * doesn't become available again until the owner has called
+ * xSemaphoreGiveRecursive() for each successful 'take' request. For example,
+ * if a task successfully 'takes' the same mutex 5 times then the mutex will
+ * not be available to any other task until it has also 'given' the mutex back
+ * exactly five times.
+ *
+ * @param xMutex A handle to the mutex being obtained. This is the
+ * handle returned by xSemaphoreCreateRecursiveMutex().
+ *
+ * @param xBlockTime The time in ticks to wait for the semaphore to become
+ * available. The macro portTICK_PERIOD_MS can be used to convert this to a
+ * real time. A block time of zero can be used to poll the semaphore. If
+ * the task already owns the semaphore then xSemaphoreTakeRecursive() will
+ * return immediately no matter what the value of xBlockTime.
+ *
+ * @return pdTRUE if the semaphore was obtained. pdFALSE if xBlockTime
+ * expired without the semaphore becoming available.
+ *
+ * Example usage:
+
+ SemaphoreHandle_t xMutex = NULL;
+
+ // A task that creates a mutex.
+ void vATask( void * pvParameters )
+ {
+ // Create the mutex to guard a shared resource.
+ xMutex = xSemaphoreCreateRecursiveMutex();
+ }
+
+ // A task that uses the mutex.
+ void vAnotherTask( void * pvParameters )
+ {
+ // ... Do other things.
+
+ if( xMutex != NULL )
+ {
+ // See if we can obtain the mutex. If the mutex is not available
+ // wait 10 ticks to see if it becomes free.
+ if( xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ) == pdTRUE )
+ {
+ // We were able to obtain the mutex and can now access the
+ // shared resource.
+
+ // ...
+ // For some reason due to the nature of the code further calls to
+ // xSemaphoreTakeRecursive() are made on the same mutex. In real
+ // code these would not be just sequential calls as this would make
+ // no sense. Instead the calls are likely to be buried inside
+ // a more complex call structure.
+ xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 );
+ xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 );
+
+ // The mutex has now been 'taken' three times, so will not be
+ // available to another task until it has also been given back
+ // three times. Again it is unlikely that real code would have
+ // these calls sequentially, but instead buried in a more complex
+ // call structure. This is just for illustrative purposes.
+ xSemaphoreGiveRecursive( xMutex );
+ xSemaphoreGiveRecursive( xMutex );
+ xSemaphoreGiveRecursive( xMutex );
+
+ // Now the mutex can be taken by other tasks.
+ }
+ else
+ {
+ // We could not obtain the mutex and can therefore not access
+ // the shared resource safely.
+ }
+ }
+ }
+
+ * \defgroup xSemaphoreTakeRecursive xSemaphoreTakeRecursive
+ * \ingroup Semaphores
+ */
+#if( configUSE_RECURSIVE_MUTEXES == 1 )
+ #define xSemaphoreTakeRecursive( xMutex, xBlockTime ) xQueueTakeMutexRecursive( ( xMutex ), ( xBlockTime ) )
+#endif
+
+/**
+ * semphr. h
+ * xSemaphoreGive( SemaphoreHandle_t xSemaphore )
+ *
+ * Macro to release a semaphore. The semaphore must have previously been
+ * created with a call to xSemaphoreCreateBinary(), xSemaphoreCreateMutex() or
+ * xSemaphoreCreateCounting(), and obtained using xSemaphoreTake().
+ *
+ * This macro must not be used from an ISR. See xSemaphoreGiveFromISR () for
+ * an alternative which can be used from an ISR.
+ *
+ * This macro must also not be used on semaphores created using
+ * xSemaphoreCreateRecursiveMutex().
+ *
+ * @param xSemaphore A handle to the semaphore being released. This is the
+ * handle returned when the semaphore was created.
+ *
+ * @return pdTRUE if the semaphore was released. pdFALSE if an error occurred.
+ * Semaphores are implemented using queues. An error can occur if there is
+ * no space on the queue to post a message - indicating that the
+ * semaphore was not first obtained correctly.
+ *
+ * Example usage:
+
+ SemaphoreHandle_t xSemaphore = NULL;
+
+ void vATask( void * pvParameters )
+ {
+ // Create the semaphore to guard a shared resource.
+ vSemaphoreCreateBinary( xSemaphore );
+
+ if( xSemaphore != NULL )
+ {
+ if( xSemaphoreGive( xSemaphore ) != pdTRUE )
+ {
+ // We would expect this call to fail because we cannot give
+ // a semaphore without first "taking" it!
+ }
+
+ // Obtain the semaphore - don't block if the semaphore is not
+ // immediately available.
+ if( xSemaphoreTake( xSemaphore, ( TickType_t ) 0 ) )
+ {
+ // We now have the semaphore and can access the shared resource.
+
+ // ...
+
+ // We have finished accessing the shared resource so can free the
+ // semaphore.
+ if( xSemaphoreGive( xSemaphore ) != pdTRUE )
+ {
+ // We would not expect this call to fail because we must have
+ // obtained the semaphore to get here.
+ }
+ }
+ }
+ }
+
+ * \defgroup xSemaphoreGive xSemaphoreGive
+ * \ingroup Semaphores
+ */
+#define xSemaphoreGive( xSemaphore ) xQueueGenericSend( ( QueueHandle_t ) ( xSemaphore ), NULL, semGIVE_BLOCK_TIME, queueSEND_TO_BACK )
+
+/**
+ * semphr. h
+ * xSemaphoreGiveRecursive( SemaphoreHandle_t xMutex )
+ *
+ * Macro to recursively release, or 'give', a mutex type semaphore.
+ * The mutex must have previously been created using a call to
+ * xSemaphoreCreateRecursiveMutex().
+ *
+ * configUSE_RECURSIVE_MUTEXES must be set to 1 in FreeRTOSConfig.h for this
+ * macro to be available.
+ *
+ * This macro must not be used on mutexes created using xSemaphoreCreateMutex().
+ *
+ * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex
+ * doesn't become available again until the owner has called
+ * xSemaphoreGiveRecursive() for each successful 'take' request. For example,
+ * if a task successfully 'takes' the same mutex 5 times then the mutex will
+ * not be available to any other task until it has also 'given' the mutex back
+ * exactly five times.
+ *
+ * @param xMutex A handle to the mutex being released, or 'given'. This is the
+ * handle returned by xSemaphoreCreateRecursiveMutex().
+ *
+ * @return pdTRUE if the semaphore was given.
+ *
+ * Example usage:
+
+ SemaphoreHandle_t xMutex = NULL;
+
+ // A task that creates a mutex.
+ void vATask( void * pvParameters )
+ {
+ // Create the mutex to guard a shared resource.
+ xMutex = xSemaphoreCreateRecursiveMutex();
+ }
+
+ // A task that uses the mutex.
+ void vAnotherTask( void * pvParameters )
+ {
+ // ... Do other things.
+
+ if( xMutex != NULL )
+ {
+ // See if we can obtain the mutex. If the mutex is not available
+ // wait 10 ticks to see if it becomes free.
+ if( xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ) == pdTRUE )
+ {
+ // We were able to obtain the mutex and can now access the
+ // shared resource.
+
+ // ...
+ // For some reason due to the nature of the code further calls to
+ // xSemaphoreTakeRecursive() are made on the same mutex. In real
+ // code these would not be just sequential calls as this would make
+ // no sense. Instead the calls are likely to be buried inside
+ // a more complex call structure.
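+ // Each nested 'take' below must later be balanced by its own 'give'.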
+ xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ); + xSemaphoreTakeRecursive( xMutex, ( TickType_t ) 10 ); + + // The mutex has now been 'taken' three times, so will not be + // available to another task until it has also been given back + // three times. Again it is unlikely that real code would have + // these calls sequentially, it would be more likely that the calls + // to xSemaphoreGiveRecursive() would be called as a call stack + // unwound. This is just for demonstrative purposes. + xSemaphoreGiveRecursive( xMutex ); + xSemaphoreGiveRecursive( xMutex ); + xSemaphoreGiveRecursive( xMutex ); + + // Now the mutex can be taken by other tasks. + } + else + { + // We could not obtain the mutex and can therefore not access + // the shared resource safely. + } + } + } ++ * \defgroup xSemaphoreGiveRecursive xSemaphoreGiveRecursive + * \ingroup Semaphores + */ +#if( configUSE_RECURSIVE_MUTEXES == 1 ) + #define xSemaphoreGiveRecursive( xMutex ) xQueueGiveMutexRecursive( ( xMutex ) ) +#endif + +/** + * semphr. h + *+ xSemaphoreGiveFromISR( + SemaphoreHandle_t xSemaphore, + BaseType_t *pxHigherPriorityTaskWoken + )+ * + * Macro to release a semaphore. The semaphore must have previously been + * created with a call to xSemaphoreCreateBinary() or xSemaphoreCreateCounting(). + * + * Mutex type semaphores (those created using a call to xSemaphoreCreateMutex()) + * must not be used with this macro. + * + * This macro can be used from an ISR. + * + * @param xSemaphore A handle to the semaphore being released. This is the + * handle returned when the semaphore was created. + * + * @param pxHigherPriorityTaskWoken xSemaphoreGiveFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if giving the semaphore caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xSemaphoreGiveFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the semaphore was successfully given, otherwise errQUEUE_FULL. + * + * Example usage: ++ \#define LONG_TIME 0xffff + \#define TICKS_TO_WAIT 10 + SemaphoreHandle_t xSemaphore = NULL; + + // Repetitive task. + void vATask( void * pvParameters ) + { + for( ;; ) + { + // We want this task to run every 10 ticks of a timer. The semaphore + // was created before this task was started. + + // Block waiting for the semaphore to become available. + if( xSemaphoreTake( xSemaphore, LONG_TIME ) == pdTRUE ) + { + // It is time to execute. + + // ... + + // We have finished our task. Return to the top of the loop where + // we will block on the semaphore until it is time to execute + // again. Note when using the semaphore for synchronisation with an + // ISR in this manner there is no need to 'give' the semaphore back. + } + } + } + + // Timer ISR + void vTimerISR( void * pvParameters ) + { + static uint8_t ucLocalTickCount = 0; + static BaseType_t xHigherPriorityTaskWoken; + + // A timer tick has occurred. + + // ... Do other time functions. + + // Is it time for vATask () to run? + xHigherPriorityTaskWoken = pdFALSE; + ucLocalTickCount++; + if( ucLocalTickCount >= TICKS_TO_WAIT ) + { + // Unblock the task by releasing the semaphore. + xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken ); + + // Reset the count so we release the semaphore again in 10 ticks time. + ucLocalTickCount = 0; + } + + if( xHigherPriorityTaskWoken != pdFALSE ) + { + // We can force a context switch here. 
Context switching from an + // ISR uses port specific syntax. Check the demo task for your port + // to find the syntax required. + } + } ++ * \defgroup xSemaphoreGiveFromISR xSemaphoreGiveFromISR + * \ingroup Semaphores + */ +#define xSemaphoreGiveFromISR( xSemaphore, pxHigherPriorityTaskWoken ) xQueueGiveFromISR( ( QueueHandle_t ) ( xSemaphore ), ( pxHigherPriorityTaskWoken ) ) + +/** + * semphr. h + *+ xSemaphoreTakeFromISR( + SemaphoreHandle_t xSemaphore, + BaseType_t *pxHigherPriorityTaskWoken + )+ * + * Macro to take a semaphore from an ISR. The semaphore must have + * previously been created with a call to xSemaphoreCreateBinary() or + * xSemaphoreCreateCounting(). + * + * Mutex type semaphores (those created using a call to xSemaphoreCreateMutex()) + * must not be used with this macro. + * + * This macro can be used from an ISR, however taking a semaphore from an ISR + * is not a common operation. It is likely to only be useful when taking a + * counting semaphore when an interrupt is obtaining an object from a resource + * pool (when the semaphore count indicates the number of resources available). + * + * @param xSemaphore A handle to the semaphore being taken. This is the + * handle returned when the semaphore was created. + * + * @param pxHigherPriorityTaskWoken xSemaphoreTakeFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if taking the semaphore caused a task + * to unblock, and the unblocked task has a priority higher than the currently + * running task. If xSemaphoreTakeFromISR() sets this value to pdTRUE then + * a context switch should be requested before the interrupt is exited. + * + * @return pdTRUE if the semaphore was successfully taken, otherwise + * pdFALSE + */ +#define xSemaphoreTakeFromISR( xSemaphore, pxHigherPriorityTaskWoken ) xQueueReceiveFromISR( ( QueueHandle_t ) ( xSemaphore ), NULL, ( pxHigherPriorityTaskWoken ) ) + +/** + * semphr. h + *SemaphoreHandle_t xSemaphoreCreateMutex( void )+ * + * Creates a new mutex type semaphore instance, and returns a handle by which + * the new mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, mutex semaphores use a block + * of memory, in which the mutex structure is stored. If a mutex is created + * using xSemaphoreCreateMutex() then the required memory is automatically + * dynamically allocated inside the xSemaphoreCreateMutex() function. (see + * http://www.freertos.org/a00111.html). If a mutex is created using + * xSemaphoreCreateMutexStatic() then the application writer must provided the + * memory. xSemaphoreCreateMutexStatic() therefore allows a mutex to be created + * without using any dynamic memory allocation. + * + * Mutexes created using this function can be accessed using the xSemaphoreTake() + * and xSemaphoreGive() macros. The xSemaphoreTakeRecursive() and + * xSemaphoreGiveRecursive() macros must not be used. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore it is no longer required. + * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @return If the mutex was successfully created then a handle to the created + * semaphore is returned. 
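+
+/*
+ * Example use of xSemaphoreTakeFromISR() - an illustrative sketch only; the
+ * names xBufferPoolSemaphore and vDMACompleteISR are hypothetical. A counting
+ * semaphore whose count tracks the number of free buffers in a pool is
+ * 'taken' from an ISR to claim one buffer:
+ *
+ * // A counting semaphore created elsewhere with xSemaphoreCreateCounting(),
+ * // one count per free buffer in the pool.
+ * SemaphoreHandle_t xBufferPoolSemaphore;
+ *
+ * void vDMACompleteISR( void )
+ * {
+ * BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+ *
+ *     // Claim a buffer without blocking - an ISR must never block.
+ *     if( xSemaphoreTakeFromISR( xBufferPoolSemaphore, &xHigherPriorityTaskWoken ) == pdTRUE )
+ *     {
+ *         // A buffer was available and the count has been decremented;
+ *         // the claimed buffer can be used here.
+ *     }
+ *
+ *     // Request a context switch before exiting the ISR if one is needed.
+ *     // The exact syntax is port specific; most ports provide
+ *     // taskYIELD_FROM_ISR() for this purpose.
+ *     taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+ * }
+ */
+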
+/**
+ * semphr.h
+ * SemaphoreHandle_t xSemaphoreCreateMutex( void )
+ *
+ * Creates a new mutex type semaphore instance, and returns a handle by which
+ * the new mutex can be referenced.
+ *
+ * Internally, within the FreeRTOS implementation, mutex semaphores use a block
+ * of memory, in which the mutex structure is stored. If a mutex is created
+ * using xSemaphoreCreateMutex() then the required memory is automatically
+ * dynamically allocated inside the xSemaphoreCreateMutex() function. (see
+ * http://www.freertos.org/a00111.html). If a mutex is created using
+ * xSemaphoreCreateMutexStatic() then the application writer must provide the
+ * memory. xSemaphoreCreateMutexStatic() therefore allows a mutex to be created
+ * without using any dynamic memory allocation.
+ *
+ * Mutexes created using this function can be accessed using the xSemaphoreTake()
+ * and xSemaphoreGive() macros. The xSemaphoreTakeRecursive() and
+ * xSemaphoreGiveRecursive() macros must not be used.
+ *
+ * This type of semaphore uses a priority inheritance mechanism so a task
+ * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the
+ * semaphore is no longer required.
+ *
+ * Mutex type semaphores cannot be used from within interrupt service routines.
+ *
+ * See xSemaphoreCreateBinary() for an alternative implementation that can be
+ * used for pure synchronisation (where one task or interrupt always 'gives' the
+ * semaphore and another always 'takes' the semaphore) and from within interrupt
+ * service routines.
+ *
+ * @return If the mutex was successfully created then a handle to the created
+ * semaphore is returned. If there was not enough heap to allocate the mutex
+ * data structures then NULL is returned.
+ *
+ * Example usage:
+
+ SemaphoreHandle_t xSemaphore;
+
+ void vATask( void * pvParameters )
+ {
+     // The semaphore cannot be used before a call to xSemaphoreCreateMutex().
+     // This is a macro so pass the variable in directly.
+     xSemaphore = xSemaphoreCreateMutex();
+
+     if( xSemaphore != NULL )
+     {
+         // The semaphore was created successfully.
+         // The semaphore can now be used.
+     }
+ }
+
+ * \defgroup xSemaphoreCreateMutex xSemaphoreCreateMutex
+ * \ingroup Semaphores
+ */
+#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+    #define xSemaphoreCreateMutex()    xQueueCreateMutex( queueQUEUE_TYPE_MUTEX )
+#endif
+
+/**
+ * semphr.h
+ * SemaphoreHandle_t xSemaphoreCreateMutexStatic( StaticSemaphore_t *pxMutexBuffer )
+ *
+ * Creates a new mutex type semaphore instance, and returns a handle by which
+ * the new mutex can be referenced.
+ *
+ * Internally, within the FreeRTOS implementation, mutex semaphores use a block
+ * of memory, in which the mutex structure is stored. If a mutex is created
+ * using xSemaphoreCreateMutex() then the required memory is automatically
+ * dynamically allocated inside the xSemaphoreCreateMutex() function. (see
+ * http://www.freertos.org/a00111.html). If a mutex is created using
+ * xSemaphoreCreateMutexStatic() then the application writer must provide the
+ * memory. xSemaphoreCreateMutexStatic() therefore allows a mutex to be created
+ * without using any dynamic memory allocation.
+ *
+ * Mutexes created using this function can be accessed using the xSemaphoreTake()
+ * and xSemaphoreGive() macros. The xSemaphoreTakeRecursive() and
+ * xSemaphoreGiveRecursive() macros must not be used.
+ *
+ * This type of semaphore uses a priority inheritance mechanism so a task
+ * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the
+ * semaphore is no longer required.
+ *
+ * Mutex type semaphores cannot be used from within interrupt service routines.
+ *
+ * See xSemaphoreCreateBinary() for an alternative implementation that can be
+ * used for pure synchronisation (where one task or interrupt always 'gives' the
+ * semaphore and another always 'takes' the semaphore) and from within interrupt
+ * service routines.
+ *
+ * @param pxMutexBuffer Must point to a variable of type StaticSemaphore_t,
+ * which will be used to hold the mutex's data structure, removing the need for
+ * the memory to be allocated dynamically.
+ *
+ * @return If the mutex was successfully created then a handle to the created
+ * mutex is returned. If pxMutexBuffer was NULL then NULL is returned.
+ *
+ * Example usage:
+
+ SemaphoreHandle_t xSemaphore;
+ StaticSemaphore_t xMutexBuffer;
+
+ void vATask( void * pvParameters )
+ {
+     // A mutex cannot be used before it has been created. xMutexBuffer is
+     // passed into xSemaphoreCreateMutexStatic() so no dynamic memory
+     // allocation is attempted.
+     xSemaphore = xSemaphoreCreateMutexStatic( &xMutexBuffer );
+
+     // As no dynamic memory allocation was performed, xSemaphore cannot be NULL,
+     // so there is no need to check it.
+ }
+
+ * \defgroup xSemaphoreCreateMutexStatic xSemaphoreCreateMutexStatic
+ * \ingroup Semaphores
+ */
+#if( configSUPPORT_STATIC_ALLOCATION == 1 )
+    #define xSemaphoreCreateMutexStatic( pxMutexBuffer )    xQueueCreateMutexStatic( queueQUEUE_TYPE_MUTEX, ( pxMutexBuffer ) )
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+
+/**
+ * semphr.h
+ * SemaphoreHandle_t xSemaphoreCreateRecursiveMutex( void )
+ *
+ * Creates a new recursive mutex type semaphore instance, and returns a handle
+ * by which the new recursive mutex can be referenced.
+ *
+ * Internally, within the FreeRTOS implementation, recursive mutexes use a block
+ * of memory, in which the mutex structure is stored. If a recursive mutex is
+ * created using xSemaphoreCreateRecursiveMutex() then the required memory is
+ * automatically dynamically allocated inside the
+ * xSemaphoreCreateRecursiveMutex() function. (see
+ * http://www.freertos.org/a00111.html). If a recursive mutex is created using
+ * xSemaphoreCreateRecursiveMutexStatic() then the application writer must
+ * provide the memory that will get used by the mutex.
+ * xSemaphoreCreateRecursiveMutexStatic() therefore allows a recursive mutex to
+ * be created without using any dynamic memory allocation.
+ *
+ * Mutexes created using this macro can be accessed using the
+ * xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() macros. The
+ * xSemaphoreTake() and xSemaphoreGive() macros must not be used.
+ *
+ * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex
+ * doesn't become available again until the owner has called
+ * xSemaphoreGiveRecursive() for each successful 'take' request. For example,
+ * if a task successfully 'takes' the same mutex 5 times then the mutex will
+ * not be available to any other task until it has also 'given' the mutex back
+ * exactly five times.
+ *
+ * This type of semaphore uses a priority inheritance mechanism so a task
+ * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the
+ * semaphore is no longer required.
+ *
+ * Mutex type semaphores cannot be used from within interrupt service routines.
+ *
+ * See xSemaphoreCreateBinary() for an alternative implementation that can be
+ * used for pure synchronisation (where one task or interrupt always 'gives' the
+ * semaphore and another always 'takes' the semaphore) and from within interrupt
+ * service routines.
+ *
+ * @return xSemaphore Handle to the created mutex semaphore. Should be of type
+ * SemaphoreHandle_t.
+ *
+ * Example usage:
+
+ SemaphoreHandle_t xSemaphore;
+
+ void vATask( void * pvParameters )
+ {
+     // The semaphore cannot be used before a call to
+     // xSemaphoreCreateRecursiveMutex(). This is a macro so pass the variable
+     // in directly.
+     xSemaphore = xSemaphoreCreateRecursiveMutex();
+
+     if( xSemaphore != NULL )
+     {
+         // The semaphore was created successfully.
+         // The semaphore can now be used.
+     }
+ }
+
+ * \defgroup xSemaphoreCreateRecursiveMutex xSemaphoreCreateRecursiveMutex
+ * \ingroup Semaphores
+ */
+#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_RECURSIVE_MUTEXES == 1 ) )
+    #define xSemaphoreCreateRecursiveMutex()    xQueueCreateMutex( queueQUEUE_TYPE_RECURSIVE_MUTEX )
+#endif
+
+/**
+ * semphr.h
+ * SemaphoreHandle_t xSemaphoreCreateRecursiveMutexStatic( StaticSemaphore_t *pxMutexBuffer )
+ *
+ * Creates a new recursive mutex type semaphore instance, and returns a handle
+ * by which the new recursive mutex can be referenced.
+ *
+ * Internally, within the FreeRTOS implementation, recursive mutexes use a block
+ * of memory, in which the mutex structure is stored. If a recursive mutex is
+ * created using xSemaphoreCreateRecursiveMutex() then the required memory is
+ * automatically dynamically allocated inside the
+ * xSemaphoreCreateRecursiveMutex() function. (see
+ * http://www.freertos.org/a00111.html). If a recursive mutex is created using
+ * xSemaphoreCreateRecursiveMutexStatic() then the application writer must
+ * provide the memory that will get used by the mutex.
+ * xSemaphoreCreateRecursiveMutexStatic() therefore allows a recursive mutex to
+ * be created without using any dynamic memory allocation.
+ *
+ * Mutexes created using this macro can be accessed using the
+ * xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() macros. The
+ * xSemaphoreTake() and xSemaphoreGive() macros must not be used.
+ *
+ * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex
+ * doesn't become available again until the owner has called
+ * xSemaphoreGiveRecursive() for each successful 'take' request. For example,
+ * if a task successfully 'takes' the same mutex 5 times then the mutex will
+ * not be available to any other task until it has also 'given' the mutex back
+ * exactly five times.
+ *
+ * This type of semaphore uses a priority inheritance mechanism so a task
+ * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the
+ * semaphore is no longer required.
+ *
+ * Mutex type semaphores cannot be used from within interrupt service routines.
+ *
+ * See xSemaphoreCreateBinary() for an alternative implementation that can be
+ * used for pure synchronisation (where one task or interrupt always 'gives' the
+ * semaphore and another always 'takes' the semaphore) and from within interrupt
+ * service routines.
+ *
+ * @param pxMutexBuffer Must point to a variable of type StaticSemaphore_t,
+ * which will then be used to hold the recursive mutex's data structure,
+ * removing the need for the memory to be allocated dynamically.
+ *
+ * @return If the recursive mutex was successfully created then a handle to the
+ * created recursive mutex is returned. If pxMutexBuffer was NULL then NULL is
+ * returned.
+ *
+ * Example usage:
+
+ SemaphoreHandle_t xSemaphore;
+ StaticSemaphore_t xMutexBuffer;
+
+ void vATask( void * pvParameters )
+ {
+     // A recursive semaphore cannot be used before it is created. Here a
+     // recursive mutex is created using xSemaphoreCreateRecursiveMutexStatic().
+     // The address of xMutexBuffer is passed into the function, and will hold
+     // the mutex's data structures - so no dynamic memory allocation will be
+     // attempted.
+     xSemaphore = xSemaphoreCreateRecursiveMutexStatic( &xMutexBuffer );
+
+     // As no dynamic memory allocation was performed, xSemaphore cannot be NULL,
+     // so there is no need to check it.
+ }
+
+ * \defgroup xSemaphoreCreateRecursiveMutexStatic xSemaphoreCreateRecursiveMutexStatic
+ * \ingroup Semaphores
+ */
+#if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_RECURSIVE_MUTEXES == 1 ) )
+    #define xSemaphoreCreateRecursiveMutexStatic( pxStaticSemaphore )    xQueueCreateMutexStatic( queueQUEUE_TYPE_RECURSIVE_MUTEX, pxStaticSemaphore )
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
+ * semphr.h
+ * SemaphoreHandle_t xSemaphoreCreateCounting( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount )
+ *
+ * Creates a new counting semaphore instance, and returns a handle by which the
+ * new counting semaphore can be referenced.
+ *
+ * In many usage scenarios it is faster and more memory efficient to use a
+ * direct to task notification in place of a counting semaphore!
+ * http://www.freertos.org/RTOS-task-notifications.html
+ *
+ * Internally, within the FreeRTOS implementation, counting semaphores use a
+ * block of memory, in which the counting semaphore structure is stored. If a
+ * counting semaphore is created using xSemaphoreCreateCounting() then the
+ * required memory is automatically dynamically allocated inside the
+ * xSemaphoreCreateCounting() function. (see
+ * http://www.freertos.org/a00111.html). If a counting semaphore is created
+ * using xSemaphoreCreateCountingStatic() then the application writer can
+ * instead optionally provide the memory that will get used by the counting
+ * semaphore. xSemaphoreCreateCountingStatic() therefore allows a counting
+ * semaphore to be created without using any dynamic memory allocation.
+ *
+ * Counting semaphores are typically used for two things:
+ *
+ * 1) Counting events.
+ *
+ *    In this usage scenario an event handler will 'give' a semaphore each time
+ *    an event occurs (incrementing the semaphore count value), and a handler
+ *    task will 'take' a semaphore each time it processes an event
+ *    (decrementing the semaphore count value). The count value is therefore
+ *    the difference between the number of events that have occurred and the
+ *    number that have been processed. In this case it is desirable for the
+ *    initial count value to be zero.
+ *
+ * 2) Resource management.
+ *
+ *    In this usage scenario the count value indicates the number of resources
+ *    available. To obtain control of a resource a task must first obtain a
+ *    semaphore - decrementing the semaphore count value. When the count value
+ *    reaches zero there are no free resources. When a task finishes with the
+ *    resource it 'gives' the semaphore back - incrementing the semaphore count
+ *    value. In this case it is desirable for the initial count value to be
+ *    equal to the maximum count value, indicating that all resources are free.
+ *
+ * @param uxMaxCount The maximum count value that can be reached. When the
+ * semaphore reaches this value it can no longer be 'given'.
+ *
+ * @param uxInitialCount The count value assigned to the semaphore when it is
+ * created.
+ *
+ * @return Handle to the created semaphore. NULL if the semaphore could not be
+ * created.
+ *
+ * Example usage:
+
+ SemaphoreHandle_t xSemaphore;
+
+ void vATask( void * pvParameters )
+ {
+     // The semaphore cannot be used before a call to xSemaphoreCreateCounting().
+     // The maximum value to which the semaphore can count is 10, and the
+     // initial value assigned to the count is 0.
+     xSemaphore = xSemaphoreCreateCounting( 10, 0 );
+
+     if( xSemaphore != NULL )
+     {
+         // The semaphore was created successfully.
+         // The semaphore can now be used.
+     }
+ }
+
+ * \defgroup xSemaphoreCreateCounting xSemaphoreCreateCounting
+ * \ingroup Semaphores
+ */
+#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+    #define xSemaphoreCreateCounting( uxMaxCount, uxInitialCount )    xQueueCreateCountingSemaphore( ( uxMaxCount ), ( uxInitialCount ) )
+#endif
+
+/**
+ * semphr.h
+ * SemaphoreHandle_t xSemaphoreCreateCountingStatic( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount, StaticSemaphore_t *pxSemaphoreBuffer )
+ *
+ * Creates a new counting semaphore instance, and returns a handle by which the
+ * new counting semaphore can be referenced.
+ *
+ * In many usage scenarios it is faster and more memory efficient to use a
+ * direct to task notification in place of a counting semaphore!
+ * http://www.freertos.org/RTOS-task-notifications.html
+ *
+ * Internally, within the FreeRTOS implementation, counting semaphores use a
+ * block of memory, in which the counting semaphore structure is stored. If a
+ * counting semaphore is created using xSemaphoreCreateCounting() then the
+ * required memory is automatically dynamically allocated inside the
+ * xSemaphoreCreateCounting() function. (see
+ * http://www.freertos.org/a00111.html). If a counting semaphore is created
+ * using xSemaphoreCreateCountingStatic() then the application writer must
+ * provide the memory. xSemaphoreCreateCountingStatic() therefore allows a
+ * counting semaphore to be created without using any dynamic memory allocation.
+ *
+ * Counting semaphores are typically used for two things:
+ *
+ * 1) Counting events.
+ *
+ *    In this usage scenario an event handler will 'give' a semaphore each time
+ *    an event occurs (incrementing the semaphore count value), and a handler
+ *    task will 'take' a semaphore each time it processes an event
+ *    (decrementing the semaphore count value). The count value is therefore
+ *    the difference between the number of events that have occurred and the
+ *    number that have been processed. In this case it is desirable for the
+ *    initial count value to be zero.
+ *
+ * 2) Resource management.
+ *
+ *    In this usage scenario the count value indicates the number of resources
+ *    available. To obtain control of a resource a task must first obtain a
+ *    semaphore - decrementing the semaphore count value. When the count value
+ *    reaches zero there are no free resources. When a task finishes with the
+ *    resource it 'gives' the semaphore back - incrementing the semaphore count
+ *    value. In this case it is desirable for the initial count value to be
+ *    equal to the maximum count value, indicating that all resources are free.
+ *
+ * @param uxMaxCount The maximum count value that can be reached. When the
+ * semaphore reaches this value it can no longer be 'given'.
+ *
+ * @param uxInitialCount The count value assigned to the semaphore when it is
+ * created.
+ *
+ * @param pxSemaphoreBuffer Must point to a variable of type StaticSemaphore_t,
+ * which will then be used to hold the semaphore's data structure, removing the
+ * need for the memory to be allocated dynamically.
+ *
+ * @return If the counting semaphore was successfully created then a handle to
+ * the created counting semaphore is returned. If pxSemaphoreBuffer was NULL
+ * then NULL is returned.
+ *
+ * Example usage:
+
+ SemaphoreHandle_t xSemaphore;
+ StaticSemaphore_t xSemaphoreBuffer;
+
+ void vATask( void * pvParameters )
+ {
+     // Counting semaphores cannot be used before they have been created. Create
+     // a counting semaphore using xSemaphoreCreateCountingStatic(). The max
+     // value to which the semaphore can count is 10, and the initial value
+     // assigned to the count will be 0. The address of xSemaphoreBuffer is
+     // passed in and will be used to hold the semaphore structure, so no dynamic
+     // memory allocation will be used.
+     xSemaphore = xSemaphoreCreateCountingStatic( 10, 0, &xSemaphoreBuffer );
+
+     // No memory allocation was attempted so xSemaphore cannot be NULL, so there
+     // is no need to check its value.
+ }
+
+ * \defgroup xSemaphoreCreateCountingStatic xSemaphoreCreateCountingStatic
+ * \ingroup Semaphores
+ */
+#if( configSUPPORT_STATIC_ALLOCATION == 1 )
+    #define xSemaphoreCreateCountingStatic( uxMaxCount, uxInitialCount, pxSemaphoreBuffer )    xQueueCreateCountingSemaphoreStatic( ( uxMaxCount ), ( uxInitialCount ), ( pxSemaphoreBuffer ) )
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
+ * semphr.
h + *void vSemaphoreDelete( SemaphoreHandle_t xSemaphore );+ * + * Delete a semaphore. This function must be used with care. For example, + * do not delete a mutex type semaphore if the mutex is held by a task. + * + * @param xSemaphore A handle to the semaphore to be deleted. + * + * \defgroup vSemaphoreDelete vSemaphoreDelete + * \ingroup Semaphores + */ +#define vSemaphoreDelete( xSemaphore ) vQueueDelete( ( QueueHandle_t ) ( xSemaphore ) ) + +/** + * semphr.h + *TaskHandle_t xSemaphoreGetMutexHolder( SemaphoreHandle_t xMutex );+ * + * If xMutex is indeed a mutex type semaphore, return the current mutex holder. + * If xMutex is not a mutex type semaphore, or the mutex is available (not held + * by a task), return NULL. + * + * Note: This is a good way of determining if the calling task is the mutex + * holder, but not a good way of determining the identity of the mutex holder as + * the holder may change between the function exiting and the returned value + * being tested. + */ +#define xSemaphoreGetMutexHolder( xSemaphore ) xQueueGetMutexHolder( ( xSemaphore ) ) + +/** + * semphr.h + *TaskHandle_t xSemaphoreGetMutexHolderFromISR( SemaphoreHandle_t xMutex );+ * + * If xMutex is indeed a mutex type semaphore, return the current mutex holder. + * If xMutex is not a mutex type semaphore, or the mutex is available (not held + * by a task), return NULL. + * + */ +#define xSemaphoreGetMutexHolderFromISR( xSemaphore ) xQueueGetMutexHolderFromISR( ( xSemaphore ) ) + +/** + * semphr.h + *UBaseType_t uxSemaphoreGetCount( SemaphoreHandle_t xSemaphore );+ * + * If the semaphore is a counting semaphore then uxSemaphoreGetCount() returns + * its current count value. If the semaphore is a binary semaphore then + * uxSemaphoreGetCount() returns 1 if the semaphore is available, and 0 if the + * semaphore is not available. + * + */ +#define uxSemaphoreGetCount( xSemaphore ) uxQueueMessagesWaiting( ( QueueHandle_t ) ( xSemaphore ) ) + +#endif /* SEMAPHORE_H */ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h new file mode 100644 index 0000000..fb5ed15 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/stack_macros.h @@ -0,0 +1,129 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef STACK_MACROS_H +#define STACK_MACROS_H + +/* + * Call the stack overflow hook function if the stack of the task being swapped + * out is currently overflowed, or looks like it might have overflowed in the + * past. + * + * Setting configCHECK_FOR_STACK_OVERFLOW to 1 will cause the macro to check + * the current stack state only - comparing the current top of stack value to + * the stack limit. Setting configCHECK_FOR_STACK_OVERFLOW to greater than 1 + * will also cause the last few stack bytes to be checked to ensure the value + * to which the bytes were set when the task was created have not been + * overwritten. Note this second test does not guarantee that an overflowed + * stack will always be recognised. + */ + +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH < 0 ) ) + + /* Only the current stack state is to be checked. */ + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + /* Is the currently saved stack pointer within the stack limit? */ \ + if( pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH > 0 ) ) + + /* Only the current stack state is to be checked. */ + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + \ + /* Is the currently saved stack pointer within the stack limit? 
*/ \ + if( pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) ) + + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \ + const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \ + \ + if( ( pulStack[ 0 ] != ulCheckValue ) || \ + ( pulStack[ 1 ] != ulCheckValue ) || \ + ( pulStack[ 2 ] != ulCheckValue ) || \ + ( pulStack[ 3 ] != ulCheckValue ) ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ +/*-----------------------------------------------------------*/ + +#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH > 0 ) ) + + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + int8_t *pcEndOfStack = ( int8_t * ) pxCurrentTCB->pxEndOfStack; \ + static const uint8_t ucExpectedStackBytes[] = { tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE }; \ + \ + \ + pcEndOfStack -= sizeof( ucExpectedStackBytes ); \ + \ + /* Has the extremity of the task stack ever been written over? */ \ + if( memcmp( ( void * ) pcEndOfStack, ( void * ) ucExpectedStackBytes, sizeof( ucExpectedStackBytes ) ) != 0 ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ +/*-----------------------------------------------------------*/ + +/* Remove stack overflow macro if not being used. */ +#ifndef taskCHECK_FOR_STACK_OVERFLOW + #define taskCHECK_FOR_STACK_OVERFLOW() +#endif + + + +#endif /* STACK_MACROS_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h new file mode 100644 index 0000000..5f9e012 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/stream_buffer.h @@ -0,0 +1,855 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* + * Stream buffers are used to send a continuous stream of data from one task or + * interrupt to another. Their implementation is light weight, making them + * particularly suited for interrupt to task and core to core communication + * scenarios. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferRead()) inside a critical section section and set the + * receive block time to 0. + * + */ + +#ifndef STREAM_BUFFER_H +#define STREAM_BUFFER_H + +#if defined( __cplusplus ) +extern "C" { +#endif + +/** + * Type by which stream buffers are referenced. For example, a call to + * xStreamBufferCreate() returns an StreamBufferHandle_t variable that can + * then be used as a parameter to xStreamBufferSend(), xStreamBufferReceive(), + * etc. + */ +struct StreamBufferDef_t; +typedef struct StreamBufferDef_t * StreamBufferHandle_t; + + +/** + * message_buffer.h + * ++StreamBufferHandle_t xStreamBufferCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes ); ++ * + * Creates a new stream buffer using dynamically allocated memory. See + * xStreamBufferCreateStatic() for a version that uses statically allocated + * memory (memory that is allocated at compile time). + * + * configSUPPORT_DYNAMIC_ALLOCATION must be set to 1 or left undefined in + * FreeRTOSConfig.h for xStreamBufferCreate() to be available. + * + * @param xBufferSizeBytes The total number of bytes the stream buffer will be + * able to hold at any one time. + * + * @param xTriggerLevelBytes The number of bytes that must be in the stream + * buffer before a task that is blocked on the stream buffer to wait for data is + * moved out of the blocked state. For example, if a task is blocked on a read + * of an empty stream buffer that has a trigger level of 1 then the task will be + * unblocked when a single byte is written to the buffer or the task's block + * time expires. 
As another example, if a task is blocked on a read of an empty + * stream buffer that has a trigger level of 10 then the task will not be + * unblocked until the stream buffer contains at least 10 bytes or the task's + * block time expires. If a reading task's block time expires before the + * trigger level is reached then the task will still receive however many bytes + * are actually available. Setting a trigger level of 0 will result in a + * trigger level of 1 being used. It is not valid to specify a trigger level + * that is greater than the buffer size. + * + * @return If NULL is returned, then the stream buffer cannot be created + * because there is insufficient heap memory available for FreeRTOS to allocate + * the stream buffer data structures and storage area. A non-NULL value being + * returned indicates that the stream buffer has been created successfully - + * the returned value should be stored as the handle to the created stream + * buffer. + * + * Example use: ++ +void vAFunction( void ) +{ +StreamBufferHandle_t xStreamBuffer; +const size_t xStreamBufferSizeBytes = 100, xTriggerLevel = 10; + + // Create a stream buffer that can hold 100 bytes. The memory used to hold + // both the stream buffer structure and the data in the stream buffer is + // allocated dynamically. + xStreamBuffer = xStreamBufferCreate( xStreamBufferSizeBytes, xTriggerLevel ); + + if( xStreamBuffer == NULL ) + { + // There was not enough heap memory space available to create the + // stream buffer. + } + else + { + // The stream buffer was created successfully and can now be used. + } +} ++ * \defgroup xStreamBufferCreate xStreamBufferCreate + * \ingroup StreamBufferManagement + */ +#define xStreamBufferCreate( xBufferSizeBytes, xTriggerLevelBytes ) xStreamBufferGenericCreate( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE ) + +/** + * stream_buffer.h + * ++StreamBufferHandle_t xStreamBufferCreateStatic( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + uint8_t *pucStreamBufferStorageArea, + StaticStreamBuffer_t *pxStaticStreamBuffer ); ++ * Creates a new stream buffer using statically allocated memory. See + * xStreamBufferCreate() for a version that uses dynamically allocated memory. + * + * configSUPPORT_STATIC_ALLOCATION must be set to 1 in FreeRTOSConfig.h for + * xStreamBufferCreateStatic() to be available. + * + * @param xBufferSizeBytes The size, in bytes, of the buffer pointed to by the + * pucStreamBufferStorageArea parameter. + * + * @param xTriggerLevelBytes The number of bytes that must be in the stream + * buffer before a task that is blocked on the stream buffer to wait for data is + * moved out of the blocked state. For example, if a task is blocked on a read + * of an empty stream buffer that has a trigger level of 1 then the task will be + * unblocked when a single byte is written to the buffer or the task's block + * time expires. As another example, if a task is blocked on a read of an empty + * stream buffer that has a trigger level of 10 then the task will not be + * unblocked until the stream buffer contains at least 10 bytes or the task's + * block time expires. If a reading task's block time expires before the + * trigger level is reached then the task will still receive however many bytes + * are actually available. Setting a trigger level of 0 will result in a + * trigger level of 1 being used. It is not valid to specify a trigger level + * that is greater than the buffer size. 
+ * + * @param pucStreamBufferStorageArea Must point to a uint8_t array that is at + * least xBufferSizeBytes + 1 big. This is the array to which streams are + * copied when they are written to the stream buffer. + * + * @param pxStaticStreamBuffer Must point to a variable of type + * StaticStreamBuffer_t, which will be used to hold the stream buffer's data + * structure. + * + * @return If the stream buffer is created successfully then a handle to the + * created stream buffer is returned. If either pucStreamBufferStorageArea or + * pxStaticstreamBuffer are NULL then NULL is returned. + * + * Example use: ++ +// Used to dimension the array used to hold the streams. The available space +// will actually be one less than this, so 999. +#define STORAGE_SIZE_BYTES 1000 + +// Defines the memory that will actually hold the streams within the stream +// buffer. +static uint8_t ucStorageBuffer[ STORAGE_SIZE_BYTES ]; + +// The variable used to hold the stream buffer structure. +StaticStreamBuffer_t xStreamBufferStruct; + +void MyFunction( void ) +{ +StreamBufferHandle_t xStreamBuffer; +const size_t xTriggerLevel = 1; + + xStreamBuffer = xStreamBufferCreateStatic( sizeof( ucBufferStorage ), + xTriggerLevel, + ucBufferStorage, + &xStreamBufferStruct ); + + // As neither the pucStreamBufferStorageArea or pxStaticStreamBuffer + // parameters were NULL, xStreamBuffer will not be NULL, and can be used to + // reference the created stream buffer in other stream buffer API calls. + + // Other code that uses the stream buffer can go here. +} + ++ * \defgroup xStreamBufferCreateStatic xStreamBufferCreateStatic + * \ingroup StreamBufferManagement + */ +#define xStreamBufferCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pucStreamBufferStorageArea, pxStaticStreamBuffer ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, pucStreamBufferStorageArea, pxStaticStreamBuffer ) + +/** + * stream_buffer.h + * ++size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ); ++ * + * Sends bytes to a stream buffer. The bytes are copied into the stream buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferRead()) inside a critical section and set the receive + * block time to 0. + * + * Use xStreamBufferSend() to write to a stream buffer from a task. Use + * xStreamBufferSendFromISR() to write to a stream buffer from an interrupt + * service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer to which a stream is + * being sent. 
+ * + * @param pvTxData A pointer to the buffer that holds the bytes to be copied + * into the stream buffer. + * + * @param xDataLengthBytes The maximum number of bytes to copy from pvTxData + * into the stream buffer. + * + * @param xTicksToWait The maximum amount of time the task should remain in the + * Blocked state to wait for enough space to become available in the stream + * buffer, should the stream buffer contain too little space to hold the + * another xDataLengthBytes bytes. The block time is specified in tick periods, + * so the absolute time it represents is dependent on the tick frequency. The + * macro pdMS_TO_TICKS() can be used to convert a time specified in milliseconds + * into a time specified in ticks. Setting xTicksToWait to portMAX_DELAY will + * cause the task to wait indefinitely (without timing out), provided + * INCLUDE_vTaskSuspend is set to 1 in FreeRTOSConfig.h. If a task times out + * before it can write all xDataLengthBytes into the buffer it will still write + * as many bytes as possible. A task does not use any CPU time when it is in + * the blocked state. + * + * @return The number of bytes written to the stream buffer. If a task times + * out before it can write all xDataLengthBytes into the buffer it will still + * write as many bytes as possible. + * + * Example use: ++void vAFunction( StreamBufferHandle_t xStreamBuffer ) +{ +size_t xBytesSent; +uint8_t ucArrayToSend[] = { 0, 1, 2, 3 }; +char *pcStringToSend = "String to send"; +const TickType_t x100ms = pdMS_TO_TICKS( 100 ); + + // Send an array to the stream buffer, blocking for a maximum of 100ms to + // wait for enough space to be available in the stream buffer. + xBytesSent = xStreamBufferSend( xStreamBuffer, ( void * ) ucArrayToSend, sizeof( ucArrayToSend ), x100ms ); + + if( xBytesSent != sizeof( ucArrayToSend ) ) + { + // The call to xStreamBufferSend() times out before there was enough + // space in the buffer for the data to be written, but it did + // successfully write xBytesSent bytes. + } + + // Send the string to the stream buffer. Return immediately if there is not + // enough space in the buffer. + xBytesSent = xStreamBufferSend( xStreamBuffer, ( void * ) pcStringToSend, strlen( pcStringToSend ), 0 ); + + if( xBytesSent != strlen( pcStringToSend ) ) + { + // The entire string could not be added to the stream buffer because + // there was not enough free space in the buffer, but xBytesSent bytes + // were sent. Could try again to send the remaining bytes. + } +} ++ * \defgroup xStreamBufferSend xStreamBufferSend + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * Interrupt safe version of the API function that sends a stream of bytes to + * the stream buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). 
It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferRead()) inside a critical section and set the receive + * block time to 0. + * + * Use xStreamBufferSend() to write to a stream buffer from a task. Use + * xStreamBufferSendFromISR() to write to a stream buffer from an interrupt + * service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer to which a stream is + * being sent. + * + * @param pvTxData A pointer to the data that is to be copied into the stream + * buffer. + * + * @param xDataLengthBytes The maximum number of bytes to copy from pvTxData + * into the stream buffer. + * + * @param pxHigherPriorityTaskWoken It is possible that a stream buffer will + * have a task blocked on it waiting for data. Calling + * xStreamBufferSendFromISR() can make data available, and so cause a task that + * was waiting for data to leave the Blocked state. If calling + * xStreamBufferSendFromISR() causes a task to leave the Blocked state, and the + * unblocked task has a priority higher than the currently executing task (the + * task that was interrupted), then, internally, xStreamBufferSendFromISR() + * will set *pxHigherPriorityTaskWoken to pdTRUE. If + * xStreamBufferSendFromISR() sets this value to pdTRUE, then normally a + * context switch should be performed before the interrupt is exited. This will + * ensure that the interrupt returns directly to the highest priority Ready + * state task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it + * is passed into the function. See the example code below for an example. + * + * @return The number of bytes actually written to the stream buffer, which will + * be less than xDataLengthBytes if the stream buffer didn't have enough free + * space for all the bytes to be written. + * + * Example use: ++// A stream buffer that has already been created. +StreamBufferHandle_t xStreamBuffer; + +void vAnInterruptServiceRoutine( void ) +{ +size_t xBytesSent; +char *pcStringToSend = "String to send"; +BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE. + + // Attempt to send the string to the stream buffer. + xBytesSent = xStreamBufferSendFromISR( xStreamBuffer, + ( void * ) pcStringToSend, + strlen( pcStringToSend ), + &xHigherPriorityTaskWoken ); + + if( xBytesSent != strlen( pcStringToSend ) ) + { + // There was not enough free space in the stream buffer for the entire + // string to be written, ut xBytesSent bytes were written. + } + + // If xHigherPriorityTaskWoken was set to pdTRUE inside + // xStreamBufferSendFromISR() then a task that has a priority above the + // priority of the currently executing task was unblocked and a context + // switch should be performed to ensure the ISR returns to the unblocked + // task. In most FreeRTOS ports this is done by simply passing + // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the + // variables value, and perform the context switch if necessary. 
Check the + // documentation for the port in use for port specific instructions. + taskYIELD_FROM_ISR( xHigherPriorityTaskWoken ); +} ++ * \defgroup xStreamBufferSendFromISR xStreamBufferSendFromISR + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ); ++ * + * Receives bytes from a stream buffer. + * + * ***NOTE***: Uniquely among FreeRTOS objects, the stream buffer + * implementation (so also the message buffer implementation, as message buffers + * are built on top of stream buffers) assumes there is only one task or + * interrupt that will write to the buffer (the writer), and only one task or + * interrupt that will read from the buffer (the reader). It is safe for the + * writer and reader to be different tasks or interrupts, but, unlike other + * FreeRTOS objects, it is not safe to have multiple different writers or + * multiple different readers. If there are to be multiple different writers + * then the application writer must place each call to a writing API function + * (such as xStreamBufferSend()) inside a critical section and set the send + * block time to 0. Likewise, if there are to be multiple different readers + * then the application writer must place each call to a reading API function + * (such as xStreamBufferRead()) inside a critical section and set the receive + * block time to 0. + * + * Use xStreamBufferReceive() to read from a stream buffer from a task. Use + * xStreamBufferReceiveFromISR() to read from a stream buffer from an + * interrupt service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer from which bytes are to + * be received. + * + * @param pvRxData A pointer to the buffer into which the received bytes will be + * copied. + * + * @param xBufferLengthBytes The length of the buffer pointed to by the + * pvRxData parameter. This sets the maximum number of bytes to receive in one + * call. xStreamBufferReceive will return as many bytes as possible up to a + * maximum set by xBufferLengthBytes. + * + * @param xTicksToWait The maximum amount of time the task should remain in the + * Blocked state to wait for data to become available if the stream buffer is + * empty. xStreamBufferReceive() will return immediately if xTicksToWait is + * zero. The block time is specified in tick periods, so the absolute time it + * represents is dependent on the tick frequency. The macro pdMS_TO_TICKS() can + * be used to convert a time specified in milliseconds into a time specified in + * ticks. Setting xTicksToWait to portMAX_DELAY will cause the task to wait + * indefinitely (without timing out), provided INCLUDE_vTaskSuspend is set to 1 + * in FreeRTOSConfig.h. A task does not use any CPU time when it is in the + * Blocked state. + * + * @return The number of bytes actually read from the stream buffer, which will + * be less than xBufferLengthBytes if the call to xStreamBufferReceive() timed + * out before xBufferLengthBytes were available. + * + * Example use: ++void vAFunction( StreamBuffer_t xStreamBuffer ) +{ +uint8_t ucRxData[ 20 ]; +size_t xReceivedBytes; +const TickType_t xBlockTime = pdMS_TO_TICKS( 20 ); + + // Receive up to another sizeof( ucRxData ) bytes from the stream buffer. 
+ // Wait in the Blocked state (so not using any CPU processing time) for a + // maximum of 100ms for the full sizeof( ucRxData ) number of bytes to be + // available. + xReceivedBytes = xStreamBufferReceive( xStreamBuffer, + ( void * ) ucRxData, + sizeof( ucRxData ), + xBlockTime ); + + if( xReceivedBytes > 0 ) + { + // A ucRxData contains another xRecievedBytes bytes of data, which can + // be processed here.... + } +} ++ * \defgroup xStreamBufferReceive xStreamBufferReceive + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * An interrupt safe version of the API function that receives bytes from a + * stream buffer. + * + * Use xStreamBufferReceive() to read bytes from a stream buffer from a task. + * Use xStreamBufferReceiveFromISR() to read bytes from a stream buffer from an + * interrupt service routine (ISR). + * + * @param xStreamBuffer The handle of the stream buffer from which a stream + * is being received. + * + * @param pvRxData A pointer to the buffer into which the received bytes are + * copied. + * + * @param xBufferLengthBytes The length of the buffer pointed to by the + * pvRxData parameter. This sets the maximum number of bytes to receive in one + * call. xStreamBufferReceive will return as many bytes as possible up to a + * maximum set by xBufferLengthBytes. + * + * @param pxHigherPriorityTaskWoken It is possible that a stream buffer will + * have a task blocked on it waiting for space to become available. Calling + * xStreamBufferReceiveFromISR() can make space available, and so cause a task + * that is waiting for space to leave the Blocked state. If calling + * xStreamBufferReceiveFromISR() causes a task to leave the Blocked state, and + * the unblocked task has a priority higher than the currently executing task + * (the task that was interrupted), then, internally, + * xStreamBufferReceiveFromISR() will set *pxHigherPriorityTaskWoken to pdTRUE. + * If xStreamBufferReceiveFromISR() sets this value to pdTRUE, then normally a + * context switch should be performed before the interrupt is exited. That will + * ensure the interrupt returns directly to the highest priority Ready state + * task. *pxHigherPriorityTaskWoken should be set to pdFALSE before it is + * passed into the function. See the code example below for an example. + * + * @return The number of bytes read from the stream buffer, if any. + * + * Example use: ++// A stream buffer that has already been created. +StreamBuffer_t xStreamBuffer; + +void vAnInterruptServiceRoutine( void ) +{ +uint8_t ucRxData[ 20 ]; +size_t xReceivedBytes; +BaseType_t xHigherPriorityTaskWoken = pdFALSE; // Initialised to pdFALSE. + + // Receive the next stream from the stream buffer. + xReceivedBytes = xStreamBufferReceiveFromISR( xStreamBuffer, + ( void * ) ucRxData, + sizeof( ucRxData ), + &xHigherPriorityTaskWoken ); + + if( xReceivedBytes > 0 ) + { + // ucRxData contains xReceivedBytes read from the stream buffer. + // Process the stream here.... 
+ } + + // If xHigherPriorityTaskWoken was set to pdTRUE inside + // xStreamBufferReceiveFromISR() then a task that has a priority above the + // priority of the currently executing task was unblocked and a context + // switch should be performed to ensure the ISR returns to the unblocked + // task. In most FreeRTOS ports this is done by simply passing + // xHigherPriorityTaskWoken into taskYIELD_FROM_ISR(), which will test the + // variables value, and perform the context switch if necessary. Check the + // documentation for the port in use for port specific instructions. + taskYIELD_FROM_ISR( xHigherPriorityTaskWoken ); +} ++ * \defgroup xStreamBufferReceiveFromISR xStreamBufferReceiveFromISR + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ); ++ * + * Deletes a stream buffer that was previously created using a call to + * xStreamBufferCreate() or xStreamBufferCreateStatic(). If the stream + * buffer was created using dynamic memory (that is, by xStreamBufferCreate()), + * then the allocated memory is freed. + * + * A stream buffer handle must not be used after the stream buffer has been + * deleted. + * + * @param xStreamBuffer The handle of the stream buffer to be deleted. + * + * \defgroup vStreamBufferDelete vStreamBufferDelete + * \ingroup StreamBufferManagement + */ +void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ); ++ * + * Queries a stream buffer to see if it is full. A stream buffer is full if it + * does not have any free space, and therefore cannot accept any more data. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return If the stream buffer is full then pdTRUE is returned. Otherwise + * pdFALSE is returned. + * + * \defgroup xStreamBufferIsFull xStreamBufferIsFull + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ); ++ * + * Queries a stream buffer to see if it is empty. A stream buffer is empty if + * it does not contain any data. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return If the stream buffer is empty then pdTRUE is returned. Otherwise + * pdFALSE is returned. + * + * \defgroup xStreamBufferIsEmpty xStreamBufferIsEmpty + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ); ++ * + * Resets a stream buffer to its initial, empty, state. Any data that was in + * the stream buffer is discarded. A stream buffer can only be reset if there + * are no tasks blocked waiting to either send to or receive from the stream + * buffer. + * + * @param xStreamBuffer The handle of the stream buffer being reset. + * + * @return If the stream buffer is reset then pdPASS is returned. If there was + * a task blocked waiting to send to or read from the stream buffer then the + * stream buffer is not reset and pdFAIL is returned. 
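+ *
+ * Example use (a minimal sketch; the function name vResetExample is
+ * illustrative and assumes xStreamBuffer was created elsewhere):
+
+void vResetExample( StreamBufferHandle_t xStreamBuffer )
+{
+    // Attempt to discard everything in the buffer.  The reset only succeeds
+    // while no task is blocked waiting to send to or receive from the buffer.
+    if( xStreamBufferReset( xStreamBuffer ) != pdPASS )
+    {
+        // A task was blocked on the stream buffer, so it was not reset.
+    }
+}
+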
+ * + * \defgroup xStreamBufferReset xStreamBufferReset + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ); ++ * + * Queries a stream buffer to see how much free space it contains, which is + * equal to the amount of data that can be sent to the stream buffer before it + * is full. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return The number of bytes that can be written to the stream buffer before + * the stream buffer would be full. + * + * \defgroup xStreamBufferSpacesAvailable xStreamBufferSpacesAvailable + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ); ++ * + * Queries a stream buffer to see how much data it contains, which is equal to + * the number of bytes that can be read from the stream buffer before the stream + * buffer would be empty. + * + * @param xStreamBuffer The handle of the stream buffer being queried. + * + * @return The number of bytes that can be read from the stream buffer before + * the stream buffer would be empty. + * + * \defgroup xStreamBufferBytesAvailable xStreamBufferBytesAvailable + * \ingroup StreamBufferManagement + */ +size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ); ++ * + * A stream buffer's trigger level is the number of bytes that must be in the + * stream buffer before a task that is blocked on the stream buffer to + * wait for data is moved out of the blocked state. For example, if a task is + * blocked on a read of an empty stream buffer that has a trigger level of 1 + * then the task will be unblocked when a single byte is written to the buffer + * or the task's block time expires. As another example, if a task is blocked + * on a read of an empty stream buffer that has a trigger level of 10 then the + * task will not be unblocked until the stream buffer contains at least 10 bytes + * or the task's block time expires. If a reading task's block time expires + * before the trigger level is reached then the task will still receive however + * many bytes are actually available. Setting a trigger level of 0 will result + * in a trigger level of 1 being used. It is not valid to specify a trigger + * level that is greater than the buffer size. + * + * A trigger level is set when the stream buffer is created, and can be modified + * using xStreamBufferSetTriggerLevel(). + * + * @param xStreamBuffer The handle of the stream buffer being updated. + * + * @param xTriggerLevel The new trigger level for the stream buffer. + * + * @return If xTriggerLevel was less than or equal to the stream buffer's length + * then the trigger level will be updated and pdTRUE is returned. Otherwise + * pdFALSE is returned. 
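+ *
+ * Example use (a minimal sketch; vRaiseTriggerLevel is an illustrative name
+ * and assumes xStreamBuffer was created elsewhere with a capacity of at
+ * least 10 bytes):
+
+void vRaiseTriggerLevel( StreamBufferHandle_t xStreamBuffer )
+{
+    // Do not unblock a reading task until at least 10 bytes are available,
+    // or the reader's block time expires.
+    if( xStreamBufferSetTriggerLevel( xStreamBuffer, 10 ) == pdFALSE )
+    {
+        // 10 was greater than the stream buffer's length, so the trigger
+        // level was left unchanged.
+    }
+}
+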
+ * + * \defgroup xStreamBufferSetTriggerLevel xStreamBufferSetTriggerLevel + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * For advanced users only. + * + * The sbSEND_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is sent to a message buffer or stream buffer. If there was a task that + * was blocked on the message or stream buffer waiting for data to arrive then + * the sbSEND_COMPLETED() macro sends a notification to the task to remove it + * from the Blocked state. xStreamBufferSendCompletedFromISR() does the same + * thing. It is provided to enable application writers to implement their own + * version of sbSEND_COMPLETED(), and MUST NOT BE USED AT ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer to which data was + * written. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xStreamBufferSendCompletedFromISR(). If calling + * xStreamBufferSendCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. + * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. + * + * \defgroup xStreamBufferSendCompletedFromISR xStreamBufferSendCompletedFromISR + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/** + * stream_buffer.h + * ++BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ); ++ * + * For advanced users only. + * + * The sbRECEIVE_COMPLETED() macro is called from within the FreeRTOS APIs when + * data is read out of a message buffer or stream buffer. If there was a task + * that was blocked on the message or stream buffer waiting for data to arrive + * then the sbRECEIVE_COMPLETED() macro sends a notification to the task to + * remove it from the Blocked state. xStreamBufferReceiveCompletedFromISR() + * does the same thing. It is provided to enable application writers to + * implement their own version of sbRECEIVE_COMPLETED(), and MUST NOT BE USED AT + * ANY OTHER TIME. + * + * See the example implemented in FreeRTOS/Demo/Minimal/MessageBufferAMP.c for + * additional information. + * + * @param xStreamBuffer The handle of the stream buffer from which data was + * read. + * + * @param pxHigherPriorityTaskWoken *pxHigherPriorityTaskWoken should be + * initialised to pdFALSE before it is passed into + * xStreamBufferReceiveCompletedFromISR(). If calling + * xStreamBufferReceiveCompletedFromISR() removes a task from the Blocked state, + * and the task has a priority above the priority of the currently running task, + * then *pxHigherPriorityTaskWoken will get set to pdTRUE indicating that a + * context switch should be performed before exiting the ISR. 
+ * + * @return If a task was removed from the Blocked state then pdTRUE is returned. + * Otherwise pdFALSE is returned. + * + * \defgroup xStreamBufferReceiveCompletedFromISR xStreamBufferReceiveCompletedFromISR + * \ingroup StreamBufferManagement + */ +BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + +/* Functions below here are not part of the public API. */ +StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer ) PRIVILEGED_FUNCTION; + +StreamBufferHandle_t xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer, + uint8_t * const pucStreamBufferStorageArea, + StaticStreamBuffer_t * const pxStaticStreamBuffer ) PRIVILEGED_FUNCTION; + +size_t xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + +#if( configUSE_TRACE_FACILITY == 1 ) + void vStreamBufferSetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer, UBaseType_t uxStreamBufferNumber ) PRIVILEGED_FUNCTION; + UBaseType_t uxStreamBufferGetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + uint8_t ucStreamBufferGetStreamBufferType( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; +#endif + +#if defined( __cplusplus ) +} +#endif + +#endif /* !defined( STREAM_BUFFER_H ) */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/task.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/task.h new file mode 100644 index 0000000..4041bbf --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/task.h @@ -0,0 +1,2421 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! 
+ */ + + +#ifndef INC_TASK_H +#define INC_TASK_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h must appear in source files before include task.h" +#endif + +#include "list.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*----------------------------------------------------------- + * MACROS AND DEFINITIONS + *----------------------------------------------------------*/ + +#define tskKERNEL_VERSION_NUMBER "V10.2.0" +#define tskKERNEL_VERSION_MAJOR 10 +#define tskKERNEL_VERSION_MINOR 2 +#define tskKERNEL_VERSION_BUILD 0 + +/* MPU region parameters passed in ulParameters + * of MemoryRegion_t struct. */ +#define tskMPU_REGION_READ_ONLY ( 1UL << 0UL ) +#define tskMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define tskMPU_REGION_EXECUTE_NEVER ( 1UL << 2UL ) +#define tskMPU_REGION_NORMAL_MEMORY ( 1UL << 3UL ) +#define tskMPU_REGION_DEVICE_MEMORY ( 1UL << 4UL ) + +/** + * task. h + * + * Type by which tasks are referenced. For example, a call to xTaskCreate + * returns (via a pointer parameter) a TaskHandle_t variable that can then + * be used as a parameter to vTaskDelete to delete the task. + * + * \defgroup TaskHandle_t TaskHandle_t + * \ingroup Tasks + */ +struct tskTaskControlBlock; /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +typedef struct tskTaskControlBlock* TaskHandle_t; + +/* + * Defines the prototype to which the application task hook function must + * conform. + */ +typedef BaseType_t (*TaskHookFunction_t)( void * ); + +/* Task states returned by eTaskGetState. */ +typedef enum +{ + eRunning = 0, /* A task is querying the state of itself, so must be running. */ + eReady, /* The task being queried is in a ready or pending ready list. */ + eBlocked, /* The task being queried is in the Blocked state. */ + eSuspended, /* The task being queried is in the Suspended state, or is in the Blocked state with an infinite time out. */ + eDeleted, /* The task being queried has been deleted, but its TCB has not yet been freed. */ + eInvalid /* Used as an 'invalid state' value. */ +} eTaskState; + +/* Actions that can be performed when vTaskNotify() is called. */ +typedef enum +{ + eNoAction = 0, /* Notify the task without updating its notify value. */ + eSetBits, /* Set bits in the task's notification value. */ + eIncrement, /* Increment the task's notification value. */ + eSetValueWithOverwrite, /* Set the task's notification value to a specific value even if the previous value has not yet been read by the task. */ + eSetValueWithoutOverwrite /* Set the task's notification value if the previous value has been read by the task. */ +} eNotifyAction; + +/* + * Used internally only. + */ +typedef struct xTIME_OUT +{ + BaseType_t xOverflowCount; + TickType_t xTimeOnEntering; +} TimeOut_t; + +/* + * Defines the memory ranges allocated to the task when an MPU is used. + */ +typedef struct xMEMORY_REGION +{ + void *pvBaseAddress; + uint32_t ulLengthInBytes; + uint32_t ulParameters; +} MemoryRegion_t; + +/* + * Parameters required to create an MPU protected task. + */ +typedef struct xTASK_PARAMETERS +{ + TaskFunction_t pvTaskCode; + const char * const pcName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + configSTACK_DEPTH_TYPE usStackDepth; + void *pvParameters; + UBaseType_t uxPriority; + StackType_t *puxStackBuffer; + MemoryRegion_t xRegions[ portNUM_CONFIGURABLE_REGIONS ]; + #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + StaticTask_t * const pxTaskBuffer; + #endif +} TaskParameters_t; + +/* Used with the uxTaskGetSystemState() function to return the state of each task +in the system. */ +typedef struct xTASK_STATUS +{ + TaskHandle_t xHandle; /* The handle of the task to which the rest of the information in the structure relates. */ + const char *pcTaskName; /* A pointer to the task's name. This value will be invalid if the task was deleted since the structure was populated! */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + UBaseType_t xTaskNumber; /* A number unique to the task. */ + eTaskState eCurrentState; /* The state in which the task existed when the structure was populated. */ + UBaseType_t uxCurrentPriority; /* The priority at which the task was running (may be inherited) when the structure was populated. */ + UBaseType_t uxBasePriority; /* The priority to which the task will return if the task's current priority has been inherited to avoid unbounded priority inversion when obtaining a mutex. Only valid if configUSE_MUTEXES is defined as 1 in FreeRTOSConfig.h. */ + uint32_t ulRunTimeCounter; /* The total run time allocated to the task so far, as defined by the run time stats clock. See http://www.freertos.org/rtos-run-time-stats.html. Only valid when configGENERATE_RUN_TIME_STATS is defined as 1 in FreeRTOSConfig.h. */ + StackType_t *pxStackBase; /* Points to the lowest address of the task's stack area. */ + configSTACK_DEPTH_TYPE usStackHighWaterMark; /* The minimum amount of stack space that has remained for the task since the task was created. The closer this value is to zero the closer the task has come to overflowing its stack. */ +} TaskStatus_t; + +/* Possible return values for eTaskConfirmSleepModeStatus(). */ +typedef enum +{ + eAbortSleep = 0, /* A task has been made ready or a context switch pended since portSUPPRESS_TICKS_AND_SLEEP() was called - abort entering a sleep mode. */ + eStandardSleep, /* Enter a sleep mode that will not last any longer than the expected idle time. */ + eNoTasksWaitingTimeout /* No tasks are waiting for a timeout so it is safe to enter a sleep mode that can only be exited by an external interrupt. */ +} eSleepModeStatus; + +/** + * Defines the priority used by the idle task. This must not be modified. + * + * \ingroup TaskUtils + */ +#define tskIDLE_PRIORITY ( ( UBaseType_t ) 0U ) + +/** + * task. h + * + * Macro for forcing a context switch. + * + * \defgroup taskYIELD taskYIELD + * \ingroup SchedulerControl + */ +#define taskYIELD() portYIELD() + +/** + * task. h + * + * Macro to mark the start of a critical code region. Preemptive context + * switches cannot occur when in a critical region. + * + * NOTE: This may alter the stack (depending on the portable implementation) + * so must be used with care! + * + * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL + * \ingroup SchedulerControl + */ +#define taskENTER_CRITICAL() portENTER_CRITICAL() +#define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() + +/** + * task. h + * + * Macro to mark the end of a critical code region. Preemptive context + * switches cannot occur when in a critical region. 
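+ *
+ * Example use of the taskENTER_CRITICAL()/taskEXIT_CRITICAL() pair (a
+ * minimal sketch; ulSharedCounter is an illustrative variable that is also
+ * modified from other tasks):
+
+ taskENTER_CRITICAL();
+ {
+     // A non-atomic read-modify-write that must not be interrupted by a
+     // context switch is safe inside the critical section.
+     ulSharedCounter++;
+ }
+ taskEXIT_CRITICAL();
+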
+ * + * NOTE: This may alter the stack (depending on the portable implementation) + * so must be used with care! + * + * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL + * \ingroup SchedulerControl + */ +#define taskEXIT_CRITICAL() portEXIT_CRITICAL() +#define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) +/** + * task. h + * + * Macro to disable all maskable interrupts. + * + * \defgroup taskDISABLE_INTERRUPTS taskDISABLE_INTERRUPTS + * \ingroup SchedulerControl + */ +#define taskDISABLE_INTERRUPTS() portDISABLE_INTERRUPTS() + +/** + * task. h + * + * Macro to enable microcontroller interrupts. + * + * \defgroup taskENABLE_INTERRUPTS taskENABLE_INTERRUPTS + * \ingroup SchedulerControl + */ +#define taskENABLE_INTERRUPTS() portENABLE_INTERRUPTS() + +/* Definitions returned by xTaskGetSchedulerState(). taskSCHEDULER_SUSPENDED is +0 to generate more optimal code when configASSERT() is defined as the constant +is used in assert() statements. */ +#define taskSCHEDULER_SUSPENDED ( ( BaseType_t ) 0 ) +#define taskSCHEDULER_NOT_STARTED ( ( BaseType_t ) 1 ) +#define taskSCHEDULER_RUNNING ( ( BaseType_t ) 2 ) + + +/*----------------------------------------------------------- + * TASK CREATION API + *----------------------------------------------------------*/ + +/** + * task. h + *+ BaseType_t xTaskCreate( + TaskFunction_t pvTaskCode, + const char * const pcName, + configSTACK_DEPTH_TYPE usStackDepth, + void *pvParameters, + UBaseType_t uxPriority, + TaskHandle_t *pvCreatedTask + );+ * + * Create a new task and add it to the list of tasks that are ready to run. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreate() then both blocks of memory are automatically dynamically + * allocated inside the xTaskCreate() function. (see + * http://www.freertos.org/a00111.html). If a task is created using + * xTaskCreateStatic() then the application writer must provide the required + * memory. xTaskCreateStatic() therefore allows a task to be created without + * using any dynamic memory allocation. + * + * See xTaskCreateStatic() for a version that does not use any dynamic memory + * allocation. + * + * xTaskCreate() can only be used to create a task that has unrestricted + * access to the entire microcontroller memory map. Systems that include MPU + * support can alternatively create an MPU constrained task using + * xTaskCreateRestricted(). + * + * @param pvTaskCode Pointer to the task entry function. Tasks + * must be implemented to never return (i.e. continuous loop). + * + * @param pcName A descriptive name for the task. This is mainly used to + * facilitate debugging. Max length defined by configMAX_TASK_NAME_LEN - default + * is 16. + * + * @param usStackDepth The size of the task stack specified as the number of + * variables the stack can hold - not the number of bytes. For example, if + * the stack is 16 bits wide and usStackDepth is defined as 100, 200 bytes + * will be allocated for stack storage. + * + * @param pvParameters Pointer that will be used as the parameter for the task + * being created. + * + * @param uxPriority The priority at which the task should run. Systems that + * include MPU support can optionally create tasks in a privileged (system) + * mode by setting bit portPRIVILEGE_BIT of the priority parameter. 
For + * example, to create a privileged task at priority 2 the uxPriority parameter + * should be set to ( 2 | portPRIVILEGE_BIT ). + * + * @param pvCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: ++ // Task to be created. + void vTaskCode( void * pvParameters ) + { + for( ;; ) + { + // Task code goes here. + } + } + + // Function that creates a task. + void vOtherFunction( void ) + { + static uint8_t ucParameterToPass; + TaskHandle_t xHandle = NULL; + + // Create the task, storing the handle. Note that the passed parameter ucParameterToPass + // must exist for the lifetime of the task, so in this case is declared static. If it was just an + // an automatic stack variable it might no longer exist, or at least have been corrupted, by the time + // the new task attempts to access it. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, &ucParameterToPass, tskIDLE_PRIORITY, &xHandle ); + configASSERT( xHandle ); + + // Use the handle to delete the task. + if( xHandle != NULL ) + { + vTaskDelete( xHandle ); + } + } ++ * \defgroup xTaskCreate xTaskCreate + * \ingroup Tasks + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + BaseType_t xTaskCreate( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *+ TaskHandle_t xTaskCreateStatic( TaskFunction_t pvTaskCode, + const char * const pcName, + uint32_t ulStackDepth, + void *pvParameters, + UBaseType_t uxPriority, + StackType_t *pxStackBuffer, + StaticTask_t *pxTaskBuffer );+ * + * Create a new task and add it to the list of tasks that are ready to run. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreate() then both blocks of memory are automatically dynamically + * allocated inside the xTaskCreate() function. (see + * http://www.freertos.org/a00111.html). If a task is created using + * xTaskCreateStatic() then the application writer must provide the required + * memory. xTaskCreateStatic() therefore allows a task to be created without + * using any dynamic memory allocation. + * + * @param pvTaskCode Pointer to the task entry function. Tasks + * must be implemented to never return (i.e. continuous loop). + * + * @param pcName A descriptive name for the task. This is mainly used to + * facilitate debugging. The maximum length of the string is defined by + * configMAX_TASK_NAME_LEN in FreeRTOSConfig.h. + * + * @param ulStackDepth The size of the task stack specified as the number of + * variables the stack can hold - not the number of bytes. For example, if + * the stack is 32-bits wide and ulStackDepth is defined as 100 then 400 bytes + * will be allocated for stack storage. + * + * @param pvParameters Pointer that will be used as the parameter for the task + * being created. + * + * @param uxPriority The priority at which the task will run. 
+ * + * @param pxStackBuffer Must point to a StackType_t array that has at least + * ulStackDepth indexes - the array will then be used as the task's stack, + * removing the need for the stack to be allocated dynamically. + * + * @param pxTaskBuffer Must point to a variable of type StaticTask_t, which will + * then be used to hold the task's data structures, removing the need for the + * memory to be allocated dynamically. + * + * @return If neither pxStackBuffer or pxTaskBuffer are NULL, then the task will + * be created and a handle to the created task is returned. If either + * pxStackBuffer or pxTaskBuffer are NULL then the task will not be created and + * NULL is returned. + * + * Example usage: ++ + // Dimensions the buffer that the task being created will use as its stack. + // NOTE: This is the number of words the stack will hold, not the number of + // bytes. For example, if each stack item is 32-bits, and this is set to 100, + // then 400 bytes (100 * 32-bits) will be allocated. + #define STACK_SIZE 200 + + // Structure that will hold the TCB of the task being created. + StaticTask_t xTaskBuffer; + + // Buffer that the task being created will use as its stack. Note this is + // an array of StackType_t variables. The size of StackType_t is dependent on + // the RTOS port. + StackType_t xStack[ STACK_SIZE ]; + + // Function that implements the task being created. + void vTaskCode( void * pvParameters ) + { + // The parameter value is expected to be 1 as 1 is passed in the + // pvParameters value in the call to xTaskCreateStatic(). + configASSERT( ( uint32_t ) pvParameters == 1UL ); + + for( ;; ) + { + // Task code goes here. + } + } + + // Function that creates a task. + void vOtherFunction( void ) + { + TaskHandle_t xHandle = NULL; + + // Create the task without using any dynamic memory allocation. + xHandle = xTaskCreateStatic( + vTaskCode, // Function that implements the task. + "NAME", // Text name for the task. + STACK_SIZE, // Stack size in words, not bytes. + ( void * ) 1, // Parameter passed into the task. + tskIDLE_PRIORITY,// Priority at which the task is created. + xStack, // Array to use as the task's stack. + &xTaskBuffer ); // Variable to hold the task's data structure. + + // puxStackBuffer and pxTaskBuffer were not NULL, so the task will have + // been created, and xHandle will be the task's handle. Use the handle + // to suspend the task. + vTaskSuspend( xHandle ); + } ++ * \defgroup xTaskCreateStatic xTaskCreateStatic + * \ingroup Tasks + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +/** + * task. h + *+ BaseType_t xTaskCreateRestricted( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );+ * + * Only available when configSUPPORT_DYNAMIC_ALLOCATION is set to 1. + * + * xTaskCreateRestricted() should only be used in systems that include an MPU + * implementation. + * + * Create a new task and add it to the list of tasks that are ready to run. + * The function parameters define the memory regions and associated access + * permissions allocated to the task. 
+ * + * See xTaskCreateRestrictedStatic() for a version that does not use any + * dynamic memory allocation. + * + * @param pxTaskDefinition Pointer to a structure that contains a member + * for each of the normal xTaskCreate() parameters (see the xTaskCreate() API + * documentation) plus an optional stack buffer and the memory region + * definitions. + * + * @param pxCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: ++// Create a TaskParameters_t structure that defines the task to be created. +static const TaskParameters_t xCheckTaskParameters = +{ + vATask, // pvTaskCode - the function that implements the task. + "ATask", // pcName - just a text name for the task to assist debugging. + 100, // usStackDepth - the stack size DEFINED IN WORDS. + NULL, // pvParameters - passed into the task function as the function parameters. + ( 1UL | portPRIVILEGE_BIT ),// uxPriority - task priority, set the portPRIVILEGE_BIT if the task should run in a privileged state. + cStackBuffer,// puxStackBuffer - the buffer to be used as the task stack. + + // xRegions - Allocate up to three separate memory regions for access by + // the task, with appropriate access permissions. Different processors have + // different memory alignment requirements - refer to the FreeRTOS documentation + // for full information. + { + // Base address Length Parameters + { cReadWriteArray, 32, portMPU_REGION_READ_WRITE }, + { cReadOnlyArray, 32, portMPU_REGION_READ_ONLY }, + { cPrivilegedOnlyAccessArray, 128, portMPU_REGION_PRIVILEGED_READ_WRITE } + } +}; + +int main( void ) +{ +TaskHandle_t xHandle; + + // Create a task from the const structure defined above. The task handle + // is requested (the second parameter is not NULL) but in this case just for + // demonstration purposes as it's not actually used. + xTaskCreateRestricted( &xCheckTaskParameters, &xHandle ); + + // Start the scheduler. + vTaskStartScheduler(); + + // Will only get here if there was insufficient memory to create the idle + // and/or timer task. + for( ;; ); +} ++ * \defgroup xTaskCreateRestricted xTaskCreateRestricted + * \ingroup Tasks + */ +#if( portUSING_MPU_WRAPPERS == 1 ) + BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *+ BaseType_t xTaskCreateRestrictedStatic( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );+ * + * Only available when configSUPPORT_STATIC_ALLOCATION is set to 1. + * + * xTaskCreateRestrictedStatic() should only be used in systems that include an + * MPU implementation. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreateRestricted() then the stack is provided by the application writer, + * and the memory used to hold the task's data structure is automatically + * dynamically allocated inside the xTaskCreateRestricted() function. If a task + * is created using xTaskCreateRestrictedStatic() then the application writer + * must provide the memory used to hold the task's data structures too. 
+ * xTaskCreateRestrictedStatic() therefore allows a memory protected task to be + * created without using any dynamic memory allocation. + * + * @param pxTaskDefinition Pointer to a structure that contains a member + * for each of the normal xTaskCreate() parameters (see the xTaskCreate() API + * documentation) plus an optional stack buffer and the memory region + * definitions. If configSUPPORT_STATIC_ALLOCATION is set to 1 the structure + * contains an additional member, which is used to point to a variable of type + * StaticTask_t - which is then used to hold the task's data structure. + * + * @param pxCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: ++// Create a TaskParameters_t structure that defines the task to be created. +// The StaticTask_t variable is only included in the structure when +// configSUPPORT_STATIC_ALLOCATION is set to 1. The PRIVILEGED_DATA macro can +// be used to force the variable into the RTOS kernel's privileged data area. +static PRIVILEGED_DATA StaticTask_t xTaskBuffer; +static const TaskParameters_t xCheckTaskParameters = +{ + vATask, // pvTaskCode - the function that implements the task. + "ATask", // pcName - just a text name for the task to assist debugging. + 100, // usStackDepth - the stack size DEFINED IN WORDS. + NULL, // pvParameters - passed into the task function as the function parameters. + ( 1UL | portPRIVILEGE_BIT ),// uxPriority - task priority, set the portPRIVILEGE_BIT if the task should run in a privileged state. + cStackBuffer,// puxStackBuffer - the buffer to be used as the task stack. + + // xRegions - Allocate up to three separate memory regions for access by + // the task, with appropriate access permissions. Different processors have + // different memory alignment requirements - refer to the FreeRTOS documentation + // for full information. + { + // Base address Length Parameters + { cReadWriteArray, 32, portMPU_REGION_READ_WRITE }, + { cReadOnlyArray, 32, portMPU_REGION_READ_ONLY }, + { cPrivilegedOnlyAccessArray, 128, portMPU_REGION_PRIVILEGED_READ_WRITE } + }, + + &xTaskBuffer // Holds the task's data structure. +}; + +int main( void ) +{ +TaskHandle_t xHandle; + + // Create a task from the const structure defined above. The task handle + // is requested (the second parameter is not NULL) but in this case just for + // demonstration purposes as it's not actually used. + xTaskCreateRestrictedStatic( &xCheckTaskParameters, &xHandle ); + + // Start the scheduler. + vTaskStartScheduler(); + + // Will only get here if there was insufficient memory to create the idle + // and/or timer task. + for( ;; ); +} ++ * \defgroup xTaskCreateRestrictedStatic xTaskCreateRestrictedStatic + * \ingroup Tasks + */ +#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *+ void vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions );+ * + * Memory regions are assigned to a restricted task when the task is created by + * a call to xTaskCreateRestricted(). These regions can be redefined using + * vTaskAllocateMPURegions(). + * + * @param xTask The handle of the task being updated. 
+ * + * @param xRegions A pointer to an MemoryRegion_t structure that contains the + * new memory region definitions. + * + * Example usage: ++// Define an array of MemoryRegion_t structures that configures an MPU region +// allowing read/write access for 1024 bytes starting at the beginning of the +// ucOneKByte array. The other two of the maximum 3 definable regions are +// unused so set to zero. +static const MemoryRegion_t xAltRegions[ portNUM_CONFIGURABLE_REGIONS ] = +{ + // Base address Length Parameters + { ucOneKByte, 1024, portMPU_REGION_READ_WRITE }, + { 0, 0, 0 }, + { 0, 0, 0 } +}; + +void vATask( void *pvParameters ) +{ + // This task was created such that it has access to certain regions of + // memory as defined by the MPU configuration. At some point it is + // desired that these MPU regions are replaced with that defined in the + // xAltRegions const struct above. Use a call to vTaskAllocateMPURegions() + // for this purpose. NULL is used as the task handle to indicate that this + // function should modify the MPU regions of the calling task. + vTaskAllocateMPURegions( NULL, xAltRegions ); + + // Now the task can continue its function, but from this point on can only + // access its stack and the ucOneKByte array (unless any other statically + // defined or shared regions have been declared elsewhere). +} ++ * \defgroup xTaskCreateRestricted xTaskCreateRestricted + * \ingroup Tasks + */ +void vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskDelete( TaskHandle_t xTask );+ * + * INCLUDE_vTaskDelete must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Remove a task from the RTOS real time kernel's management. The task being + * deleted will be removed from all ready, blocked, suspended and event lists. + * + * NOTE: The idle task is responsible for freeing the kernel allocated + * memory from tasks that have been deleted. It is therefore important that + * the idle task is not starved of microcontroller processing time if your + * application makes any calls to vTaskDelete (). Memory allocated by the + * task code is not automatically freed, and should be freed before the task + * is deleted. + * + * See the demo application file death.c for sample code that utilises + * vTaskDelete (). + * + * @param xTask The handle of the task to be deleted. Passing NULL will + * cause the calling task to be deleted. + * + * Example usage: ++ void vOtherFunction( void ) + { + TaskHandle_t xHandle; + + // Create the task, storing the handle. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle ); + + // Use the handle to delete the task. + vTaskDelete( xHandle ); + } ++ * \defgroup vTaskDelete vTaskDelete + * \ingroup Tasks + */ +void vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION; + +/*----------------------------------------------------------- + * TASK CONTROL API + *----------------------------------------------------------*/ + +/** + * task. h + *void vTaskDelay( const TickType_t xTicksToDelay );+ * + * Delay a task for a given number of ticks. The actual time that the + * task remains blocked depends on the tick rate. The constant + * portTICK_PERIOD_MS can be used to calculate real time from the tick + * rate - with the resolution of one tick period. + * + * INCLUDE_vTaskDelay must be defined as 1 for this function to be available. + * See the configuration section for more information. 
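+ *
+ * The macro pdMS_TO_TICKS() can be used to express the delay in milliseconds
+ * rather than directly in ticks, for example (a minimal sketch):
+
+ // Block the calling task for (approximately) 250 milliseconds.
+ vTaskDelay( pdMS_TO_TICKS( 250 ) );
+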
+ * + * + * vTaskDelay() specifies a time at which the task wishes to unblock relative to + * the time at which vTaskDelay() is called. For example, specifying a block + * period of 100 ticks will cause the task to unblock 100 ticks after + * vTaskDelay() is called. vTaskDelay() does not therefore provide a good method + * of controlling the frequency of a periodic task as the path taken through the + * code, as well as other task and interrupt activity, will affect the frequency + * at which vTaskDelay() gets called and therefore the time at which the task + * next executes. See vTaskDelayUntil() for an alternative API function designed + * to facilitate fixed frequency execution. It does this by specifying an + * absolute time (rather than a relative time) at which the calling task should + * unblock. + * + * @param xTicksToDelay The amount of time, in tick periods, that + * the calling task should block. + * + * Example usage: + + void vTaskFunction( void * pvParameters ) + { + // Block for 500ms. + const TickType_t xDelay = 500 / portTICK_PERIOD_MS; + + for( ;; ) + { + // Simply toggle the LED every 500ms, blocking between each toggle. + vToggleLED(); + vTaskDelay( xDelay ); + } + } + + * \defgroup vTaskDelay vTaskDelay + * \ingroup TaskCtrl + */ +void vTaskDelay( const TickType_t xTicksToDelay ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskDelayUntil( TickType_t *pxPreviousWakeTime, const TickType_t xTimeIncrement );+ * + * INCLUDE_vTaskDelayUntil must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Delay a task until a specified time. This function can be used by periodic + * tasks to ensure a constant execution frequency. + * + * This function differs from vTaskDelay () in one important aspect: vTaskDelay () will + * cause a task to block for the specified number of ticks from the time vTaskDelay () is + * called. It is therefore difficult to use vTaskDelay () by itself to generate a fixed + * execution frequency as the time between a task starting to execute and that task + * calling vTaskDelay () may not be fixed [the task may take a different path through the + * code between calls, or may get interrupted or preempted a different number of times + * each time it executes]. + * + * Whereas vTaskDelay () specifies a wake time relative to the time at which the function + * is called, vTaskDelayUntil () specifies the absolute (exact) time at which it wishes to + * unblock. + * + * The constant portTICK_PERIOD_MS can be used to calculate real time from the tick + * rate - with the resolution of one tick period. + * + * @param pxPreviousWakeTime Pointer to a variable that holds the time at which the + * task was last unblocked. The variable must be initialised with the current time + * prior to its first use (see the example below). Following this the variable is + * automatically updated within vTaskDelayUntil (). + * + * @param xTimeIncrement The cycle time period. The task will be unblocked at + * time *pxPreviousWakeTime + xTimeIncrement. Calling vTaskDelayUntil with the + * same xTimeIncrement parameter value will cause the task to execute with + * a fixed interval period. + * + * Example usage: ++ // Perform an action every 10 ticks. + void vTaskFunction( void * pvParameters ) + { + TickType_t xLastWakeTime; + const TickType_t xFrequency = 10; + + // Initialise the xLastWakeTime variable with the current time. + xLastWakeTime = xTaskGetTickCount (); + for( ;; ) + { + // Wait for the next cycle. 
+ vTaskDelayUntil( &xLastWakeTime, xFrequency ); + + // Perform action here. + } + } ++ * \defgroup vTaskDelayUntil vTaskDelayUntil + * \ingroup TaskCtrl + */ +void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement ) PRIVILEGED_FUNCTION; + +/** + * task. h + *BaseType_t xTaskAbortDelay( TaskHandle_t xTask );+ * + * INCLUDE_xTaskAbortDelay must be defined as 1 in FreeRTOSConfig.h for this + * function to be available. + * + * A task will enter the Blocked state when it is waiting for an event. The + * event it is waiting for can be a temporal event (waiting for a time), such + * as when vTaskDelay() is called, or an event on an object, such as when + * xQueueReceive() or ulTaskNotifyTake() is called. If the handle of a task + * that is in the Blocked state is used in a call to xTaskAbortDelay() then the + * task will leave the Blocked state, and return from whichever function call + * placed the task into the Blocked state. + * + * @param xTask The handle of the task to remove from the Blocked state. + * + * @return If the task referenced by xTask was not in the Blocked state then + * pdFAIL is returned. Otherwise pdPASS is returned. + * + * \defgroup xTaskAbortDelay xTaskAbortDelay + * \ingroup TaskCtrl + */ +BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask );+ * + * INCLUDE_uxTaskPriorityGet must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Obtain the priority of any task. + * + * @param xTask Handle of the task to be queried. Passing a NULL + * handle results in the priority of the calling task being returned. + * + * @return The priority of xTask. + * + * Example usage: ++ void vAFunction( void ) + { + TaskHandle_t xHandle; + + // Create a task, storing the handle. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle ); + + // ... + + // Use the handle to obtain the priority of the created task. + // It was created with tskIDLE_PRIORITY, but may have changed + // it itself. + if( uxTaskPriorityGet( xHandle ) != tskIDLE_PRIORITY ) + { + // The task has changed it's priority. + } + + // ... + + // Is our priority higher than the created task? + if( uxTaskPriorityGet( xHandle ) < uxTaskPriorityGet( NULL ) ) + { + // Our priority (obtained using NULL handle) is higher. + } + } ++ * \defgroup uxTaskPriorityGet uxTaskPriorityGet + * \ingroup TaskCtrl + */ +UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask );+ * + * A version of uxTaskPriorityGet() that can be used from an ISR. + */ +UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *eTaskState eTaskGetState( TaskHandle_t xTask );+ * + * INCLUDE_eTaskGetState must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Obtain the state of any task. States are encoded by the eTaskState + * enumerated type. + * + * @param xTask Handle of the task to be queried. + * + * @return The state of xTask at the time the function was called. Note the + * state of the task might change between the function being called, and the + * functions return value being tested by the calling task. + */ +eTaskState eTaskGetState( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. 
h + *void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState );+ * + * configUSE_TRACE_FACILITY must be defined as 1 for this function to be + * available. See the configuration section for more information. + * + * Populates a TaskStatus_t structure with information about a task. + * + * @param xTask Handle of the task being queried. If xTask is NULL then + * information will be returned about the calling task. + * + * @param pxTaskStatus A pointer to the TaskStatus_t structure that will be + * filled with information about the task referenced by the handle passed using + * the xTask parameter. + * + * @param xGetFreeStackSpace The TaskStatus_t structure contains a member to report + * the stack high water mark of the task being queried. Calculating the stack + * high water mark takes a relatively long time, and can make the system + * temporarily unresponsive - so the xGetFreeStackSpace parameter is provided to + * allow the high water mark checking to be skipped. The high watermark value + * will only be written to the TaskStatus_t structure if xGetFreeStackSpace is + * not set to pdFALSE. + * + * @param eState The TaskStatus_t structure contains a member to report the + * state of the task being queried. Obtaining the task state is not as fast as + * a simple assignment - so the eState parameter is provided to allow the state + * information to be omitted from the TaskStatus_t structure. To obtain state + * information then set eState to eInvalid - otherwise the value passed in + * eState will be reported as the task state in the TaskStatus_t structure. + * + * Example usage: ++ void vAFunction( void ) + { + TaskHandle_t xHandle; + TaskStatus_t xTaskDetails; + + // Obtain the handle of a task from its name. + xHandle = xTaskGetHandle( "Task_Name" ); + + // Check the handle is not NULL. + configASSERT( xHandle ); + + // Use the handle to obtain further information about the task. + vTaskGetInfo( xHandle, + &xTaskDetails, + pdTRUE, // Include the high water mark in xTaskDetails. + eInvalid ); // Include the task state in xTaskDetails. + } ++ * \defgroup vTaskGetInfo vTaskGetInfo + * \ingroup TaskCtrl + */ +void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority );+ * + * INCLUDE_vTaskPrioritySet must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Set the priority of any task. + * + * A context switch will occur before the function returns if the priority + * being set is higher than that of the currently executing task. + * + * @param xTask Handle to the task for which the priority is being set. + * Passing a NULL handle results in the priority of the calling task being set. + * + * @param uxNewPriority The priority to which the task will be set. + * + * Example usage: ++ void vAFunction( void ) + { + TaskHandle_t xHandle; + + // Create a task, storing the handle. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle ); + + // ... + + // Use the handle to raise the priority of the created task. + vTaskPrioritySet( xHandle, tskIDLE_PRIORITY + 1 ); + + // ... + + // Use a NULL handle to raise our priority to the same value. 
+ vTaskPrioritySet( NULL, tskIDLE_PRIORITY + 1 ); + } ++ * \defgroup vTaskPrioritySet vTaskPrioritySet + * \ingroup TaskCtrl + */ +void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskSuspend( TaskHandle_t xTaskToSuspend );+ * + * INCLUDE_vTaskSuspend must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Suspend any task. When suspended a task will never get any microcontroller + * processing time, no matter what its priority. + * + * Calls to vTaskSuspend are not accumulative - + * i.e. calling vTaskSuspend () twice on the same task still only requires one + * call to vTaskResume () to ready the suspended task. + * + * @param xTaskToSuspend Handle to the task being suspended. Passing a NULL + * handle will cause the calling task to be suspended. + * + * Example usage: ++ void vAFunction( void ) + { + TaskHandle_t xHandle; + + // Create a task, storing the handle. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle ); + + // ... + + // Use the handle to suspend the created task. + vTaskSuspend( xHandle ); + + // ... + + // The created task will not run during this period, unless + // another task calls vTaskResume( xHandle ). + + //... + + + // Suspend ourselves. + vTaskSuspend( NULL ); + + // We cannot get here unless another task calls vTaskResume + // with our handle as the parameter. + } ++ * \defgroup vTaskSuspend vTaskSuspend + * \ingroup TaskCtrl + */ +void vTaskSuspend( TaskHandle_t xTaskToSuspend ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskResume( TaskHandle_t xTaskToResume );+ * + * INCLUDE_vTaskSuspend must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Resumes a suspended task. + * + * A task that has been suspended by one or more calls to vTaskSuspend () + * will be made available for running again by a single call to + * vTaskResume (). + * + * @param xTaskToResume Handle to the task being readied. + * + * Example usage: ++ void vAFunction( void ) + { + TaskHandle_t xHandle; + + // Create a task, storing the handle. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle ); + + // ... + + // Use the handle to suspend the created task. + vTaskSuspend( xHandle ); + + // ... + + // The created task will not run during this period, unless + // another task calls vTaskResume( xHandle ). + + //... + + + // Resume the suspended task ourselves. + vTaskResume( xHandle ); + + // The created task will once again get microcontroller processing + // time in accordance with its priority within the system. + } ++ * \defgroup vTaskResume vTaskResume + * \ingroup TaskCtrl + */ +void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void xTaskResumeFromISR( TaskHandle_t xTaskToResume );+ * + * INCLUDE_xTaskResumeFromISR must be defined as 1 for this function to be + * available. See the configuration section for more information. + * + * An implementation of vTaskResume() that can be called from within an ISR. + * + * A task that has been suspended by one or more calls to vTaskSuspend () + * will be made available for running again by a single call to + * xTaskResumeFromISR (). 
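+ *
+ * Example use (a minimal sketch; vAnExampleISR and xSuspendedTask are
+ * illustrative names, and the note below about synchronisation still
+ * applies):
+
+ void vAnExampleISR( void )
+ {
+ BaseType_t xYieldRequired;
+
+     // Ready the previously suspended task.
+     xYieldRequired = xTaskResumeFromISR( xSuspendedTask );
+
+     // Request a context switch before exiting the ISR if the resumed task
+     // has a higher priority than the task that was interrupted.  The
+     // correct macro is port specific; portYIELD_FROM_ISR() is common.
+     portYIELD_FROM_ISR( xYieldRequired );
+ }
+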
+ * + * xTaskResumeFromISR() should not be used to synchronise a task with an + * interrupt if there is a chance that the interrupt could arrive prior to the + * task being suspended - as this can lead to interrupts being missed. Use of a + * semaphore as a synchronisation mechanism would avoid this eventuality. + * + * @param xTaskToResume Handle to the task being readied. + * + * @return pdTRUE if resuming the task should result in a context switch, + * otherwise pdFALSE. This is used by the ISR to determine if a context switch + * may be required following the ISR. + * + * \defgroup vTaskResumeFromISR vTaskResumeFromISR + * \ingroup TaskCtrl + */ +BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; + +/*----------------------------------------------------------- + * SCHEDULER CONTROL + *----------------------------------------------------------*/ + +/** + * task. h + *void vTaskStartScheduler( void );+ * + * Starts the real time kernel tick processing. After calling the kernel + * has control over which tasks are executed and when. + * + * See the demo application file main.c for an example of creating + * tasks and starting the kernel. + * + * Example usage: ++ void vAFunction( void ) + { + // Create at least one task before starting the kernel. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, NULL ); + + // Start the real time kernel with preemption. + vTaskStartScheduler (); + + // Will not get here unless a task calls vTaskEndScheduler () + } ++ * + * \defgroup vTaskStartScheduler vTaskStartScheduler + * \ingroup SchedulerControl + */ +void vTaskStartScheduler( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskEndScheduler( void );+ * + * NOTE: At the time of writing only the x86 real mode port, which runs on a PC + * in place of DOS, implements this function. + * + * Stops the real time kernel tick. All created tasks will be automatically + * deleted and multitasking (either preemptive or cooperative) will + * stop. Execution then resumes from the point where vTaskStartScheduler () + * was called, as if vTaskStartScheduler () had just returned. + * + * See the demo application file main. c in the demo/PC directory for an + * example that uses vTaskEndScheduler (). + * + * vTaskEndScheduler () requires an exit function to be defined within the + * portable layer (see vPortEndScheduler () in port. c for the PC port). This + * performs hardware specific operations such as stopping the kernel tick. + * + * vTaskEndScheduler () will cause all of the resources allocated by the + * kernel to be freed - but will not free resources allocated by application + * tasks. + * + * Example usage: ++ void vTaskCode( void * pvParameters ) + { + for( ;; ) + { + // Task code goes here. + + // At some point we want to end the real time kernel processing + // so call ... + vTaskEndScheduler (); + } + } + + void vAFunction( void ) + { + // Create at least one task before starting the kernel. + xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, NULL ); + + // Start the real time kernel with preemption. + vTaskStartScheduler (); + + // Will only get here when the vTaskCode () task has called + // vTaskEndScheduler (). When we get here we are back to single task + // execution. + } ++ * + * \defgroup vTaskEndScheduler vTaskEndScheduler + * \ingroup SchedulerControl + */ +void vTaskEndScheduler( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *void vTaskSuspendAll( void );+ * + * Suspends the scheduler without disabling interrupts. 
Context switches will + * not occur while the scheduler is suspended. + * + * After calling vTaskSuspendAll () the calling task will continue to execute + * without risk of being swapped out until a call to xTaskResumeAll () has been + * made. + * + * API functions that have the potential to cause a context switch (for example, + * vTaskDelayUntil(), xQueueSend(), etc.) must not be called while the scheduler + * is suspended. + * + * Example usage: ++ void vTask1( void * pvParameters ) + { + for( ;; ) + { + // Task code goes here. + + // ... + + // At some point the task wants to perform a long operation during + // which it does not want to get swapped out. It cannot use + // taskENTER_CRITICAL ()/taskEXIT_CRITICAL () as the length of the + // operation may cause interrupts to be missed - including the + // ticks. + + // Prevent the real time kernel swapping out the task. + vTaskSuspendAll (); + + // Perform the operation here. There is no need to use critical + // sections as we have all the microcontroller processing time. + // During this time interrupts will still operate and the kernel + // tick count will be maintained. + + // ... + + // The operation is complete. Restart the kernel. + xTaskResumeAll (); + } + } ++ * \defgroup vTaskSuspendAll vTaskSuspendAll + * \ingroup SchedulerControl + */ +void vTaskSuspendAll( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *BaseType_t xTaskResumeAll( void );+ * + * Resumes scheduler activity after it was suspended by a call to + * vTaskSuspendAll(). + * + * xTaskResumeAll() only resumes the scheduler. It does not unsuspend tasks + * that were previously suspended by a call to vTaskSuspend(). + * + * @return If resuming the scheduler caused a context switch then pdTRUE is + * returned, otherwise pdFALSE is returned. + * + * Example usage: ++ void vTask1( void * pvParameters ) + { + for( ;; ) + { + // Task code goes here. + + // ... + + // At some point the task wants to perform a long operation during + // which it does not want to get swapped out. It cannot use + // taskENTER_CRITICAL ()/taskEXIT_CRITICAL () as the length of the + // operation may cause interrupts to be missed - including the + // ticks. + + // Prevent the real time kernel swapping out the task. + vTaskSuspendAll (); + + // Perform the operation here. There is no need to use critical + // sections as we have all the microcontroller processing time. + // During this time interrupts will still operate and the real + // time kernel tick count will be maintained. + + // ... + + // The operation is complete. Restart the kernel. We want to force + // a context switch - but there is no point if resuming the scheduler + // caused a context switch already. + if( !xTaskResumeAll () ) + { + taskYIELD (); + } + } + } ++ * \defgroup xTaskResumeAll xTaskResumeAll + * \ingroup SchedulerControl + */ +BaseType_t xTaskResumeAll( void ) PRIVILEGED_FUNCTION; + +/*----------------------------------------------------------- + * TASK UTILITIES + *----------------------------------------------------------*/ + +/** + * task. h + *TickType_t xTaskGetTickCount( void );+ * + * @return The count of ticks since vTaskStartScheduler was called. + * + * \defgroup xTaskGetTickCount xTaskGetTickCount + * \ingroup TaskUtils + */ +TickType_t xTaskGetTickCount( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *TickType_t xTaskGetTickCountFromISR( void );+ * + * @return The count of ticks since vTaskStartScheduler was called. 
+ *
+ * This is a version of xTaskGetTickCount() that is safe to be called from an
+ * ISR - provided that TickType_t is the natural word size of the
+ * microcontroller being used or interrupt nesting is either not supported or
+ * not being used.
+ *
+ * \defgroup xTaskGetTickCountFromISR xTaskGetTickCountFromISR
+ * \ingroup TaskUtils
+ */
+TickType_t xTaskGetTickCountFromISR( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ *UBaseType_t uxTaskGetNumberOfTasks( void );+ *
+ * @return The number of tasks that the real time kernel is currently managing.
+ * This includes all ready, blocked and suspended tasks. A task that
+ * has been deleted but not yet freed by the idle task will also be
+ * included in the count.
+ *
+ * \defgroup uxTaskGetNumberOfTasks uxTaskGetNumberOfTasks
+ * \ingroup TaskUtils
+ */
+UBaseType_t uxTaskGetNumberOfTasks( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ *char *pcTaskGetName( TaskHandle_t xTaskToQuery );+ *
+ * @return The text (human readable) name of the task referenced by the handle
+ * xTaskToQuery. A task can query its own name by either passing in its own
+ * handle, or by setting xTaskToQuery to NULL.
+ *
+ * \defgroup pcTaskGetName pcTaskGetName
+ * \ingroup TaskUtils
+ */
+char *pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+
+/**
+ * task. h
+ *TaskHandle_t xTaskGetHandle( const char *pcNameToQuery );+ *
+ * NOTE: This function takes a relatively long time to complete and should be
+ * used sparingly.
+ *
+ * @return The handle of the task that has the human readable name pcNameToQuery.
+ * NULL is returned if no matching name is found. INCLUDE_xTaskGetHandle
+ * must be set to 1 in FreeRTOSConfig.h for xTaskGetHandle() to be available.
+ *
+ * \defgroup xTaskGetHandle xTaskGetHandle
+ * \ingroup TaskUtils
+ */
+TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+
+/**
+ * task.h
+ *UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask );+ *
+ * INCLUDE_uxTaskGetStackHighWaterMark must be set to 1 in FreeRTOSConfig.h for
+ * this function to be available.
+ *
+ * Returns the high water mark of the stack associated with xTask. That is,
+ * the minimum free stack space there has been (in words, so on a 32 bit machine
+ * a value of 1 means 4 bytes) since the task started. The smaller the returned
+ * number the closer the task has come to overflowing its stack.
+ *
+ * uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
+ * same except for their return type. Using configSTACK_DEPTH_TYPE allows the
+ * user to determine the return type. It gets around the problem of the value
+ * overflowing on 8-bit types without breaking backward compatibility for
+ * applications that expect an 8-bit return type.
+ *
+ * @param xTask Handle of the task associated with the stack to be checked.
+ * Set xTask to NULL to check the stack of the calling task.
+ *
+ * @return The smallest amount of free stack space there has been (in words, so
+ * actual spaces on the stack rather than bytes) since the task referenced by
+ * xTask was created.
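+ *
+ * Example usage (a minimal sketch - the task function vTaskCode and the way
+ * the return value is used below are illustrative only):
+
+ void vTaskCode( void * pvParameters )
+ {
+ UBaseType_t uxHighWaterMark;
+
+     // Inspect the calling task's own high water mark on entering the task.
+     uxHighWaterMark = uxTaskGetStackHighWaterMark( NULL );
+
+     for( ;; )
+     {
+         // Call any function(s) - they will consume some stack.
+         // ...
+
+         // Re-reading the high water mark shows the worst case (smallest
+         // amount of free stack) seen so far. The value can only fall, or
+         // stay the same, over the life of the task.
+         uxHighWaterMark = uxTaskGetStackHighWaterMark( NULL );
+     }
+ }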
+ */
+UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+/**
+ * task.h
+ *configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask );+ *
+ * INCLUDE_uxTaskGetStackHighWaterMark2 must be set to 1 in FreeRTOSConfig.h for
+ * this function to be available.
+ *
+ * Returns the high water mark of the stack associated with xTask. That is,
+ * the minimum free stack space there has been (in words, so on a 32 bit machine
+ * a value of 1 means 4 bytes) since the task started. The smaller the returned
+ * number the closer the task has come to overflowing its stack.
+ *
+ * uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
+ * same except for their return type. Using configSTACK_DEPTH_TYPE allows the
+ * user to determine the return type. It gets around the problem of the value
+ * overflowing on 8-bit types without breaking backward compatibility for
+ * applications that expect an 8-bit return type.
+ *
+ * @param xTask Handle of the task associated with the stack to be checked.
+ * Set xTask to NULL to check the stack of the calling task.
+ *
+ * @return The smallest amount of free stack space there has been (in words, so
+ * actual spaces on the stack rather than bytes) since the task referenced by
+ * xTask was created.
+ */
+configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+/* When using trace macros it is sometimes necessary to include task.h before
+FreeRTOS.h. When this is done TaskHookFunction_t will not yet have been defined,
+so the following two prototypes will cause a compilation error. This can be
+fixed by simply guarding against the inclusion of these two prototypes unless
+they are explicitly required by the configUSE_APPLICATION_TASK_TAG configuration
+constant. */
+#ifdef configUSE_APPLICATION_TASK_TAG
+ #if configUSE_APPLICATION_TASK_TAG == 1
+ /**
+ * task.h
+ *void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction );+ *
+ * Sets pxHookFunction to be the task hook function used by the task xTask.
+ * Passing xTask as NULL has the effect of setting the calling task's hook
+ * function.
+ */
+ void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction ) PRIVILEGED_FUNCTION;
+
+ /**
+ * task.h
+ *TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask );+ *
+ * Returns the pxHookFunction value assigned to the task xTask. Do not
+ * call from an interrupt service routine - call
+ * xTaskGetApplicationTaskTagFromISR() instead.
+ */
+ TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+
+ /**
+ * task.h
+ *TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask );+ *
+ * Returns the pxHookFunction value assigned to the task xTask. Can
+ * be called from an interrupt service routine.
+ */
+ TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
+ #endif /* configUSE_APPLICATION_TASK_TAG == 1 */
+#endif /* ifdef configUSE_APPLICATION_TASK_TAG */
+
+#if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
+
+ /* Each task contains an array of pointers that is dimensioned by the
+ configNUM_THREAD_LOCAL_STORAGE_POINTERS setting in FreeRTOSConfig.h. The
+ kernel does not use the pointers itself, so the application writer can use
+ the pointers for any purpose they wish. The following two functions are
+ used to set and query a pointer respectively.
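+
+ For example (a brief sketch - the use of index 0 and the variable names are
+ illustrative assumptions, and configNUM_THREAD_LOCAL_STORAGE_POINTERS must
+ be at least 1):
+
+     uint32_t ulVariable = 10UL;
+     uint32_t *pulReadBack;
+
+     // Store the address of ulVariable in index 0 of the calling task's array.
+     vTaskSetThreadLocalStoragePointer( NULL, 0, ( void * ) &ulVariable );
+
+     // Query the pointer back from index 0 of the calling task's array.
+     pulReadBack = ( uint32_t * ) pvTaskGetThreadLocalStoragePointer( NULL, 0 );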
*/
+ void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue ) PRIVILEGED_FUNCTION;
+ void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ) PRIVILEGED_FUNCTION;
+
+#endif
+
+/**
+ * task.h
+ *BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter );+ *
+ * Calls the hook function associated with xTask. Passing xTask as NULL has
+ * the effect of calling the Running task's (the calling task's) hook function.
+ *
+ * pvParameter is passed to the hook function for the task to interpret as it
+ * wants. The return value is the value returned by the task hook function
+ * registered by the user.
+ */
+BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter ) PRIVILEGED_FUNCTION;
+
+/**
+ * xTaskGetIdleTaskHandle() is only available if
+ * INCLUDE_xTaskGetIdleTaskHandle is set to 1 in FreeRTOSConfig.h.
+ *
+ * Simply returns the handle of the idle task. It is not valid to call
+ * xTaskGetIdleTaskHandle() before the scheduler has been started.
+ */
+TaskHandle_t xTaskGetIdleTaskHandle( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * configUSE_TRACE_FACILITY must be defined as 1 in FreeRTOSConfig.h for
+ * uxTaskGetSystemState() to be available.
+ *
+ * uxTaskGetSystemState() populates a TaskStatus_t structure for each task in
+ * the system. TaskStatus_t structures contain, among other things, members
+ * for the task handle, task name, task priority, task state, and total amount
+ * of run time consumed by the task. See the TaskStatus_t structure
+ * definition in this file for the full member list.
+ *
+ * NOTE: This function is intended for debugging use only as its use results in
+ * the scheduler remaining suspended for an extended period.
+ *
+ * @param pxTaskStatusArray A pointer to an array of TaskStatus_t structures.
+ * The array must contain at least one TaskStatus_t structure for each task
+ * that is under the control of the RTOS. The number of tasks under the control
+ * of the RTOS can be determined using the uxTaskGetNumberOfTasks() API function.
+ *
+ * @param uxArraySize The size of the array pointed to by the pxTaskStatusArray
+ * parameter. The size is specified as the number of indexes in the array, or
+ * the number of TaskStatus_t structures contained in the array, not by the
+ * number of bytes in the array.
+ *
+ * @param pulTotalRunTime If configGENERATE_RUN_TIME_STATS is set to 1 in
+ * FreeRTOSConfig.h then *pulTotalRunTime is set by uxTaskGetSystemState() to the
+ * total run time (as defined by the run time stats clock, see
+ * http://www.freertos.org/rtos-run-time-stats.html) since the target booted.
+ * pulTotalRunTime can be set to NULL to omit the total run time information.
+ *
+ * @return The number of TaskStatus_t structures that were populated by
+ * uxTaskGetSystemState(). This should equal the number returned by the
+ * uxTaskGetNumberOfTasks() API function, but will be zero if the value passed
+ * in the uxArraySize parameter was too small.
+ *
+ * Example usage:
+
+ // This example demonstrates how a human readable table of run time stats
+ // information is generated from raw data provided by uxTaskGetSystemState().
+ // The human readable table is written to pcWriteBuffer
+ void vTaskGetRunTimeStats( char *pcWriteBuffer )
+ {
+ TaskStatus_t *pxTaskStatusArray;
+ volatile UBaseType_t uxArraySize, x;
+ uint32_t ulTotalRunTime, ulStatsAsPercentage;
+
+ // Make sure the write buffer does not contain a string.
+ *pcWriteBuffer = 0x00;
+
+ // Take a snapshot of the number of tasks in case it changes while this
+ // function is executing.
+ uxArraySize = uxTaskGetNumberOfTasks();
+
+ // Allocate a TaskStatus_t structure for each task. An array could be
+ // allocated statically at compile time.
+ pxTaskStatusArray = pvPortMalloc( uxArraySize * sizeof( TaskStatus_t ) );
+
+ if( pxTaskStatusArray != NULL )
+ {
+ // Generate raw status information about each task.
+ uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalRunTime );
+
+ // For percentage calculations.
+ ulTotalRunTime /= 100UL;
+
+ // Avoid divide by zero errors.
+ if( ulTotalRunTime > 0 )
+ {
+ // For each populated position in the pxTaskStatusArray array,
+ // format the raw data as human readable ASCII data
+ for( x = 0; x < uxArraySize; x++ )
+ {
+ // What percentage of the total run time has the task used?
+ // This will always be rounded down to the nearest integer.
+ // ulTotalRunTime has already been divided by 100.
+ ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalRunTime;
+
+ if( ulStatsAsPercentage > 0UL )
+ {
+ sprintf( pcWriteBuffer, "%s\t\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].pcTaskName, pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
+ }
+ else
+ {
+ // If the percentage is zero here then the task has
+ // consumed less than 1% of the total run time.
+ sprintf( pcWriteBuffer, "%s\t\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].pcTaskName, pxTaskStatusArray[ x ].ulRunTimeCounter );
+ }
+
+ pcWriteBuffer += strlen( ( char * ) pcWriteBuffer );
+ }
+ }
+
+ // The array is no longer needed, free the memory it consumes.
+ vPortFree( pxTaskStatusArray );
+ }
+ }
+
+ */
+UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime ) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ *void vTaskList( char *pcWriteBuffer );+ *
+ * configUSE_TRACE_FACILITY and configUSE_STATS_FORMATTING_FUNCTIONS must
+ * both be defined as 1 for this function to be available. See the
+ * configuration section of the FreeRTOS.org website for more information.
+ *
+ * NOTE 1: This function will disable interrupts for its duration. It is
+ * not intended for normal application runtime use but as a debug aid.
+ *
+ * Lists all the current tasks, along with their current state and stack
+ * usage high water mark.
+ *
+ * Tasks are reported as blocked ('B'), ready ('R'), deleted ('D') or
+ * suspended ('S').
+ *
+ * PLEASE NOTE:
+ *
+ * This function is provided for convenience only, and is used by many of the
+ * demo applications. Do not consider it to be part of the scheduler.
+ *
+ * vTaskList() calls uxTaskGetSystemState(), then formats part of the
+ * uxTaskGetSystemState() output into a human readable table that displays task
+ * names, states and stack usage.
+ *
+ * vTaskList() has a dependency on the sprintf() C library function that might
+ * bloat the code size, use a lot of stack, and provide different results on
+ * different platforms. An alternative, tiny, third party, and limited
+ * functionality implementation of sprintf() is provided in many of the
+ * FreeRTOS/Demo sub-directories in a file called printf-stdarg.c (note
+ * printf-stdarg.c does not provide a full snprintf() implementation!).
+ *
+ * It is recommended that production systems call uxTaskGetSystemState()
+ * directly to get access to raw stats data, rather than indirectly through a
+ * call to vTaskList().
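+ *
+ * Example usage (a short sketch - the 512 byte buffer and the use of printf()
+ * are assumptions; size the buffer at roughly 40 bytes per task and use
+ * whatever output facility the application provides):
+
+ void vPrintTaskTable( void )
+ {
+     // Static, so the table is not built on the task's stack.
+     static char cTaskListBuffer[ 512 ];
+
+     // Write one row per task: name, state, priority, stack high water
+     // mark and task number.
+     vTaskList( cTaskListBuffer );
+     printf( "%s", cTaskListBuffer );
+ }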
+ *
+ * @param pcWriteBuffer A buffer into which the above mentioned details
+ * will be written, in ASCII form. This buffer is assumed to be large
+ * enough to contain the generated report. Approximately 40 bytes per
+ * task should be sufficient.
+ *
+ * \defgroup vTaskList vTaskList
+ * \ingroup TaskUtils
+ */
+void vTaskList( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+
+/**
+ * task. h
+ *void vTaskGetRunTimeStats( char *pcWriteBuffer );+ *
+ * configGENERATE_RUN_TIME_STATS and configUSE_STATS_FORMATTING_FUNCTIONS
+ * must both be defined as 1 for this function to be available. The application
+ * must also then provide definitions for
+ * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and portGET_RUN_TIME_COUNTER_VALUE()
+ * to configure a peripheral timer/counter and return the timer's current count
+ * value, respectively. The counter should be at least 10 times the frequency of
+ * the tick count.
+ *
+ * NOTE 1: This function will disable interrupts for its duration. It is
+ * not intended for normal application runtime use but as a debug aid.
+ *
+ * Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total
+ * accumulated execution time being stored for each task. The resolution
+ * of the accumulated time value depends on the frequency of the timer
+ * configured by the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() macro.
+ * Calling vTaskGetRunTimeStats() writes the total execution time of each
+ * task into a buffer, both as an absolute count value and as a percentage
+ * of the total system execution time.
+ *
+ * NOTE 2:
+ *
+ * This function is provided for convenience only, and is used by many of the
+ * demo applications. Do not consider it to be part of the scheduler.
+ *
+ * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part of the
+ * uxTaskGetSystemState() output into a human readable table that displays the
+ * amount of time each task has spent in the Running state in both absolute and
+ * percentage terms.
+ *
+ * vTaskGetRunTimeStats() has a dependency on the sprintf() C library function
+ * that might bloat the code size, use a lot of stack, and provide different
+ * results on different platforms. An alternative, tiny, third party, and
+ * limited functionality implementation of sprintf() is provided in many of the
+ * FreeRTOS/Demo sub-directories in a file called printf-stdarg.c (note
+ * printf-stdarg.c does not provide a full snprintf() implementation!).
+ *
+ * It is recommended that production systems call uxTaskGetSystemState() directly
+ * to get access to raw stats data, rather than indirectly through a call to
+ * vTaskGetRunTimeStats().
+ *
+ * @param pcWriteBuffer A buffer into which the execution times will be
+ * written, in ASCII form. This buffer is assumed to be large enough to
+ * contain the generated report. Approximately 40 bytes per task should
+ * be sufficient.
+ *
+ * \defgroup vTaskGetRunTimeStats vTaskGetRunTimeStats
+ * \ingroup TaskUtils
+ */
+void vTaskGetRunTimeStats( char *pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+
+/**
+* task. h
+*TickType_t xTaskGetIdleRunTimeCounter( void );+*
+* configGENERATE_RUN_TIME_STATS and configUSE_STATS_FORMATTING_FUNCTIONS
+* must both be defined as 1 for this function to be available. The application
+* must also then provide definitions for
+* portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and portGET_RUN_TIME_COUNTER_VALUE()
+* to configure a peripheral timer/counter and return the timer's current count
+* value, respectively. The counter should be at least 10 times the frequency of
+* the tick count.
+*
+* Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total
+* accumulated execution time being stored for each task. The resolution
+* of the accumulated time value depends on the frequency of the timer
+* configured by the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() macro.
+* While uxTaskGetSystemState() and vTaskGetRunTimeStats() write the total
+* execution time of each task into a buffer, xTaskGetIdleRunTimeCounter()
+* returns the total execution time of just the idle task.
+*
+* @return The total run time of the idle task. This is the amount of time the
+* idle task has actually been executing. The unit of time is dependent on the
+* frequency configured using the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and
+* portGET_RUN_TIME_COUNTER_VALUE() macros.
+*
+* \defgroup xTaskGetIdleRunTimeCounter xTaskGetIdleRunTimeCounter
+* \ingroup TaskUtils
+*/
+TickType_t xTaskGetIdleRunTimeCounter( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ *BaseType_t xTaskNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction );+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this
+ * function to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * A notification sent to a task will remain pending until it is cleared by the
+ * task calling xTaskNotifyWait() or ulTaskNotifyTake(). If the task was
+ * already in the Blocked state to wait for a notification when the notification
+ * arrives then the task will automatically be removed from the Blocked state
+ * (unblocked) and the notification cleared.
+ *
+ * A task can use xTaskNotifyWait() to [optionally] block to wait for a
+ * notification to be pending, or ulTaskNotifyTake() to [optionally] block
+ * to wait for its notification value to have a non-zero value. The task does
+ * not consume any CPU time while it is in the Blocked state.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details.
+ *
+ * @param xTaskToNotify The handle of the task being notified. The handle to a
+ * task can be returned from the xTaskCreate() API function used to create the
+ * task, and the handle of the currently running task can be obtained by calling
+ * xTaskGetCurrentTaskHandle().
+ *
+ * @param ulValue Data that can be sent with the notification. How the data is
+ * used depends on the value of the eAction parameter.
+ *
+ * @param eAction Specifies how the notification updates the task's notification
+ * value, if at all. Valid values for eAction are as follows:
+ *
+ * eSetBits -
+ * The task's notification value is bitwise ORed with ulValue. xTaskNotify()
+ * always returns pdPASS in this case.
+ *
+ * eIncrement -
+ * The task's notification value is incremented. ulValue is not used and
+ * xTaskNotify() always returns pdPASS in this case.
+ *
+ * eSetValueWithOverwrite -
+ * The task's notification value is set to the value of ulValue, even if the
+ * task being notified had not yet processed the previous notification (the
+ * task already had a notification pending). xTaskNotify() always returns
+ * pdPASS in this case.
+ *
+ * eSetValueWithoutOverwrite -
+ * If the task being notified did not already have a notification pending then
+ * the task's notification value is set to ulValue and xTaskNotify() will
+ * return pdPASS. If the task being notified already had a notification
+ * pending then no action is performed and pdFAIL is returned.
+ *
+ * eNoAction -
+ * The task receives a notification without its notification value being
+ * updated. ulValue is not used and xTaskNotify() always returns pdPASS in
+ * this case.
+ *
+ * pulPreviousNotificationValue -
+ * Can be used to pass out the subject task's notification value before any
+ * bits are modified by the notify function.
+ *
+ * @return Dependent on the value of eAction. See the description of the
+ * eAction parameter.
+ *
+ * \defgroup xTaskNotify xTaskNotify
+ * \ingroup TaskNotifications
+ */
+BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue ) PRIVILEGED_FUNCTION;
+#define xTaskNotify( xTaskToNotify, ulValue, eAction ) xTaskGenericNotify( ( xTaskToNotify ), ( ulValue ), ( eAction ), NULL )
+#define xTaskNotifyAndQuery( xTaskToNotify, ulValue, eAction, pulPreviousNotifyValue ) xTaskGenericNotify( ( xTaskToNotify ), ( ulValue ), ( eAction ), ( pulPreviousNotifyValue ) )
+
+/**
+ * task. h
+ *BaseType_t xTaskNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken );+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this
+ * function to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * A version of xTaskNotify() that can be used from an interrupt service routine
+ * (ISR).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * A notification sent to a task will remain pending until it is cleared by the
+ * task calling xTaskNotifyWait() or ulTaskNotifyTake(). If the task was
+ * already in the Blocked state to wait for a notification when the notification
+ * arrives then the task will automatically be removed from the Blocked state
+ * (unblocked) and the notification cleared.
+ *
+ * A task can use xTaskNotifyWait() to [optionally] block to wait for a
+ * notification to be pending, or ulTaskNotifyTake() to [optionally] block
+ * to wait for its notification value to have a non-zero value. The task does
+ * not consume any CPU time while it is in the Blocked state.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details.
+ *
+ * @param xTaskToNotify The handle of the task being notified. The handle to a
+ * task can be returned from the xTaskCreate() API function used to create the
+ * task, and the handle of the currently running task can be obtained by calling
+ * xTaskGetCurrentTaskHandle().
+ *
+ * @param ulValue Data that can be sent with the notification. How the data is
+ * used depends on the value of the eAction parameter.
+ *
+ * @param eAction Specifies how the notification updates the task's notification
+ * value, if at all. Valid values for eAction are as follows:
+ *
+ * eSetBits -
+ * The task's notification value is bitwise ORed with ulValue. xTaskNotify()
+ * always returns pdPASS in this case.
+ *
+ * eIncrement -
+ * The task's notification value is incremented. ulValue is not used and
+ * xTaskNotify() always returns pdPASS in this case.
+ *
+ * eSetValueWithOverwrite -
+ * The task's notification value is set to the value of ulValue, even if the
+ * task being notified had not yet processed the previous notification (the
+ * task already had a notification pending). xTaskNotify() always returns
+ * pdPASS in this case.
+ *
+ * eSetValueWithoutOverwrite -
+ * If the task being notified did not already have a notification pending then
+ * the task's notification value is set to ulValue and xTaskNotify() will
+ * return pdPASS. If the task being notified already had a notification
+ * pending then no action is performed and pdFAIL is returned.
+ *
+ * eNoAction -
+ * The task receives a notification without its notification value being
+ * updated. ulValue is not used and xTaskNotify() always returns pdPASS in
+ * this case.
+ *
+ * @param pxHigherPriorityTaskWoken xTaskNotifyFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending the notification caused the
+ * task to which the notification was sent to leave the Blocked state, and the
+ * unblocked task has a priority higher than the currently running task. If
+ * xTaskNotifyFromISR() sets this value to pdTRUE then a context switch should
+ * be requested before the interrupt is exited. How a context switch is
+ * requested from an ISR is dependent on the port - see the documentation page
+ * for the port in use.
+ *
+ * @return Dependent on the value of eAction. See the description of the
+ * eAction parameter.
+ *
+ * \defgroup xTaskNotify xTaskNotify
+ * \ingroup TaskNotifications
+ */
+BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+#define xTaskNotifyFromISR( xTaskToNotify, ulValue, eAction, pxHigherPriorityTaskWoken ) xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( ulValue ), ( eAction ), NULL, ( pxHigherPriorityTaskWoken ) )
+#define xTaskNotifyAndQueryFromISR( xTaskToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken ) xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( ulValue ), ( eAction ), ( pulPreviousNotificationValue ), ( pxHigherPriorityTaskWoken ) )
+
+/**
+ * task. h
+ *BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait );+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this
+ * function to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * A notification sent to a task will remain pending until it is cleared by the
+ * task calling xTaskNotifyWait() or ulTaskNotifyTake(). If the task was
+ * already in the Blocked state to wait for a notification when the notification
+ * arrives then the task will automatically be removed from the Blocked state
+ * (unblocked) and the notification cleared.
+ *
+ * A task can use xTaskNotifyWait() to [optionally] block to wait for a
+ * notification to be pending, or ulTaskNotifyTake() to [optionally] block
+ * to wait for its notification value to have a non-zero value. The task does
+ * not consume any CPU time while it is in the Blocked state.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details.
+ *
+ * @param ulBitsToClearOnEntry Bits that are set in ulBitsToClearOnEntry value
+ * will be cleared in the calling task's notification value before the task
+ * checks to see if any notifications are pending, and optionally blocks if no
+ * notifications are pending. Setting ulBitsToClearOnEntry to ULONG_MAX (if
+ * limits.h is included) or 0xffffffffUL (if limits.h is not included) will have
+ * the effect of resetting the task's notification value to 0. Setting
+ * ulBitsToClearOnEntry to 0 will leave the task's notification value unchanged.
+ *
+ * @param ulBitsToClearOnExit If a notification is pending or received before
+ * the calling task exits the xTaskNotifyWait() function then the task's
+ * notification value (see the xTaskNotify() API function) is passed out using
+ * the pulNotificationValue parameter. Then any bits that are set in
+ * ulBitsToClearOnExit will be cleared in the task's notification value (note
+ * *pulNotificationValue is set before any bits are cleared). Setting
+ * ulBitsToClearOnExit to ULONG_MAX (if limits.h is included) or 0xffffffffUL
+ * (if limits.h is not included) will have the effect of resetting the task's
+ * notification value to 0 before the function exits. Setting
+ * ulBitsToClearOnExit to 0 will leave the task's notification value unchanged
+ * when the function exits (in which case the value passed out in
+ * pulNotificationValue will match the task's notification value).
+ *
+ * @param pulNotificationValue Used to pass the task's notification value out
+ * of the function. Note the value passed out will not be affected by the
+ * clearing of any bits caused by ulBitsToClearOnExit being non-zero.
+ *
+ * @param xTicksToWait The maximum amount of time that the task should wait in
+ * the Blocked state for a notification to be received, should a notification
+ * not already be pending when xTaskNotifyWait() was called. The task
+ * will not consume any processing time while it is in the Blocked state. This
+ * is specified in kernel ticks, the macro pdMS_TO_TICKS( value_in_ms ) can be
+ * used to convert a time specified in milliseconds to a time specified in
+ * ticks.
+ *
+ * @return If a notification was received (including notifications that were
+ * already pending when xTaskNotifyWait was called) then pdPASS is
+ * returned. Otherwise pdFAIL is returned.
+ *
+ * \defgroup xTaskNotifyWait xTaskNotifyWait
+ * \ingroup TaskNotifications
+ */
+BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ *BaseType_t xTaskNotifyGive( TaskHandle_t xTaskToNotify );+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this macro
+ * to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * xTaskNotifyGive() is a helper macro intended for use when task notifications
+ * are used as light weight and faster binary or counting semaphore equivalents.
+ * Actual FreeRTOS semaphores are given using the xSemaphoreGive() API function,
+ * the equivalent action that instead uses a task notification is
+ * xTaskNotifyGive().
+ *
+ * When task notifications are being used as a binary or counting semaphore
+ * equivalent then the task being notified should wait for the notification
+ * using the ulTaskNotifyTake() API function rather than the
+ * xTaskNotifyWait() API function.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for more details.
+ *
+ * @param xTaskToNotify The handle of the task being notified. The handle to a
+ * task can be returned from the xTaskCreate() API function used to create the
+ * task, and the handle of the currently running task can be obtained by calling
+ * xTaskGetCurrentTaskHandle().
+ *
+ * @return xTaskNotifyGive() is a macro that calls xTaskNotify() with the
+ * eAction parameter set to eIncrement - so pdPASS is always returned.
+ *
+ * \defgroup xTaskNotifyGive xTaskNotifyGive
+ * \ingroup TaskNotifications
+ */
+#define xTaskNotifyGive( xTaskToNotify ) xTaskGenericNotify( ( xTaskToNotify ), ( 0 ), eIncrement, NULL )
+
+/**
+ * task. h
+ *void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken );
+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this macro
+ * to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * A version of xTaskNotifyGive() that can be called from an interrupt service
+ * routine (ISR).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * vTaskNotifyGiveFromISR() is intended for use when task notifications are
+ * used as light weight and faster binary or counting semaphore equivalents.
+ * Actual FreeRTOS semaphores are given from an ISR using the
+ * xSemaphoreGiveFromISR() API function, the equivalent action that instead uses
+ * a task notification is vTaskNotifyGiveFromISR().
+ *
+ * When task notifications are being used as a binary or counting semaphore
+ * equivalent then the task being notified should wait for the notification
+ * using the ulTaskNotifyTake() API function rather than the
+ * xTaskNotifyWait() API function.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for more details.
+ *
+ * @param xTaskToNotify The handle of the task being notified. The handle to a
+ * task can be returned from the xTaskCreate() API function used to create the
+ * task, and the handle of the currently running task can be obtained by calling
+ * xTaskGetCurrentTaskHandle().
+ *
+ * @param pxHigherPriorityTaskWoken vTaskNotifyGiveFromISR() will set
+ * *pxHigherPriorityTaskWoken to pdTRUE if sending the notification caused the
+ * task to which the notification was sent to leave the Blocked state, and the
+ * unblocked task has a priority higher than the currently running task. If
+ * vTaskNotifyGiveFromISR() sets this value to pdTRUE then a context switch
+ * should be requested before the interrupt is exited. How a context switch is
+ * requested from an ISR is dependent on the port - see the documentation page
+ * for the port in use.
+ *
+ * \defgroup vTaskNotifyGiveFromISR vTaskNotifyGiveFromISR
+ * \ingroup TaskNotifications
+ */
+void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ *uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait );+ *
+ * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this
+ * function to be available.
+ *
+ * When configUSE_TASK_NOTIFICATIONS is set to one each task has its own private
+ * "notification value", which is a 32-bit unsigned integer (uint32_t).
+ *
+ * Events can be sent to a task using an intermediary object. Examples of such
+ * objects are queues, semaphores, mutexes and event groups. Task notifications
+ * are a method of sending an event directly to a task without the need for such
+ * an intermediary object.
+ *
+ * A notification sent to a task can optionally perform an action, such as
+ * update, overwrite or increment the task's notification value. In that way
+ * task notifications can be used to send data to a task, or be used as light
+ * weight and fast binary or counting semaphores.
+ *
+ * ulTaskNotifyTake() is intended for use when a task notification is used as a
+ * faster and lighter weight binary or counting semaphore alternative. Actual
+ * FreeRTOS semaphores are taken using the xSemaphoreTake() API function, the
+ * equivalent action that instead uses a task notification is
+ * ulTaskNotifyTake().
+ *
+ * When a task is using its notification value as a binary or counting semaphore
+ * other tasks should send notifications to it using the xTaskNotifyGive()
+ * macro, or xTaskNotify() function with the eAction parameter set to
+ * eIncrement.
+ *
+ * ulTaskNotifyTake() can either clear the task's notification value to
+ * zero on exit, in which case the notification value acts like a binary
+ * semaphore, or decrement the task's notification value on exit, in which case
+ * the notification value acts like a counting semaphore.
+ *
+ * A task can use ulTaskNotifyTake() to [optionally] block to wait for
+ * the task's notification value to be non-zero. The task does not consume any
+ * CPU time while it is in the Blocked state.
+ *
+ * Whereas xTaskNotifyWait() will return when a notification is pending,
+ * ulTaskNotifyTake() will return when the task's notification value is
+ * not zero.
+ *
+ * See http://www.FreeRTOS.org/RTOS-task-notifications.html for details.
+ *
+ * @param xClearCountOnExit if xClearCountOnExit is pdFALSE then the task's
+ * notification value is decremented when the function exits. In this way the
+ * notification value acts like a counting semaphore. If xClearCountOnExit is
+ * not pdFALSE then the task's notification value is cleared to zero when the
+ * function exits. In this way the notification value acts like a binary
+ * semaphore.
+ *
+ * @param xTicksToWait The maximum amount of time that the task should wait in
+ * the Blocked state for the task's notification value to be greater than zero,
+ * should the count not already be greater than zero when
+ * ulTaskNotifyTake() was called. The task will not consume any processing
+ * time while it is in the Blocked state. This is specified in kernel ticks,
+ * the macro pdMS_TO_TICKS( value_in_ms ) can be used to convert a time
+ * specified in milliseconds to a time specified in ticks.
+ *
+ * @return The task's notification count before it is either cleared to zero or
+ * decremented (see the xClearCountOnExit parameter).
+ *
+ * \defgroup ulTaskNotifyTake ulTaskNotifyTake
+ * \ingroup TaskNotifications
+ */
+uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/**
+ * task. h
+ *BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask );+ *
+ * If the notification state of the task referenced by the handle xTask is
+ * eNotified, then set the task's notification state to eNotWaitingNotification.
+ * The task's notification value is not altered. Set xTask to NULL to clear the
+ * notification state of the calling task.
+ *
+ * @return pdTRUE if the task's notification state was set to
+ * eNotWaitingNotification, otherwise pdFALSE.
+ * \defgroup xTaskNotifyStateClear xTaskNotifyStateClear
+ * \ingroup TaskNotifications
+ */
+BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask );
+
+/*-----------------------------------------------------------
+ * SCHEDULER INTERNALS AVAILABLE FOR PORTING PURPOSES
+ *----------------------------------------------------------*/
+
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY
+ * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS
+ * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * Called from the real time kernel tick (either preemptive or cooperative),
+ * this increments the tick count and checks if any tasks that are blocked
+ * for a finite period require removing from a blocked list and placing on
+ * a ready list. If a non-zero value is returned then a context switch is
+ * required because either:
+ * + A task was removed from a blocked list because its timeout had expired,
+ * or
+ * + Time slicing is in use and there is a task of equal priority to the
+ * currently running task.
+ */
+BaseType_t xTaskIncrementTick( void ) PRIVILEGED_FUNCTION;
+
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
+ * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED.
+ *
+ * Removes the calling task from the ready list and places it both
+ * on the list of tasks waiting for a particular event, and the
+ * list of delayed tasks. The task will be removed from both lists
+ * and replaced on the ready list should either the event occur (and
+ * there be no higher priority tasks waiting on the same event) or
+ * the delay period expire.
+ *
+ * The 'unordered' version replaces the event list item value with the
+ * xItemValue value, and inserts the list item at the end of the list.
+ *
+ * The 'ordered' version uses the existing event list item value (which is the
+ * owning task's priority) to insert the list item into the event list in task
+ * priority order.
+ *
+ * @param pxEventList The list containing tasks that are blocked waiting
+ * for the event to occur.
+ *
+ * @param xItemValue The item value to use for the event list item when the
+ * event list is not ordered by task priority.
+ *
+ * @param xTicksToWait The maximum amount of time that the task should wait
+ * for the event to occur. This is specified in kernel ticks, the constant
+ * portTICK_PERIOD_MS can be used to convert kernel ticks into a real time
+ * period.
+ */
+void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
+
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
+ * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED.
+ *
+ * This function performs nearly the same function as vTaskPlaceOnEventList().
+ * The difference being that this function does not permit tasks to block
+ * indefinitely, whereas vTaskPlaceOnEventList() does.
+ *
+ */
+void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION;
+
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
+ * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED.
+ *
+ * Removes a task from both the specified event list and the list of blocked
+ * tasks, and places it on a ready queue.
+ *
+ * xTaskRemoveFromEventList()/vTaskRemoveFromUnorderedEventList() will be called
+ * if either an event occurs to unblock a task, or the block timeout period
+ * expires.
+ *
+ * xTaskRemoveFromEventList() is used when the event list is in task priority
+ * order. It removes the list item from the head of the event list as that will
+ * have the highest priority owning task of all the tasks on the event list.
+ * vTaskRemoveFromUnorderedEventList() is used when the event list is not + * ordered and the event list items hold something other than the owning tasks + * priority. In this case the event list item value is updated to the value + * passed in the xItemValue parameter. + * + * @return pdTRUE if the task being removed has a higher priority than the task + * making the call, otherwise pdFALSE. + */ +BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) PRIVILEGED_FUNCTION; +void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue ) PRIVILEGED_FUNCTION; + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY + * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS + * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * Sets the pointer to the current TCB to the TCB of the highest priority task + * that is ready to run. + */ +portDONT_DISCARD void vTaskSwitchContext( void ) PRIVILEGED_FUNCTION; + +/* + * THESE FUNCTIONS MUST NOT BE USED FROM APPLICATION CODE. THEY ARE USED BY + * THE EVENT BITS MODULE. + */ +TickType_t uxTaskResetEventItemValue( void ) PRIVILEGED_FUNCTION; + +/* + * Return the handle of the calling task. + */ +TaskHandle_t xTaskGetCurrentTaskHandle( void ) PRIVILEGED_FUNCTION; + +/* + * Capture the current time status for future reference. + */ +void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION; + +/* + * Compare the time status now with that previously captured to see if the + * timeout has expired. + */ +BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) PRIVILEGED_FUNCTION; + +/* + * Shortcut used by the queue implementation to prevent unnecessary call to + * taskYIELD(); + */ +void vTaskMissedYield( void ) PRIVILEGED_FUNCTION; + +/* + * Returns the scheduler state as taskSCHEDULER_RUNNING, + * taskSCHEDULER_NOT_STARTED or taskSCHEDULER_SUSPENDED. + */ +BaseType_t xTaskGetSchedulerState( void ) PRIVILEGED_FUNCTION; + +/* + * Raises the priority of the mutex holder to that of the calling task should + * the mutex holder have a priority less than the calling task. + */ +BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION; + +/* + * Set the priority of a task back to its proper priority in the case that it + * inherited a higher priority while it was holding a semaphore. + */ +BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION; + +/* + * If a higher priority task attempting to obtain a mutex caused a lower + * priority task to inherit the higher priority task's priority - but the higher + * priority task then timed out without obtaining the mutex, then the lower + * priority task will disinherit the priority again - but only down as far as + * the highest priority task that is still waiting for the mutex (if there were + * more than one task waiting for the mutex). + */ +void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask ) PRIVILEGED_FUNCTION; + +/* + * Get the uxTCBNumber assigned to the task referenced by the xTask parameter. + */ +UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/* + * Set the uxTaskNumber of the task referenced by the xTask parameter to + * uxHandle. 
+ */
+void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle ) PRIVILEGED_FUNCTION;
+
+/*
+ * Only available when configUSE_TICKLESS_IDLE is set to 1.
+ * If tickless mode is being used, or a low power mode is implemented, then
+ * the tick interrupt will not execute during idle periods. When this is the
+ * case, the tick count value maintained by the scheduler needs to be kept up
+ * to date with the actual execution time by being skipped forward by a time
+ * equal to the idle period.
+ */
+void vTaskStepTick( const TickType_t xTicksToJump ) PRIVILEGED_FUNCTION;
+
+/*
+ * Only available when configUSE_TICKLESS_IDLE is set to 1.
+ * Provided for use within portSUPPRESS_TICKS_AND_SLEEP() to allow the port
+ * specific sleep function to determine if it is ok to proceed with the sleep,
+ * and if it is ok to proceed, if it is ok to sleep indefinitely.
+ *
+ * This function is necessary because portSUPPRESS_TICKS_AND_SLEEP() is only
+ * called with the scheduler suspended, not from within a critical section. It
+ * is therefore possible for an interrupt to request a context switch between
+ * portSUPPRESS_TICKS_AND_SLEEP() and the low power mode actually being
+ * entered. eTaskConfirmSleepModeStatus() should be called from a short
+ * critical section between the timer being stopped and the sleep mode being
+ * entered to ensure it is ok to proceed into the sleep mode.
+ */
+eSleepModeStatus eTaskConfirmSleepModeStatus( void ) PRIVILEGED_FUNCTION;
+
+/*
+ * For internal use only. Increment the mutex held count when a mutex is
+ * taken and return the handle of the task that has taken the mutex.
+ */
+TaskHandle_t pvTaskIncrementMutexHeldCount( void ) PRIVILEGED_FUNCTION;
+
+/*
+ * For internal use only. Same as vTaskSetTimeOutState(), but without a critical
+ * section.
+ */
+void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION;
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* INC_TASK_H */
+
+
+
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/timers.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/timers.h
new file mode 100644
index 0000000..5abb7f1
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/include/timers.h
@@ -0,0 +1,1295 @@
+/*
+ * FreeRTOS Kernel V10.2.1
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+
+#ifndef TIMERS_H
+#define TIMERS_H
+
+#ifndef INC_FREERTOS_H
+ #error "include FreeRTOS.h must appear in source files before include timers.h"
+#endif
+
+/*lint -save -e537 These headers are only multiply included if the application code
+happens to also be including task.h. */
+#include "task.h"
+/*lint -restore */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*-----------------------------------------------------------
+ * MACROS AND DEFINITIONS
+ *----------------------------------------------------------*/
+
+/* IDs for commands that can be sent/received on the timer queue. These are to
+be used solely through the macros that make up the public software timer API,
+as defined below. The commands that are sent from interrupts must use the
+highest numbers as tmrFIRST_FROM_ISR_COMMAND is used to determine if the task
+or interrupt version of the queue send function should be used. */
+#define tmrCOMMAND_EXECUTE_CALLBACK_FROM_ISR ( ( BaseType_t ) -2 )
+#define tmrCOMMAND_EXECUTE_CALLBACK ( ( BaseType_t ) -1 )
+#define tmrCOMMAND_START_DONT_TRACE ( ( BaseType_t ) 0 )
+#define tmrCOMMAND_START ( ( BaseType_t ) 1 )
+#define tmrCOMMAND_RESET ( ( BaseType_t ) 2 )
+#define tmrCOMMAND_STOP ( ( BaseType_t ) 3 )
+#define tmrCOMMAND_CHANGE_PERIOD ( ( BaseType_t ) 4 )
+#define tmrCOMMAND_DELETE ( ( BaseType_t ) 5 )
+
+#define tmrFIRST_FROM_ISR_COMMAND ( ( BaseType_t ) 6 )
+#define tmrCOMMAND_START_FROM_ISR ( ( BaseType_t ) 6 )
+#define tmrCOMMAND_RESET_FROM_ISR ( ( BaseType_t ) 7 )
+#define tmrCOMMAND_STOP_FROM_ISR ( ( BaseType_t ) 8 )
+#define tmrCOMMAND_CHANGE_PERIOD_FROM_ISR ( ( BaseType_t ) 9 )
+
+
+/**
+ * Type by which software timers are referenced. For example, a call to
+ * xTimerCreate() returns a TimerHandle_t variable that can then be used to
+ * reference the subject timer in calls to other software timer API functions
+ * (for example, xTimerStart(), xTimerReset(), etc.).
+ */
+struct tmrTimerControl; /* The old naming convention is used to prevent breaking kernel aware debuggers. */
+typedef struct tmrTimerControl * TimerHandle_t;
+
+/*
+ * Defines the prototype to which timer callback functions must conform.
+ */
+typedef void (*TimerCallbackFunction_t)( TimerHandle_t xTimer );
+
+/*
+ * Defines the prototype to which functions used with the
+ * xTimerPendFunctionCallFromISR() function must conform.
+ */
+typedef void (*PendedFunction_t)( void *, uint32_t );
+
+/**
+ * TimerHandle_t xTimerCreate( const char * const pcTimerName,
+ * TickType_t xTimerPeriodInTicks,
+ * UBaseType_t uxAutoReload,
+ * void * pvTimerID,
+ * TimerCallbackFunction_t pxCallbackFunction );
+ *
+ * Creates a new software timer instance, and returns a handle by which the
+ * created software timer can be referenced.
+ *
+ * Internally, within the FreeRTOS implementation, software timers use a block
+ * of memory, in which the timer data structure is stored. If a software timer
+ * is created using xTimerCreate() then the required memory is automatically
+ * dynamically allocated inside the xTimerCreate() function. (see
+ * http://www.freertos.org/a00111.html). 
If a software timer is created using + * xTimerCreateStatic() then the application writer must provide the memory that + * will get used by the software timer. xTimerCreateStatic() therefore allows a + * software timer to be created without using any dynamic memory allocation. + * + * Timers are created in the dormant state. The xTimerStart(), xTimerReset(), + * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and + * xTimerChangePeriodFromISR() API functions can all be used to transition a + * timer into the active state. + * + * @param pcTimerName A text name that is assigned to the timer. This is done + * purely to assist debugging. The kernel itself only ever references a timer + * by its handle, and never by its name. + * + * @param xTimerPeriodInTicks The timer period. The time is defined in tick + * periods so the constant portTICK_PERIOD_MS can be used to convert a time that + * has been specified in milliseconds. For example, if the timer must expire + * after 100 ticks, then xTimerPeriodInTicks should be set to 100. + * Alternatively, if the timer must expire after 500ms, then xPeriod can be set + * to ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than or + * equal to 1000. + * + * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will + * expire repeatedly with a frequency set by the xTimerPeriodInTicks parameter. + * If uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and + * enter the dormant state after it expires. + * + * @param pvTimerID An identifier that is assigned to the timer being created. + * Typically this would be used in the timer callback function to identify which + * timer expired when the same callback function is assigned to more than one + * timer. + * + * @param pxCallbackFunction The function to call when the timer expires. + * Callback functions must have the prototype defined by TimerCallbackFunction_t, + * which is "void vCallbackFunction( TimerHandle_t xTimer );". + * + * @return If the timer is successfully created then a handle to the newly + * created timer is returned. If the timer cannot be created (because either + * there is insufficient FreeRTOS heap remaining to allocate the timer + * structures, or the timer period was set to 0) then NULL is returned. + * + * Example usage: + * @verbatim + * #define NUM_TIMERS 5 + * + * // An array to hold handles to the created timers. + * TimerHandle_t xTimers[ NUM_TIMERS ]; + * + * // An array to hold a count of the number of times each timer expires. + * int32_t lExpireCounters[ NUM_TIMERS ] = { 0 }; + * + * // Define a callback function that will be used by multiple timer instances. + * // The callback function does nothing but count the number of times the + * // associated timer expires, and stop the timer once the timer has expired + * // 10 times. + * void vTimerCallback( TimerHandle_t pxTimer ) + * { + * int32_t lArrayIndex; + * const int32_t xMaxExpiryCountBeforeStopping = 10; + * + * // Optionally do something if the pxTimer parameter is NULL. + * configASSERT( pxTimer ); + * + * // Which timer expired? + * lArrayIndex = ( int32_t ) pvTimerGetTimerID( pxTimer ); + * + * // Increment the number of times that pxTimer has expired. + * lExpireCounters[ lArrayIndex ] += 1; + * + * // If the timer has expired 10 times then stop it from running. 
+ * if( lExpireCounters[ lArrayIndex ] == xMaxExpiryCountBeforeStopping ) + * { + * // Do not use a block time if calling a timer API function from a + * // timer callback function, as doing so could cause a deadlock! + * xTimerStop( pxTimer, 0 ); + * } + * } + * + * void main( void ) + * { + * int32_t x; + * + * // Create then start some timers. Starting the timers before the scheduler + * // has been started means the timers will start running immediately that + * // the scheduler starts. + * for( x = 0; x < NUM_TIMERS; x++ ) + * { + * xTimers[ x ] = xTimerCreate( "Timer", // Just a text name, not used by the kernel. + * ( 100 * x ), // The timer period in ticks. + * pdTRUE, // The timers will auto-reload themselves when they expire. + * ( void * ) x, // Assign each timer a unique id equal to its array index. + * vTimerCallback // Each timer calls the same callback when it expires. + * ); + * + * if( xTimers[ x ] == NULL ) + * { + * // The timer was not created. + * } + * else + * { + * // Start the timer. No block time is specified, and even if one was + * // it would be ignored because the scheduler has not yet been + * // started. + * if( xTimerStart( xTimers[ x ], 0 ) != pdPASS ) + * { + * // The timer could not be set into the Active state. + * } + * } + * } + * + * // ... + * // Create tasks here. + * // ... + * + * // Starting the scheduler will start the timers running as they have already + * // been set into the active state. + * vTaskStartScheduler(); + * + * // Should not reach here. + * for( ;; ); + * } + * @endverbatim + */ +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + TimerHandle_t xTimerCreate( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction ) PRIVILEGED_FUNCTION; +#endif + +/** + * TimerHandle_t xTimerCreateStatic(const char * const pcTimerName, + * TickType_t xTimerPeriodInTicks, + * UBaseType_t uxAutoReload, + * void * pvTimerID, + * TimerCallbackFunction_t pxCallbackFunction, + * StaticTimer_t *pxTimerBuffer ); + * + * Creates a new software timer instance, and returns a handle by which the + * created software timer can be referenced. + * + * Internally, within the FreeRTOS implementation, software timers use a block + * of memory, in which the timer data structure is stored. If a software timer + * is created using xTimerCreate() then the required memory is automatically + * dynamically allocated inside the xTimerCreate() function. (see + * http://www.freertos.org/a00111.html). If a software timer is created using + * xTimerCreateStatic() then the application writer must provide the memory that + * will get used by the software timer. xTimerCreateStatic() therefore allows a + * software timer to be created without using any dynamic memory allocation. + * + * Timers are created in the dormant state. The xTimerStart(), xTimerReset(), + * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and + * xTimerChangePeriodFromISR() API functions can all be used to transition a + * timer into the active state. + * + * @param pcTimerName A text name that is assigned to the timer. This is done + * purely to assist debugging. The kernel itself only ever references a timer + * by its handle, and never by its name. + * + * @param xTimerPeriodInTicks The timer period. 
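+ * (Illustrative aside, not part of the upstream text: on this kernel version
+ * the same conversion can be written with the pdMS_TO_TICKS() helper from
+ * projdefs.h, which remains correct even when configTICK_RATE_HZ is greater
+ * than 1000:
+ *
+ *     const TickType_t xPeriodInTicks = pdMS_TO_TICKS( 500 ); // 500ms expressed in ticks.
+ * )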
The time is defined in tick + * periods so the constant portTICK_PERIOD_MS can be used to convert a time that + * has been specified in milliseconds. For example, if the timer must expire + * after 100 ticks, then xTimerPeriodInTicks should be set to 100. + * Alternatively, if the timer must expire after 500ms, then xPeriod can be set + * to ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than or + * equal to 1000. + * + * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will + * expire repeatedly with a frequency set by the xTimerPeriodInTicks parameter. + * If uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and + * enter the dormant state after it expires. + * + * @param pvTimerID An identifier that is assigned to the timer being created. + * Typically this would be used in the timer callback function to identify which + * timer expired when the same callback function is assigned to more than one + * timer. + * + * @param pxCallbackFunction The function to call when the timer expires. + * Callback functions must have the prototype defined by TimerCallbackFunction_t, + * which is "void vCallbackFunction( TimerHandle_t xTimer );". + * + * @param pxTimerBuffer Must point to a variable of type StaticTimer_t, which + * will be then be used to hold the software timer's data structures, removing + * the need for the memory to be allocated dynamically. + * + * @return If the timer is created then a handle to the created timer is + * returned. If pxTimerBuffer was NULL then NULL is returned. + * + * Example usage: + * @verbatim + * + * // The buffer used to hold the software timer's data structure. + * static StaticTimer_t xTimerBuffer; + * + * // A variable that will be incremented by the software timer's callback + * // function. + * UBaseType_t uxVariableToIncrement = 0; + * + * // A software timer callback function that increments a variable passed to + * // it when the software timer was created. After the 5th increment the + * // callback function stops the software timer. + * static void prvTimerCallback( TimerHandle_t xExpiredTimer ) + * { + * UBaseType_t *puxVariableToIncrement; + * BaseType_t xReturned; + * + * // Obtain the address of the variable to increment from the timer ID. + * puxVariableToIncrement = ( UBaseType_t * ) pvTimerGetTimerID( xExpiredTimer ); + * + * // Increment the variable to show the timer callback has executed. + * ( *puxVariableToIncrement )++; + * + * // If this callback has executed the required number of times, stop the + * // timer. + * if( *puxVariableToIncrement == 5 ) + * { + * // This is called from a timer callback so must not block. + * xTimerStop( xExpiredTimer, staticDONT_BLOCK ); + * } + * } + * + * + * void main( void ) + * { + * // Create the software time. xTimerCreateStatic() has an extra parameter + * // than the normal xTimerCreate() API function. The parameter is a pointer + * // to the StaticTimer_t structure that will hold the software timer + * // structure. If the parameter is passed as NULL then the structure will be + * // allocated dynamically, just as if xTimerCreate() had been called. + * xTimer = xTimerCreateStatic( "T1", // Text name for the task. Helps debugging only. Not used by FreeRTOS. + * xTimerPeriod, // The period of the timer in ticks. + * pdTRUE, // This is an auto-reload timer. + * ( void * ) &uxVariableToIncrement, // A variable incremented by the software timer's callback function + * prvTimerCallback, // The function to execute when the timer expires. 
+ *                                  &xTimerBuffer );  // The buffer that will hold the software timer structure.
+ *
+ *     // The scheduler has not started yet so a block time is not used.
+ *     xReturned = xTimerStart( xTimer, 0 );
+ *
+ *     // ...
+ *     // Create tasks here.
+ *     // ...
+ *
+ *     // Starting the scheduler will start the timers running as they have already
+ *     // been set into the active state.
+ *     vTaskStartScheduler();
+ *
+ *     // Should not reach here.
+ *     for( ;; );
+ * }
+ * @endverbatim
+ */
+#if( configSUPPORT_STATIC_ALLOCATION == 1 )
+    TimerHandle_t xTimerCreateStatic( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+                                      const TickType_t xTimerPeriodInTicks,
+                                      const UBaseType_t uxAutoReload,
+                                      void * const pvTimerID,
+                                      TimerCallbackFunction_t pxCallbackFunction,
+                                      StaticTimer_t *pxTimerBuffer ) PRIVILEGED_FUNCTION;
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+/**
+ * void *pvTimerGetTimerID( TimerHandle_t xTimer );
+ *
+ * Returns the ID assigned to the timer.
+ *
+ * IDs are assigned to timers using the pvTimerID parameter of the call to
+ * xTimerCreate() that was used to create the timer, and by calling the
+ * vTimerSetTimerID() API function.
+ *
+ * If the same callback function is assigned to multiple timers then the timer
+ * ID can be used as time specific (timer local) storage.
+ *
+ * @param xTimer The timer being queried.
+ *
+ * @return The ID assigned to the timer being queried.
+ *
+ * Example usage:
+ *
+ * See the xTimerCreate() API function example usage scenario.
+ */
+void *pvTimerGetTimerID( const TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+/**
+ * void vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID );
+ *
+ * Sets the ID assigned to the timer.
+ *
+ * IDs are assigned to timers using the pvTimerID parameter of the call to
+ * xTimerCreate() that was used to create the timer.
+ *
+ * If the same callback function is assigned to multiple timers then the timer
+ * ID can be used as time specific (timer local) storage.
+ *
+ * @param xTimer The timer being updated.
+ *
+ * @param pvNewID The ID to assign to the timer.
+ *
+ * Example usage:
+ *
+ * See the xTimerCreate() API function example usage scenario.
+ */
+void vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ) PRIVILEGED_FUNCTION;
+
+/**
+ * BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer );
+ *
+ * Queries a timer to see if it is active or dormant.
+ *
+ * A timer will be dormant if:
+ *     1) It has been created but not started, or
+ *     2) It is an expired one-shot timer that has not been restarted.
+ *
+ * Timers are created in the dormant state. The xTimerStart(), xTimerReset(),
+ * xTimerStartFromISR(), xTimerResetFromISR(), xTimerChangePeriod() and
+ * xTimerChangePeriodFromISR() API functions can all be used to transition a timer into the
+ * active state.
+ *
+ * @param xTimer The timer being queried.
+ *
+ * @return pdFALSE will be returned if the timer is dormant. A value other than
+ * pdFALSE will be returned if the timer is active.
+ *
+ * Example usage:
+ * @verbatim
+ * // This function assumes xTimer has already been created.
+ * void vAFunction( TimerHandle_t xTimer )
+ * {
+ *     if( xTimerIsTimerActive( xTimer ) != pdFALSE ) // or more simply and equivalently "if( xTimerIsTimerActive( xTimer ) )"
+ *     {
+ *         // xTimer is active, do something.
+ *     }
+ *     else
+ *     {
+ *         // xTimer is not active, do something else.
+ *     }
+ * }
+ * @endverbatim
+ */
+BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
+
+/**
+ * TaskHandle_t xTimerGetTimerDaemonTaskHandle( void );
+ *
+ * Simply returns the handle of the timer service/daemon task. It is not valid
+ * to call xTimerGetTimerDaemonTaskHandle() before the scheduler has been started.
+ */
+TaskHandle_t xTimerGetTimerDaemonTaskHandle( void ) PRIVILEGED_FUNCTION;
+
+/**
+ * BaseType_t xTimerStart( TimerHandle_t xTimer, TickType_t xTicksToWait );
+ *
+ * Timer functionality is provided by a timer service/daemon task. Many of the
+ * public FreeRTOS timer API functions send commands to the timer service task
+ * through a queue called the timer command queue. The timer command queue is
+ * private to the kernel itself and is not directly accessible to application
+ * code. The length of the timer command queue is set by the
+ * configTIMER_QUEUE_LENGTH configuration constant.
+ *
+ * xTimerStart() starts a timer that was previously created using the
+ * xTimerCreate() API function. If the timer had already been started and was
+ * already in the active state, then xTimerStart() has equivalent functionality
+ * to the xTimerReset() API function.
+ *
+ * Starting a timer ensures the timer is in the active state. If the timer
+ * is not stopped, deleted, or reset in the meantime, the callback function
+ * associated with the timer will get called 'n' ticks after xTimerStart() was
+ * called, where 'n' is the timer's defined period.
+ *
+ * It is valid to call xTimerStart() before the scheduler has been started, but
+ * when this is done the timer will not actually start until the scheduler is
+ * started, and the timer's expiry time will be relative to when the scheduler is
+ * started, not relative to when xTimerStart() was called.
+ *
+ * The configUSE_TIMERS configuration constant must be set to 1 for xTimerStart()
+ * to be available.
+ *
+ * @param xTimer The handle of the timer being started/restarted.
+ *
+ * @param xTicksToWait Specifies the time, in ticks, that the calling task should
+ * be held in the Blocked state to wait for the start command to be successfully
+ * sent to the timer command queue, should the queue already be full when
+ * xTimerStart() was called. xTicksToWait is ignored if xTimerStart() is called
+ * before the scheduler is started.
+ *
+ * @return pdFAIL will be returned if the start command could not be sent to
+ * the timer command queue even after xTicksToWait ticks had passed. pdPASS will
+ * be returned if the command was successfully sent to the timer command queue.
+ * When the command is actually processed will depend on the priority of the
+ * timer service/daemon task relative to other tasks in the system, although the
+ * timer's expiry time is relative to when xTimerStart() is actually called. The
+ * timer service/daemon task priority is set by the configTIMER_TASK_PRIORITY
+ * configuration constant.
+ *
+ * Example usage:
+ *
+ * See the xTimerCreate() API function example usage scenario.
+ *
+ */
+#define xTimerStart( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_START, ( xTaskGetTickCount() ), NULL, ( xTicksToWait ) )
+
+/**
+ * BaseType_t xTimerStop( TimerHandle_t xTimer, TickType_t xTicksToWait );
+ *
+ * Timer functionality is provided by a timer service/daemon task. Many of the
+ * public FreeRTOS timer API functions send commands to the timer service task
+ * through a queue called the timer command queue.
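+ * (Illustrative aside, not part of the upstream text: as the macro
+ * definitions in this file show, each of these calls is a thin wrapper that
+ * queues a command for the daemon task, so for example
+ *
+ *     xTimerStop( xTimer, 10 );
+ *
+ * expands to xTimerGenericCommand( ( xTimer ), tmrCOMMAND_STOP, 0U, NULL, ( 10 ) )
+ * and returns once the command is queued, not once it has been processed.)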
+ * The timer command queue is private to the kernel itself and is not directly
+ * accessible to application code. The length of the timer command queue is set
+ * by the configTIMER_QUEUE_LENGTH configuration constant.
+ *
+ * xTimerStop() stops a timer that was previously started using any of the
+ * xTimerStart(), xTimerReset(), xTimerStartFromISR(), xTimerResetFromISR(),
+ * xTimerChangePeriod() or xTimerChangePeriodFromISR() API functions.
+ *
+ * Stopping a timer ensures the timer is not in the active state.
+ *
+ * The configUSE_TIMERS configuration constant must be set to 1 for xTimerStop()
+ * to be available.
+ *
+ * @param xTimer The handle of the timer being stopped.
+ *
+ * @param xTicksToWait Specifies the time, in ticks, that the calling task should
+ * be held in the Blocked state to wait for the stop command to be successfully
+ * sent to the timer command queue, should the queue already be full when
+ * xTimerStop() was called. xTicksToWait is ignored if xTimerStop() is called
+ * before the scheduler is started.
+ *
+ * @return pdFAIL will be returned if the stop command could not be sent to
+ * the timer command queue even after xTicksToWait ticks had passed. pdPASS will
+ * be returned if the command was successfully sent to the timer command queue.
+ * When the command is actually processed will depend on the priority of the
+ * timer service/daemon task relative to other tasks in the system. The timer
+ * service/daemon task priority is set by the configTIMER_TASK_PRIORITY
+ * configuration constant.
+ *
+ * Example usage:
+ *
+ * See the xTimerCreate() API function example usage scenario.
+ *
+ */
+#define xTimerStop( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_STOP, 0U, NULL, ( xTicksToWait ) )
+
+/**
+ * BaseType_t xTimerChangePeriod( TimerHandle_t xTimer,
+ *                                TickType_t xNewPeriod,
+ *                                TickType_t xTicksToWait );
+ *
+ * Timer functionality is provided by a timer service/daemon task. Many of the
+ * public FreeRTOS timer API functions send commands to the timer service task
+ * through a queue called the timer command queue. The timer command queue is
+ * private to the kernel itself and is not directly accessible to application
+ * code. The length of the timer command queue is set by the
+ * configTIMER_QUEUE_LENGTH configuration constant.
+ *
+ * xTimerChangePeriod() changes the period of a timer that was previously
+ * created using the xTimerCreate() API function.
+ *
+ * xTimerChangePeriod() can be called to change the period of an active or
+ * dormant state timer.
+ *
+ * The configUSE_TIMERS configuration constant must be set to 1 for
+ * xTimerChangePeriod() to be available.
+ *
+ * @param xTimer The handle of the timer that is having its period changed.
+ *
+ * @param xNewPeriod The new period for xTimer. Timer periods are specified in
+ * tick periods, so the constant portTICK_PERIOD_MS can be used to convert a time
+ * that has been specified in milliseconds. For example, if the timer must
+ * expire after 100 ticks, then xNewPeriod should be set to 100. Alternatively,
+ * if the timer must expire after 500ms, then xNewPeriod can be set to
+ * ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than
+ * or equal to 1000.
+ *
+ * @param xTicksToWait Specifies the time, in ticks, that the calling task should
+ * be held in the Blocked state to wait for the change period command to be
+ * successfully sent to the timer command queue, should the queue already be
+ * full when xTimerChangePeriod() was called.
xTicksToWait is ignored if + * xTimerChangePeriod() is called before the scheduler is started. + * + * @return pdFAIL will be returned if the change period command could not be + * sent to the timer command queue even after xTicksToWait ticks had passed. + * pdPASS will be returned if the command was successfully sent to the timer + * command queue. When the command is actually processed will depend on the + * priority of the timer service/daemon task relative to other tasks in the + * system. The timer service/daemon task priority is set by the + * configTIMER_TASK_PRIORITY configuration constant. + * + * Example usage: + * @verbatim + * // This function assumes xTimer has already been created. If the timer + * // referenced by xTimer is already active when it is called, then the timer + * // is deleted. If the timer referenced by xTimer is not active when it is + * // called, then the period of the timer is set to 500ms and the timer is + * // started. + * void vAFunction( TimerHandle_t xTimer ) + * { + * if( xTimerIsTimerActive( xTimer ) != pdFALSE ) // or more simply and equivalently "if( xTimerIsTimerActive( xTimer ) )" + * { + * // xTimer is already active - delete it. + * xTimerDelete( xTimer ); + * } + * else + * { + * // xTimer is not active, change its period to 500ms. This will also + * // cause the timer to start. Block for a maximum of 100 ticks if the + * // change period command cannot immediately be sent to the timer + * // command queue. + * if( xTimerChangePeriod( xTimer, 500 / portTICK_PERIOD_MS, 100 ) == pdPASS ) + * { + * // The command was successfully sent. + * } + * else + * { + * // The command could not be sent, even after waiting for 100 ticks + * // to pass. Take appropriate action here. + * } + * } + * } + * @endverbatim + */ + #define xTimerChangePeriod( xTimer, xNewPeriod, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_CHANGE_PERIOD, ( xNewPeriod ), NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerDelete( TimerHandle_t xTimer, TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerDelete() deletes a timer that was previously created using the + * xTimerCreate() API function. + * + * The configUSE_TIMERS configuration constant must be set to 1 for + * xTimerDelete() to be available. + * + * @param xTimer The handle of the timer being deleted. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the delete command to be + * successfully sent to the timer command queue, should the queue already be + * full when xTimerDelete() was called. xTicksToWait is ignored if xTimerDelete() + * is called before the scheduler is started. + * + * @return pdFAIL will be returned if the delete command could not be sent to + * the timer command queue even after xTicksToWait ticks had passed. pdPASS will + * be returned if the command was successfully sent to the timer command queue. + * When the command is actually processed will depend on the priority of the + * timer service/daemon task relative to other tasks in the system. 
The timer + * service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * + * See the xTimerChangePeriod() API function example usage scenario. + */ +#define xTimerDelete( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_DELETE, 0U, NULL, ( xTicksToWait ) ) + +/** + * BaseType_t xTimerReset( TimerHandle_t xTimer, TickType_t xTicksToWait ); + * + * Timer functionality is provided by a timer service/daemon task. Many of the + * public FreeRTOS timer API functions send commands to the timer service task + * through a queue called the timer command queue. The timer command queue is + * private to the kernel itself and is not directly accessible to application + * code. The length of the timer command queue is set by the + * configTIMER_QUEUE_LENGTH configuration constant. + * + * xTimerReset() re-starts a timer that was previously created using the + * xTimerCreate() API function. If the timer had already been started and was + * already in the active state, then xTimerReset() will cause the timer to + * re-evaluate its expiry time so that it is relative to when xTimerReset() was + * called. If the timer was in the dormant state then xTimerReset() has + * equivalent functionality to the xTimerStart() API function. + * + * Resetting a timer ensures the timer is in the active state. If the timer + * is not stopped, deleted, or reset in the mean time, the callback function + * associated with the timer will get called 'n' ticks after xTimerReset() was + * called, where 'n' is the timers defined period. + * + * It is valid to call xTimerReset() before the scheduler has been started, but + * when this is done the timer will not actually start until the scheduler is + * started, and the timers expiry time will be relative to when the scheduler is + * started, not relative to when xTimerReset() was called. + * + * The configUSE_TIMERS configuration constant must be set to 1 for xTimerReset() + * to be available. + * + * @param xTimer The handle of the timer being reset/started/restarted. + * + * @param xTicksToWait Specifies the time, in ticks, that the calling task should + * be held in the Blocked state to wait for the reset command to be successfully + * sent to the timer command queue, should the queue already be full when + * xTimerReset() was called. xTicksToWait is ignored if xTimerReset() is called + * before the scheduler is started. + * + * @return pdFAIL will be returned if the reset command could not be sent to + * the timer command queue even after xTicksToWait ticks had passed. pdPASS will + * be returned if the command was successfully sent to the timer command queue. + * When the command is actually processed will depend on the priority of the + * timer service/daemon task relative to other tasks in the system, although the + * timers expiry time is relative to when xTimerStart() is actually called. The + * timer service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * @verbatim + * // When a key is pressed, an LCD back-light is switched on. If 5 seconds pass + * // without a key being pressed, then the LCD back-light is switched off. In + * // this case, the timer is a one-shot timer. + * + * TimerHandle_t xBacklightTimer = NULL; + * + * // The callback function assigned to the one-shot timer. In this case the + * // parameter is not used. 
+ * void vBacklightTimerCallback( TimerHandle_t pxTimer )
+ * {
+ *     // The timer expired, therefore 5 seconds must have passed since a key
+ *     // was pressed. Switch off the LCD back-light.
+ *     vSetBacklightState( BACKLIGHT_OFF );
+ * }
+ *
+ * // The key press event handler.
+ * void vKeyPressEventHandler( char cKey )
+ * {
+ *     // Ensure the LCD back-light is on, then reset the timer that is
+ *     // responsible for turning the back-light off after 5 seconds of
+ *     // key inactivity. Wait 100 ticks for the command to be successfully sent
+ *     // if it cannot be sent immediately.
+ *     vSetBacklightState( BACKLIGHT_ON );
+ *     if( xTimerReset( xBacklightTimer, 100 ) != pdPASS )
+ *     {
+ *         // The reset command was not executed successfully. Take appropriate
+ *         // action here.
+ *     }
+ *
+ *     // Perform the rest of the key processing here.
+ * }
+ *
+ * void main( void )
+ * {
+ *     int32_t x;
+ *
+ *     // Create then start the one-shot timer that is responsible for turning
+ *     // the back-light off if no keys are pressed within a 5 second period.
+ *     xBacklightTimer = xTimerCreate( "BacklightTimer",              // Just a text name, not used by the kernel.
+ *                                     ( 5000 / portTICK_PERIOD_MS ), // The timer period in ticks.
+ *                                     pdFALSE,                       // The timer is a one-shot timer.
+ *                                     0,                             // The id is not used by the callback so can take any value.
+ *                                     vBacklightTimerCallback        // The callback function that switches the LCD back-light off.
+ *                                   );
+ *
+ *     if( xBacklightTimer == NULL )
+ *     {
+ *         // The timer was not created.
+ *     }
+ *     else
+ *     {
+ *         // Start the timer. No block time is specified, and even if one was
+ *         // it would be ignored because the scheduler has not yet been
+ *         // started.
+ *         if( xTimerStart( xBacklightTimer, 0 ) != pdPASS )
+ *         {
+ *             // The timer could not be set into the Active state.
+ *         }
+ *     }
+ *
+ *     // ...
+ *     // Create tasks here.
+ *     // ...
+ *
+ *     // Starting the scheduler will start the timer running as it has already
+ *     // been set into the active state.
+ *     vTaskStartScheduler();
+ *
+ *     // Should not reach here.
+ *     for( ;; );
+ * }
+ * @endverbatim
+ */
+#define xTimerReset( xTimer, xTicksToWait ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_RESET, ( xTaskGetTickCount() ), NULL, ( xTicksToWait ) )
+
+/**
+ * BaseType_t xTimerStartFromISR( TimerHandle_t xTimer,
+ *                                BaseType_t *pxHigherPriorityTaskWoken );
+ *
+ * A version of xTimerStart() that can be called from an interrupt service
+ * routine.
+ *
+ * @param xTimer The handle of the timer being started/restarted.
+ *
+ * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most
+ * of its time in the Blocked state, waiting for messages to arrive on the timer
+ * command queue. Calling xTimerStartFromISR() writes a message to the timer
+ * command queue, so has the potential to transition the timer service/daemon
+ * task out of the Blocked state. If calling xTimerStartFromISR() causes the
+ * timer service/daemon task to leave the Blocked state, and the timer service/
+ * daemon task has a priority equal to or greater than the currently executing
+ * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will
+ * get set to pdTRUE internally within the xTimerStartFromISR() function. If
+ * xTimerStartFromISR() sets this value to pdTRUE then a context switch should
+ * be performed before the interrupt exits.
+ *
+ * @return pdFAIL will be returned if the start command could not be sent to
+ * the timer command queue. pdPASS will be returned if the command was
+ * successfully sent to the timer command queue.
When the command is actually + * processed will depend on the priority of the timer service/daemon task + * relative to other tasks in the system, although the timers expiry time is + * relative to when xTimerStartFromISR() is actually called. The timer + * service/daemon task priority is set by the configTIMER_TASK_PRIORITY + * configuration constant. + * + * Example usage: + * @verbatim + * // This scenario assumes xBacklightTimer has already been created. When a + * // key is pressed, an LCD back-light is switched on. If 5 seconds pass + * // without a key being pressed, then the LCD back-light is switched off. In + * // this case, the timer is a one-shot timer, and unlike the example given for + * // the xTimerReset() function, the key press event handler is an interrupt + * // service routine. + * + * // The callback function assigned to the one-shot timer. In this case the + * // parameter is not used. + * void vBacklightTimerCallback( TimerHandle_t pxTimer ) + * { + * // The timer expired, therefore 5 seconds must have passed since a key + * // was pressed. Switch off the LCD back-light. + * vSetBacklightState( BACKLIGHT_OFF ); + * } + * + * // The key press interrupt service routine. + * void vKeyPressEventInterruptHandler( void ) + * { + * BaseType_t xHigherPriorityTaskWoken = pdFALSE; + * + * // Ensure the LCD back-light is on, then restart the timer that is + * // responsible for turning the back-light off after 5 seconds of + * // key inactivity. This is an interrupt service routine so can only + * // call FreeRTOS API functions that end in "FromISR". + * vSetBacklightState( BACKLIGHT_ON ); + * + * // xTimerStartFromISR() or xTimerResetFromISR() could be called here + * // as both cause the timer to re-calculate its expiry time. + * // xHigherPriorityTaskWoken was initialised to pdFALSE when it was + * // declared (in this function). + * if( xTimerStartFromISR( xBacklightTimer, &xHigherPriorityTaskWoken ) != pdPASS ) + * { + * // The start command was not executed successfully. Take appropriate + * // action here. + * } + * + * // Perform the rest of the key processing here. + * + * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch + * // should be performed. The syntax required to perform a context switch + * // from inside an ISR varies from port to port, and from compiler to + * // compiler. Inspect the demos for the port you are using to find the + * // actual syntax required. + * if( xHigherPriorityTaskWoken != pdFALSE ) + * { + * // Call the interrupt safe yield function here (actual function + * // depends on the FreeRTOS port being used). + * } + * } + * @endverbatim + */ +#define xTimerStartFromISR( xTimer, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_START_FROM_ISR, ( xTaskGetTickCountFromISR() ), ( pxHigherPriorityTaskWoken ), 0U ) + +/** + * BaseType_t xTimerStopFromISR( TimerHandle_t xTimer, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * A version of xTimerStop() that can be called from an interrupt service + * routine. + * + * @param xTimer The handle of the timer being stopped. + * + * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most + * of its time in the Blocked state, waiting for messages to arrive on the timer + * command queue. Calling xTimerStopFromISR() writes a message to the timer + * command queue, so has the potential to transition the timer service/daemon + * task out of the Blocked state. 
If calling xTimerStopFromISR() causes the + * timer service/daemon task to leave the Blocked state, and the timer service/ + * daemon task has a priority equal to or greater than the currently executing + * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will + * get set to pdTRUE internally within the xTimerStopFromISR() function. If + * xTimerStopFromISR() sets this value to pdTRUE then a context switch should + * be performed before the interrupt exits. + * + * @return pdFAIL will be returned if the stop command could not be sent to + * the timer command queue. pdPASS will be returned if the command was + * successfully sent to the timer command queue. When the command is actually + * processed will depend on the priority of the timer service/daemon task + * relative to other tasks in the system. The timer service/daemon task + * priority is set by the configTIMER_TASK_PRIORITY configuration constant. + * + * Example usage: + * @verbatim + * // This scenario assumes xTimer has already been created and started. When + * // an interrupt occurs, the timer should be simply stopped. + * + * // The interrupt service routine that stops the timer. + * void vAnExampleInterruptServiceRoutine( void ) + * { + * BaseType_t xHigherPriorityTaskWoken = pdFALSE; + * + * // The interrupt has occurred - simply stop the timer. + * // xHigherPriorityTaskWoken was set to pdFALSE where it was defined + * // (within this function). As this is an interrupt service routine, only + * // FreeRTOS API functions that end in "FromISR" can be used. + * if( xTimerStopFromISR( xTimer, &xHigherPriorityTaskWoken ) != pdPASS ) + * { + * // The stop command was not executed successfully. Take appropriate + * // action here. + * } + * + * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch + * // should be performed. The syntax required to perform a context switch + * // from inside an ISR varies from port to port, and from compiler to + * // compiler. Inspect the demos for the port you are using to find the + * // actual syntax required. + * if( xHigherPriorityTaskWoken != pdFALSE ) + * { + * // Call the interrupt safe yield function here (actual function + * // depends on the FreeRTOS port being used). + * } + * } + * @endverbatim + */ +#define xTimerStopFromISR( xTimer, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_STOP_FROM_ISR, 0, ( pxHigherPriorityTaskWoken ), 0U ) + +/** + * BaseType_t xTimerChangePeriodFromISR( TimerHandle_t xTimer, + * TickType_t xNewPeriod, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * A version of xTimerChangePeriod() that can be called from an interrupt + * service routine. + * + * @param xTimer The handle of the timer that is having its period changed. + * + * @param xNewPeriod The new period for xTimer. Timer periods are specified in + * tick periods, so the constant portTICK_PERIOD_MS can be used to convert a time + * that has been specified in milliseconds. For example, if the timer must + * expire after 100 ticks, then xNewPeriod should be set to 100. Alternatively, + * if the timer must expire after 500ms, then xNewPeriod can be set to + * ( 500 / portTICK_PERIOD_MS ) provided configTICK_RATE_HZ is less than + * or equal to 1000. + * + * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most + * of its time in the Blocked state, waiting for messages to arrive on the timer + * command queue. 
+ * Calling xTimerChangePeriodFromISR() writes a message to the timer command
+ * queue, so has the potential to transition the timer service/daemon task out
+ * of the Blocked state. If calling xTimerChangePeriodFromISR()
+ * causes the timer service/daemon task to leave the Blocked state, and the
+ * timer service/daemon task has a priority equal to or greater than the
+ * currently executing task (the task that was interrupted), then
+ * *pxHigherPriorityTaskWoken will get set to pdTRUE internally within the
+ * xTimerChangePeriodFromISR() function. If xTimerChangePeriodFromISR() sets
+ * this value to pdTRUE then a context switch should be performed before the
+ * interrupt exits.
+ *
+ * @return pdFAIL will be returned if the command to change the timer's period
+ * could not be sent to the timer command queue. pdPASS will be returned if the
+ * command was successfully sent to the timer command queue. When the command
+ * is actually processed will depend on the priority of the timer service/daemon
+ * task relative to other tasks in the system. The timer service/daemon task
+ * priority is set by the configTIMER_TASK_PRIORITY configuration constant.
+ *
+ * Example usage:
+ * @verbatim
+ * // This scenario assumes xTimer has already been created and started. When
+ * // an interrupt occurs, the period of xTimer should be changed to 500ms.
+ *
+ * // The interrupt service routine that changes the period of xTimer.
+ * void vAnExampleInterruptServiceRoutine( void )
+ * {
+ * BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+ *
+ *     // The interrupt has occurred - change the period of xTimer to 500ms.
+ *     // xHigherPriorityTaskWoken was set to pdFALSE where it was defined
+ *     // (within this function). As this is an interrupt service routine, only
+ *     // FreeRTOS API functions that end in "FromISR" can be used.
+ *     if( xTimerChangePeriodFromISR( xTimer, ( 500 / portTICK_PERIOD_MS ), &xHigherPriorityTaskWoken ) != pdPASS )
+ *     {
+ *         // The command to change the timer's period was not executed
+ *         // successfully. Take appropriate action here.
+ *     }
+ *
+ *     // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch
+ *     // should be performed. The syntax required to perform a context switch
+ *     // from inside an ISR varies from port to port, and from compiler to
+ *     // compiler. Inspect the demos for the port you are using to find the
+ *     // actual syntax required.
+ *     if( xHigherPriorityTaskWoken != pdFALSE )
+ *     {
+ *         // Call the interrupt safe yield function here (actual function
+ *         // depends on the FreeRTOS port being used).
+ *     }
+ * }
+ * @endverbatim
+ */
+#define xTimerChangePeriodFromISR( xTimer, xNewPeriod, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_CHANGE_PERIOD_FROM_ISR, ( xNewPeriod ), ( pxHigherPriorityTaskWoken ), 0U )
+
+/**
+ * BaseType_t xTimerResetFromISR( TimerHandle_t xTimer,
+ *                                BaseType_t *pxHigherPriorityTaskWoken );
+ *
+ * A version of xTimerReset() that can be called from an interrupt service
+ * routine.
+ *
+ * @param xTimer The handle of the timer that is to be started, reset, or
+ * restarted.
+ *
+ * @param pxHigherPriorityTaskWoken The timer service/daemon task spends most
+ * of its time in the Blocked state, waiting for messages to arrive on the timer
+ * command queue. Calling xTimerResetFromISR() writes a message to the timer
+ * command queue, so has the potential to transition the timer service/daemon
+ * task out of the Blocked state.
If calling xTimerResetFromISR() causes the + * timer service/daemon task to leave the Blocked state, and the timer service/ + * daemon task has a priority equal to or greater than the currently executing + * task (the task that was interrupted), then *pxHigherPriorityTaskWoken will + * get set to pdTRUE internally within the xTimerResetFromISR() function. If + * xTimerResetFromISR() sets this value to pdTRUE then a context switch should + * be performed before the interrupt exits. + * + * @return pdFAIL will be returned if the reset command could not be sent to + * the timer command queue. pdPASS will be returned if the command was + * successfully sent to the timer command queue. When the command is actually + * processed will depend on the priority of the timer service/daemon task + * relative to other tasks in the system, although the timers expiry time is + * relative to when xTimerResetFromISR() is actually called. The timer service/daemon + * task priority is set by the configTIMER_TASK_PRIORITY configuration constant. + * + * Example usage: + * @verbatim + * // This scenario assumes xBacklightTimer has already been created. When a + * // key is pressed, an LCD back-light is switched on. If 5 seconds pass + * // without a key being pressed, then the LCD back-light is switched off. In + * // this case, the timer is a one-shot timer, and unlike the example given for + * // the xTimerReset() function, the key press event handler is an interrupt + * // service routine. + * + * // The callback function assigned to the one-shot timer. In this case the + * // parameter is not used. + * void vBacklightTimerCallback( TimerHandle_t pxTimer ) + * { + * // The timer expired, therefore 5 seconds must have passed since a key + * // was pressed. Switch off the LCD back-light. + * vSetBacklightState( BACKLIGHT_OFF ); + * } + * + * // The key press interrupt service routine. + * void vKeyPressEventInterruptHandler( void ) + * { + * BaseType_t xHigherPriorityTaskWoken = pdFALSE; + * + * // Ensure the LCD back-light is on, then reset the timer that is + * // responsible for turning the back-light off after 5 seconds of + * // key inactivity. This is an interrupt service routine so can only + * // call FreeRTOS API functions that end in "FromISR". + * vSetBacklightState( BACKLIGHT_ON ); + * + * // xTimerStartFromISR() or xTimerResetFromISR() could be called here + * // as both cause the timer to re-calculate its expiry time. + * // xHigherPriorityTaskWoken was initialised to pdFALSE when it was + * // declared (in this function). + * if( xTimerResetFromISR( xBacklightTimer, &xHigherPriorityTaskWoken ) != pdPASS ) + * { + * // The reset command was not executed successfully. Take appropriate + * // action here. + * } + * + * // Perform the rest of the key processing here. + * + * // If xHigherPriorityTaskWoken equals pdTRUE, then a context switch + * // should be performed. The syntax required to perform a context switch + * // from inside an ISR varies from port to port, and from compiler to + * // compiler. Inspect the demos for the port you are using to find the + * // actual syntax required. + * if( xHigherPriorityTaskWoken != pdFALSE ) + * { + * // Call the interrupt safe yield function here (actual function + * // depends on the FreeRTOS port being used). 
+ * } + * } + * @endverbatim + */ +#define xTimerResetFromISR( xTimer, pxHigherPriorityTaskWoken ) xTimerGenericCommand( ( xTimer ), tmrCOMMAND_RESET_FROM_ISR, ( xTaskGetTickCountFromISR() ), ( pxHigherPriorityTaskWoken ), 0U ) + + +/** + * BaseType_t xTimerPendFunctionCallFromISR( PendedFunction_t xFunctionToPend, + * void *pvParameter1, + * uint32_t ulParameter2, + * BaseType_t *pxHigherPriorityTaskWoken ); + * + * + * Used from application interrupt service routines to defer the execution of a + * function to the RTOS daemon task (the timer service task, hence this function + * is implemented in timers.c and is prefixed with 'Timer'). + * + * Ideally an interrupt service routine (ISR) is kept as short as possible, but + * sometimes an ISR either has a lot of processing to do, or needs to perform + * processing that is not deterministic. In these cases + * xTimerPendFunctionCallFromISR() can be used to defer processing of a function + * to the RTOS daemon task. + * + * A mechanism is provided that allows the interrupt to return directly to the + * task that will subsequently execute the pended callback function. This + * allows the callback function to execute contiguously in time with the + * interrupt - just as if the callback had executed in the interrupt itself. + * + * @param xFunctionToPend The function to execute from the timer service/ + * daemon task. The function must conform to the PendedFunction_t + * prototype. + * + * @param pvParameter1 The value of the callback function's first parameter. + * The parameter has a void * type to allow it to be used to pass any type. + * For example, unsigned longs can be cast to a void *, or the void * can be + * used to point to a structure. + * + * @param ulParameter2 The value of the callback function's second parameter. + * + * @param pxHigherPriorityTaskWoken As mentioned above, calling this function + * will result in a message being sent to the timer daemon task. If the + * priority of the timer daemon task (which is set using + * configTIMER_TASK_PRIORITY in FreeRTOSConfig.h) is higher than the priority of + * the currently running task (the task the interrupt interrupted) then + * *pxHigherPriorityTaskWoken will be set to pdTRUE within + * xTimerPendFunctionCallFromISR(), indicating that a context switch should be + * requested before the interrupt exits. For that reason + * *pxHigherPriorityTaskWoken must be initialised to pdFALSE. See the + * example code below. + * + * @return pdPASS is returned if the message was successfully sent to the + * timer daemon task, otherwise pdFALSE is returned. + * + * Example usage: + * @verbatim + * + * // The callback function that will execute in the context of the daemon task. + * // Note callback functions must all use this same prototype. + * void vProcessInterface( void *pvParameter1, uint32_t ulParameter2 ) + * { + * BaseType_t xInterfaceToService; + * + * // The interface that requires servicing is passed in the second + * // parameter. The first parameter is not used in this case. + * xInterfaceToService = ( BaseType_t ) ulParameter2; + * + * // ...Perform the processing here... + * } + * + * // An ISR that receives data packets from multiple interfaces + * void vAnISR( void ) + * { + * BaseType_t xInterfaceToService, xHigherPriorityTaskWoken; + * + * // Query the hardware to determine which interface needs processing. + * xInterfaceToService = prvCheckInterfaces(); + * + * // The actual processing is to be deferred to a task. 
Request the + * // vProcessInterface() callback function is executed, passing in the + * // number of the interface that needs processing. The interface to + * // service is passed in the second parameter. The first parameter is + * // not used in this case. + * xHigherPriorityTaskWoken = pdFALSE; + * xTimerPendFunctionCallFromISR( vProcessInterface, NULL, ( uint32_t ) xInterfaceToService, &xHigherPriorityTaskWoken ); + * + * // If xHigherPriorityTaskWoken is now set to pdTRUE then a context + * // switch should be requested. The macro used is port specific and will + * // be either portYIELD_FROM_ISR() or portEND_SWITCHING_ISR() - refer to + * // the documentation page for the port being used. + * portYIELD_FROM_ISR( xHigherPriorityTaskWoken ); + * + * } + * @endverbatim + */ +BaseType_t xTimerPendFunctionCallFromISR( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, BaseType_t *pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; + + /** + * BaseType_t xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, + * void *pvParameter1, + * uint32_t ulParameter2, + * TickType_t xTicksToWait ); + * + * + * Used to defer the execution of a function to the RTOS daemon task (the timer + * service task, hence this function is implemented in timers.c and is prefixed + * with 'Timer'). + * + * @param xFunctionToPend The function to execute from the timer service/ + * daemon task. The function must conform to the PendedFunction_t + * prototype. + * + * @param pvParameter1 The value of the callback function's first parameter. + * The parameter has a void * type to allow it to be used to pass any type. + * For example, unsigned longs can be cast to a void *, or the void * can be + * used to point to a structure. + * + * @param ulParameter2 The value of the callback function's second parameter. + * + * @param xTicksToWait Calling this function will result in a message being + * sent to the timer daemon task on a queue. xTicksToWait is the amount of + * time the calling task should remain in the Blocked state (so not using any + * processing time) for space to become available on the timer queue if the + * queue is found to be full. + * + * @return pdPASS is returned if the message was successfully sent to the + * timer daemon task, otherwise pdFALSE is returned. + * + */ +BaseType_t xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * const char * const pcTimerGetName( TimerHandle_t xTimer ); + * + * Returns the name that was assigned to a timer when the timer was created. + * + * @param xTimer The handle of the timer being queried. + * + * @return The name assigned to the timer specified by the xTimer parameter. + */ +const char * pcTimerGetName( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * void vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ); + * + * Updates a timer to be either an autoreload timer, in which case the timer + * automatically resets itself each time it expires, or a one shot timer, in + * which case the timer will only expire once unless it is manually restarted. + * + * @param xTimer The handle of the timer being updated. 
+ * + * @param uxAutoReload If uxAutoReload is set to pdTRUE then the timer will + * expire repeatedly with a frequency set by the timer's period (see the + * xTimerPeriodInTicks parameter of the xTimerCreate() API function). If + * uxAutoReload is set to pdFALSE then the timer will be a one-shot timer and + * enter the dormant state after it expires. + */ +void vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ) PRIVILEGED_FUNCTION; + +/** + * TickType_t xTimerGetPeriod( TimerHandle_t xTimer ); + * + * Returns the period of a timer. + * + * @param xTimer The handle of the timer being queried. + * + * @return The period of the timer in ticks. + */ +TickType_t xTimerGetPeriod( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + +/** +* TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ); +* +* Returns the time in ticks at which the timer will expire. If this is less +* than the current tick count then the expiry time has overflowed from the +* current time. +* +* @param xTimer The handle of the timer being queried. +* +* @return If the timer is running then the time in ticks at which the timer +* will next expire is returned. If the timer is not running then the return +* value is undefined. +*/ +TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + +/* + * Functions beyond this part are not part of the public API and are intended + * for use by the kernel only. + */ +BaseType_t xTimerCreateTimerTask( void ) PRIVILEGED_FUNCTION; +BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, const BaseType_t xCommandID, const TickType_t xOptionalValue, BaseType_t * const pxHigherPriorityTaskWoken, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +#if( configUSE_TRACE_FACILITY == 1 ) + void vTimerSetTimerNumber( TimerHandle_t xTimer, UBaseType_t uxTimerNumber ) PRIVILEGED_FUNCTION; + UBaseType_t uxTimerGetTimerNumber( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; +#endif + +#ifdef __cplusplus +} +#endif +#endif /* TIMERS_H */ + + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/list.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/list.c new file mode 100644 index 0000000..9875b90 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/list.c @@ -0,0 +1,198 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#include+#include "FreeRTOS.h" +#include "list.h" + +/*----------------------------------------------------------- + * PUBLIC LIST API documented in list.h + *----------------------------------------------------------*/ + +void vListInitialise( List_t * const pxList ) +{ + /* The list structure contains a list item which is used to mark the + end of the list. To initialise the list the list end is inserted + as the only list entry. */ + pxList->pxIndex = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + + /* The list end value is the highest possible value in the list to + ensure it remains at the end of the list. */ + pxList->xListEnd.xItemValue = portMAX_DELAY; + + /* The list end next and previous pointers point to itself so we know + when the list is empty. */ + pxList->xListEnd.pxNext = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + pxList->xListEnd.pxPrevious = ( ListItem_t * ) &( pxList->xListEnd );/*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + + pxList->uxNumberOfItems = ( UBaseType_t ) 0U; + + /* Write known values into the list if + configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ); + listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ); +} +/*-----------------------------------------------------------*/ + +void vListInitialiseItem( ListItem_t * const pxItem ) +{ + /* Make sure the list item is not recorded as being on a list. */ + pxItem->pxContainer = NULL; + + /* Write known values into the list item if + configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ); + listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ); +} +/*-----------------------------------------------------------*/ + +void vListInsertEnd( List_t * const pxList, ListItem_t * const pxNewListItem ) +{ +ListItem_t * const pxIndex = pxList->pxIndex; + + /* Only effective when configASSERT() is also defined, these tests may catch + the list data structures being overwritten in memory. They will not catch + data errors caused by incorrect configuration or use of FreeRTOS. */ + listTEST_LIST_INTEGRITY( pxList ); + listTEST_LIST_ITEM_INTEGRITY( pxNewListItem ); + + /* Insert a new list item into pxList, but rather than sort the list, + makes the new list item the last item to be removed by a call to + listGET_OWNER_OF_NEXT_ENTRY(). */ + pxNewListItem->pxNext = pxIndex; + pxNewListItem->pxPrevious = pxIndex->pxPrevious; + + /* Only used during decision coverage testing. */ + mtCOVERAGE_TEST_DELAY(); + + pxIndex->pxPrevious->pxNext = pxNewListItem; + pxIndex->pxPrevious = pxNewListItem; + + /* Remember which list the item is in. 
*/ + pxNewListItem->pxContainer = pxList; + + ( pxList->uxNumberOfItems )++; +} +/*-----------------------------------------------------------*/ + +void vListInsert( List_t * const pxList, ListItem_t * const pxNewListItem ) +{ +ListItem_t *pxIterator; +const TickType_t xValueOfInsertion = pxNewListItem->xItemValue; + + /* Only effective when configASSERT() is also defined, these tests may catch + the list data structures being overwritten in memory. They will not catch + data errors caused by incorrect configuration or use of FreeRTOS. */ + listTEST_LIST_INTEGRITY( pxList ); + listTEST_LIST_ITEM_INTEGRITY( pxNewListItem ); + + /* Insert the new list item into the list, sorted in xItemValue order. + + If the list already contains a list item with the same item value then the + new list item should be placed after it. This ensures that TCBs which are + stored in ready lists (all of which have the same xItemValue value) get a + share of the CPU. However, if the xItemValue is the same as the back marker + the iteration loop below will not end. Therefore the value is checked + first, and the algorithm slightly modified if necessary. */ + if( xValueOfInsertion == portMAX_DELAY ) + { + pxIterator = pxList->xListEnd.pxPrevious; + } + else + { + /* *** NOTE *********************************************************** + If you find your application is crashing here then likely causes are + listed below. In addition see https://www.freertos.org/FAQHelp.html for + more tips, and ensure configASSERT() is defined! + https://www.freertos.org/a00110.html#configASSERT + + 1) Stack overflow - + see https://www.freertos.org/Stacks-and-stack-overflow-checking.html + 2) Incorrect interrupt priority assignment, especially on Cortex-M + parts where numerically high priority values denote low actual + interrupt priorities, which can seem counter intuitive. See + https://www.freertos.org/RTOS-Cortex-M3-M4.html and the definition + of configMAX_SYSCALL_INTERRUPT_PRIORITY on + https://www.freertos.org/a00110.html + 3) Calling an API function from within a critical section or when + the scheduler is suspended, or calling an API function that does + not end in "FromISR" from an interrupt. + 4) Using a queue or semaphore before it has been initialised or + before the scheduler has been started (are interrupts firing + before vTaskStartScheduler() has been called?). + **********************************************************************/ + + for( pxIterator = ( ListItem_t * ) &( pxList->xListEnd ); pxIterator->pxNext->xItemValue <= xValueOfInsertion; pxIterator = pxIterator->pxNext ) /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. *//*lint !e440 The iterator moves to a different value, not xValueOfInsertion. */ + { + /* There is nothing to do here, just iterating to the wanted + insertion position. */ + } + } + + pxNewListItem->pxNext = pxIterator->pxNext; + pxNewListItem->pxNext->pxPrevious = pxNewListItem; + pxNewListItem->pxPrevious = pxIterator; + pxIterator->pxNext = pxNewListItem; + + /* Remember which list the item is in. This allows fast removal of the + item later. */ + pxNewListItem->pxContainer = pxList; + + ( pxList->uxNumberOfItems )++; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove ) +{ +/* The list item knows which list it is in. Obtain the list from the list +item. 
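+
+    Editor's illustrative sketch (not upstream code; xList and xItem are
+    hypothetical names) of the insert/remove round trip this API supports:
+
+        List_t xList;
+        ListItem_t xItem;
+
+        vListInitialise( &xList );
+        vListInitialiseItem( &xItem );
+        listSET_LIST_ITEM_VALUE( &xItem, 10 );    /- sort key used by vListInsert() -/
+        vListInsert( &xList, &xItem );            after this, xItem.pxContainer == &xList
+        ( void ) uxListRemove( &xItem );          after this, xItem.pxContainer == NULL again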
*/ +List_t * const pxList = pxItemToRemove->pxContainer; + + pxItemToRemove->pxNext->pxPrevious = pxItemToRemove->pxPrevious; + pxItemToRemove->pxPrevious->pxNext = pxItemToRemove->pxNext; + + /* Only used during decision coverage testing. */ + mtCOVERAGE_TEST_DELAY(); + + /* Make sure the index is left pointing to a valid item. */ + if( pxList->pxIndex == pxItemToRemove ) + { + pxList->pxIndex = pxItemToRemove->pxPrevious; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxItemToRemove->pxContainer = NULL; + ( pxList->uxNumberOfItems )--; + + return pxList->uxNumberOfItems; +} +/*-----------------------------------------------------------*/ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c new file mode 100644 index 0000000..b5f5b65 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/port.c @@ -0,0 +1,765 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/*----------------------------------------------------------- + * Implementation of functions defined in portable.h for the ARM CM4F port. + *----------------------------------------------------------*/ + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +#ifndef __VFP_FP__ + #error This port can only be used when the project options are configured to enable hardware floating point support. +#endif + +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ configCPU_CLOCK_HZ + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#else + /* The way the SysTick is clocked is not modified in case it is not the same + as the core. */ + #define portNVIC_SYSTICK_CLK_BIT ( 0 ) +#endif + +/* Constants required to manipulate the core. Registers first... 
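+
+    Editor's worked example (216 MHz is an assumed STM32F7 core clock, not a
+    value taken from this file): with configTICK_RATE_HZ at 1000, the SysTick
+    reload value programmed by vPortSetupTimerInterrupt() further down is
+
+        ( 216000000 / 1000 ) - 1 = 215999
+
+    which fits comfortably inside the 24-bit counter limit of 0xFFFFFF
+    (16777215) defined below as portMAX_24_BIT_NUMBER.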
*/ +#define portNVIC_SYSTICK_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( * ( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( * ( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SYSPRI2_REG ( * ( ( volatile uint32_t * ) 0xe000ed20 ) ) +/* ...then bits in the registers. */ +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PENDSVCLEAR_BIT ( 1UL << 27UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) + +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) + +/* Constants required to check the validity of an interrupt priority. */ +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( * ( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portMAX_8_BIT_VALUE ( ( uint8_t ) 0xff ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) + +/* Masks off all bits but the VECTACTIVE bits in the ICSR register. */ +#define portVECTACTIVE_MASK ( 0xFFUL ) + +/* Constants required to manipulate the VFP. */ +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating point context control register. */ +#define portASPEN_AND_LSPEN_BITS ( 0x3UL << 30UL ) + +/* Constants required to set up the initial stack. */ +#define portINITIAL_XPSR ( 0x01000000 ) +#define portINITIAL_EXC_RETURN ( 0xfffffffd ) + +/* The systick is a 24-bit counter. */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/* For strict compliance with the Cortex-M spec the task start address should +have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ +#define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) + +/* A fiddle factor to estimate the number of SysTick counts that would have +occurred while the SysTick counter is stopped during tickless idle +calculations. */ +#define portMISSED_COUNTS_FACTOR ( 45UL ) + +/* Let the user override the pre-loading of the initial LR with the address of +prvTaskExitError() in case it messes up unwinding of the stack in the +debugger. */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/* + * Setup the timer to generate the tick interrupts. The implementation in this + * file is weak to allow application writers to change the timer used to + * generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ); + +/* + * Exception handlers. + */ +void xPortPendSVHandler( void ) __attribute__ (( naked )); +void xPortSysTickHandler( void ); +void vPortSVCHandler( void ) __attribute__ (( naked )); + +/* + * Start first task is a separate function so it can be tested in isolation. + */ +static void prvPortStartFirstTask( void ) __attribute__ (( naked )); + +/* + * Function to enable the VFP. + */ +static void vPortEnableVFP( void ) __attribute__ (( naked )); + +/* + * Used to catch tasks that attempt to return from their implementing function. 
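+
+    Editor's sketch of the rule being enforced (vATask is a hypothetical task
+    function, not upstream code): a task function must never return; it either
+    loops forever or deletes itself:
+
+        void vATask( void *pvParameters )
+        {
+            for( ;; )
+            {
+                do the task's work here
+            }
+            never reached - a task that must end calls vTaskDelete( NULL )
+        }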
+ */
+static void prvTaskExitError( void );
+
+/*-----------------------------------------------------------*/
+
+/* Each task maintains its own interrupt status in the critical nesting
+variable. */
+static UBaseType_t uxCriticalNesting = 0xaaaaaaaa;
+
+/*
+ * The number of SysTick increments that make up one tick period.
+ */
+#if( configUSE_TICKLESS_IDLE == 1 )
+    static uint32_t ulTimerCountsForOneTick = 0;
+#endif /* configUSE_TICKLESS_IDLE */
+
+/*
+ * The maximum number of tick periods that can be suppressed is limited by the
+ * 24 bit resolution of the SysTick timer.
+ */
+#if( configUSE_TICKLESS_IDLE == 1 )
+    static uint32_t xMaximumPossibleSuppressedTicks = 0;
+#endif /* configUSE_TICKLESS_IDLE */
+
+/*
+ * Compensate for the CPU cycles that pass while the SysTick is stopped (low
+ * power functionality only).
+ */
+#if( configUSE_TICKLESS_IDLE == 1 )
+    static uint32_t ulStoppedTimerCompensation = 0;
+#endif /* configUSE_TICKLESS_IDLE */
+
+/*
+ * Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
+ * FreeRTOS API functions are not called from interrupts that have been assigned
+ * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
+ */
+#if( configASSERT_DEFINED == 1 )
+    static uint8_t ucMaxSysCallPriority = 0;
+    static uint32_t ulMaxPRIGROUPValue = 0;
+    static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * const ) portNVIC_IP_REGISTERS_OFFSET_16;
+#endif /* configASSERT_DEFINED */
+
+/*-----------------------------------------------------------*/
+
+/*
+ * See header file for description.
+ */
+StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )
+{
+    /* Simulate the stack frame as it would be created by a context switch
+    interrupt. */
+
+    /* Offset added to account for the way the MCU uses the stack on entry/exit
+    of interrupts, and to ensure alignment. */
+    pxTopOfStack--;
+
+    *pxTopOfStack = portINITIAL_XPSR;    /* xPSR */
+    pxTopOfStack--;
+    *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK;    /* PC */
+    pxTopOfStack--;
+    *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS;    /* LR */
+
+    /* Save code space by skipping register initialisation. */
+    pxTopOfStack -= 5;    /* R12, R3, R2 and R1. */
+    *pxTopOfStack = ( StackType_t ) pvParameters;    /* R0 */
+
+    /* A save method is being used that requires each task to maintain its
+    own exec return value. */
+    pxTopOfStack--;
+    *pxTopOfStack = portINITIAL_EXC_RETURN;
+
+    pxTopOfStack -= 8;    /* R11, R10, R9, R8, R7, R6, R5 and R4. */
+
+    return pxTopOfStack;
+}
+/*-----------------------------------------------------------*/
+
+static void prvTaskExitError( void )
+{
+volatile uint32_t ulDummy = 0;
+
+    /* A function that implements a task must not exit or attempt to return to
+    its caller as there is nothing to return to.  If a task wants to exit it
+    should instead call vTaskDelete( NULL ).
+
+    Artificially force an assert() to be triggered if configASSERT() is
+    defined, then stop here so application writers can catch the error. */
+    configASSERT( uxCriticalNesting == ~0UL );
+    portDISABLE_INTERRUPTS();
+    while( ulDummy == 0 )
+    {
+        /* This file calls prvTaskExitError() after the scheduler has been
+        started to remove a compiler warning about the function being defined
+        but never called.
ulDummy is used purely to quieten other warnings + about code appearing after this function is called - making ulDummy + volatile makes the compiler think the function could return and + therefore not output an 'unreachable code' warning for code that appears + after it. */ + } +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler( void ) +{ + __asm volatile ( + " ldr r3, pxCurrentTCBConst2 \n" /* Restore the context. */ + " ldr r1, [r3] \n" /* Use pxCurrentTCBConst to get the pxCurrentTCB address. */ + " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */ + " ldmia r0!, {r4-r11, r14} \n" /* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */ + " msr psp, r0 \n" /* Restore the task stack pointer. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" + " bx r14 \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst2: .word pxCurrentTCB \n" + ); +} +/*-----------------------------------------------------------*/ + +static void prvPortStartFirstTask( void ) +{ + /* Start the first task. This also clears the bit that indicates the FPU is + in use in case the FPU was used before the scheduler was started - which + would otherwise result in the unnecessary leaving of space in the SVC stack + for lazy saving of FPU registers. */ + __asm volatile( + " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n" + " ldr r0, [r0] \n" + " msr msp, r0 \n" /* Set the msp back to the start of the stack. */ + " mov r0, #0 \n" /* Clear the bit that indicates the FPU is in use, see comment above. */ + " msr control, r0 \n" + " cpsie i \n" /* Globally enable interrupts. */ + " cpsie f \n" + " dsb \n" + " isb \n" + " svc 0 \n" /* System call to start first task. */ + " nop \n" + ); +} +/*-----------------------------------------------------------*/ + +/* + * See header file for description. + */ +BaseType_t xPortStartScheduler( void ) +{ + /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. + See http://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); + + #if( configASSERT_DEFINED == 1 ) + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + functions can be called. ISR safe functions are those that end in + "FromISR". FreeRTOS maintains separate thread and ISR API functions to + ensure interrupt entry is as fast and simple as possible. + + Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; + + /* Determine the number of priority bits available. First write to all + possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Calculate the maximum acceptable priority group value for the number + of bits read back. 
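+
+    (Editor's worked example, assuming 4 implemented priority bits as on many
+    STM32 parts: the write-back above reads 0xF0, so the loop below shifts out
+    four set top bits and leaves ulMaxPRIGROUPValue == 7 - 4 == 3.)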
*/
+        ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS;
+        while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
+        {
+            ulMaxPRIGROUPValue--;
+            ucMaxPriorityValue <<= ( uint8_t ) 0x01;
+        }
+
+        #ifdef __NVIC_PRIO_BITS
+        {
+            /* Check the CMSIS configuration that defines the number of
+            priority bits matches the number of priority bits actually queried
+            from the hardware. */
+            configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS );
+        }
+        #endif
+
+        #ifdef configPRIO_BITS
+        {
+            /* Check the FreeRTOS configuration that defines the number of
+            priority bits matches the number of priority bits actually queried
+            from the hardware. */
+            configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS );
+        }
+        #endif
+
+        /* Shift the priority group value back to its position within the AIRCR
+        register. */
+        ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
+        ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
+
+        /* Restore the clobbered interrupt priority register to its original
+        value. */
+        *pucFirstUserPriorityRegister = ulOriginalPriority;
+    }
+    #endif /* configASSERT_DEFINED */
+
+    /* Make PendSV and SysTick the lowest priority interrupts. */
+    portNVIC_SYSPRI2_REG |= portNVIC_PENDSV_PRI;
+    portNVIC_SYSPRI2_REG |= portNVIC_SYSTICK_PRI;
+
+    /* Start the timer that generates the tick ISR.  Interrupts are disabled
+    here already. */
+    vPortSetupTimerInterrupt();
+
+    /* Initialise the critical nesting count ready for the first task. */
+    uxCriticalNesting = 0;
+
+    /* Ensure the VFP is enabled - it should be anyway. */
+    vPortEnableVFP();
+
+    /* Lazy save always. */
+    *( portFPCCR ) |= portASPEN_AND_LSPEN_BITS;
+
+    /* Start the first task. */
+    prvPortStartFirstTask();
+
+    /* Should never get here as the tasks will now be executing!  Call the task
+    exit error function to prevent compiler warnings about a static function
+    not being called in the case that the application writer overrides this
+    functionality by defining configTASK_RETURN_ADDRESS.  Call
+    vTaskSwitchContext() so link time optimisation does not remove the
+    symbol. */
+    vTaskSwitchContext();
+    prvTaskExitError();
+
+    /* Should not get here! */
+    return 0;
+}
+/*-----------------------------------------------------------*/
+
+void vPortEndScheduler( void )
+{
+    /* Not implemented in ports where there is nothing to return to.
+    Artificially force an assert. */
+    configASSERT( uxCriticalNesting == 1000UL );
+}
+/*-----------------------------------------------------------*/
+
+void vPortEnterCritical( void )
+{
+    portDISABLE_INTERRUPTS();
+    uxCriticalNesting++;
+
+    /* This is not the interrupt safe version of the enter critical function so
+    assert() if it is being called from an interrupt context.  Only API
+    functions that end in "FromISR" can be used in an interrupt.  Only assert if
+    the critical nesting count is 1 to protect against recursive calls if the
+    assert function also uses a critical section. */
+    if( uxCriticalNesting == 1 )
+    {
+        configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 );
+    }
+}
+/*-----------------------------------------------------------*/
+
+void vPortExitCritical( void )
+{
+    configASSERT( uxCriticalNesting );
+    uxCriticalNesting--;
+    if( uxCriticalNesting == 0 )
+    {
+        portENABLE_INTERRUPTS();
+    }
+}
+/*-----------------------------------------------------------*/
+
+void xPortPendSVHandler( void )
+{
+    /* This is a naked function.
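+
+    (Editor's note, not upstream text: nothing calls this handler directly.
+    A context switch is requested by pending PendSV - for example the
+    portYIELD() macro in portmacro.h does
+
+        portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
+
+    and the hardware then tail-chains into this handler at the lowest
+    interrupt priority.)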
*/ + + __asm volatile + ( + " mrs r0, psp \n" + " isb \n" + " \n" + " ldr r3, pxCurrentTCBConst \n" /* Get the location of the current TCB. */ + " ldr r2, [r3] \n" + " \n" + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, push high vfp registers. */ + " it eq \n" + " vstmdbeq r0!, {s16-s31} \n" + " \n" + " stmdb r0!, {r4-r11, r14} \n" /* Save the core registers. */ + " str r0, [r2] \n" /* Save the new top of stack into the first member of the TCB. */ + " \n" + " stmdb sp!, {r0, r3} \n" + " mov r0, %0 \n" + " cpsid i \n" /* Errata workaround. */ + " msr basepri, r0 \n" + " dsb \n" + " isb \n" + " cpsie i \n" /* Errata workaround. */ + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" + " ldmia sp!, {r0, r3} \n" + " \n" + " ldr r1, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */ + " ldr r0, [r1] \n" + " \n" + " ldmia r0!, {r4-r11, r14} \n" /* Pop the core registers. */ + " \n" + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, pop the high vfp registers too. */ + " it eq \n" + " vldmiaeq r0!, {s16-s31} \n" + " \n" + " msr psp, r0 \n" + " isb \n" + " \n" + #ifdef WORKAROUND_PMU_CM001 /* XMC4000 specific errata workaround. */ + #if WORKAROUND_PMU_CM001 == 1 + " push { r14 } \n" + " pop { pc } \n" + #endif + #endif + " \n" + " bx r14 \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst: .word pxCurrentTCB \n" + ::"i"(configMAX_SYSCALL_INTERRUPT_PRIORITY) + ); +} +/*-----------------------------------------------------------*/ + +void xPortSysTickHandler( void ) +{ + /* The SysTick runs at the lowest interrupt priority, so when this interrupt + executes all interrupts must be unmasked. There is therefore no need to + save and then restore the interrupt mask value as its value is already + known. */ + portDISABLE_INTERRUPTS(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* A context switch is required. Context switching is performed in + the PendSV interrupt. Pend the PendSV interrupt. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portENABLE_INTERRUPTS(); +} +/*-----------------------------------------------------------*/ + +#if( configUSE_TICKLESS_IDLE == 1 ) + + __attribute__((weak)) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Stop the SysTick momentarily. The time the SysTick is stopped for + is accounted for as best it can be, but using the tickless mode will + inevitably result in some tiny drift of the time maintained by the + kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT; + + /* Calculate the reload value required to wait xExpectedIdleTime + tick periods. -1 is used because this code will execute part way + through one of the tick periods. */ + ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + method as that will mask interrupts that should exit sleep mode. 
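+
+    (Editor's note on the reasoning, not upstream text: an interrupt masked by
+    PRIMASK - set by the cpsid below - still brings the core out of wfi when
+    it becomes pending, whereas an interrupt masked by BASEPRI, which
+    taskENTER_CRITICAL() uses on this port, would not wake the core at all.)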
*/ + __asm volatile( "cpsid i" ::: "memory" ); + __asm volatile( "dsb" ); + __asm volatile( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + to be unsuspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Restart from whatever is left in the count register to complete + this tick period. */ + portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Reset the reload register to the value required for normal tick + periods. */ + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + + /* Re-enable interrupts - see comments above the cpsid instruction() + above. */ + __asm volatile( "cpsie i" ::: "memory" ); + } + else + { + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + set its parameter to 0 to indicate that its implementation contains + its own wait for interrupt or wait for event instruction, and so wfi + should not be executed again. However, the original expected idle + time variable must remain unmodified, so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( &xModifiableIdleTime ); + if( xModifiableIdleTime > 0 ) + { + __asm volatile( "dsb" ::: "memory" ); + __asm volatile( "wfi" ); + __asm volatile( "isb" ); + } + configPOST_SLEEP_PROCESSING( &xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + out of sleep mode to execute immediately. see comments above + __disable_interrupt() call above. */ + __asm volatile( "cpsie i" ::: "memory" ); + __asm volatile( "dsb" ); + __asm volatile( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + and interrupts that execute while the clock is stopped will increase + any slippage between the time maintained by the RTOS and calendar + time. */ + __asm volatile( "cpsid i" ::: "memory" ); + __asm volatile( "dsb" ); + __asm volatile( "isb" ); + + /* Disable the SysTick clock without reading the + portNVIC_SYSTICK_CTRL_REG register to ensure the + portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + the time the SysTick is stopped for is accounted for as best it can + be, but using the tickless mode will inevitably result in some tiny + drift of the time maintained by the kernel with respect to calendar + time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT ); + + /* Determine if the SysTick clock has already counted to zero and + been set back to the current reload value (the reload back being + correct for the entire expected idle time) or if the SysTick is yet + to count to zero (in which case an interrupt other than the SysTick + must have brought the system out of sleep mode). */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt is already pending, and the SysTick count + reloaded with ulReloadValue. Reset the + portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick + period. 
*/
+                ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
+
+                /* Don't allow a tiny value, or values that have somehow
+                underflowed because the post sleep hook did something
+                that took too long. */
+                if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
+                {
+                    ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
+                }
+
+                portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
+
+                /* As the pending tick will be processed as soon as this
+                function exits, the tick value maintained by the tick is stepped
+                forward by one less than the time spent waiting. */
+                ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
+            }
+            else
+            {
+                /* Something other than the tick interrupt ended the sleep.
+                Work out how long the sleep lasted rounded to complete tick
+                periods (not the ulReload value which accounted for part
+                ticks). */
+                ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG;
+
+                /* How many complete tick periods passed while the processor
+                was waiting? */
+                ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
+
+                /* The reload value is set to whatever fraction of a single tick
+                period remains. */
+                portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
+            }
+
+            /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG
+            again, then set portNVIC_SYSTICK_LOAD_REG back to its standard
+            value. */
+            portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+            portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
+            vTaskStepTick( ulCompleteTickPeriods );
+            portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
+
+            /* Exit with interrupts enabled. */
+            __asm volatile( "cpsie i" ::: "memory" );
+        }
+    }
+
+#endif /* #if configUSE_TICKLESS_IDLE */
+/*-----------------------------------------------------------*/
+
+/*
+ * Setup the systick timer to generate the tick interrupts at the required
+ * frequency.
+ */
+__attribute__(( weak )) void vPortSetupTimerInterrupt( void )
+{
+    /* Calculate the constants required to configure the tick interrupt. */
+    #if( configUSE_TICKLESS_IDLE == 1 )
+    {
+        ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
+        xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
+        ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
+    }
+    #endif /* configUSE_TICKLESS_IDLE */
+
+    /* Stop and clear the SysTick. */
+    portNVIC_SYSTICK_CTRL_REG = 0UL;
+    portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
+
+    /* Configure SysTick to interrupt at the requested rate. */
+    portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
+    portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT );
+}
+/*-----------------------------------------------------------*/
+
+/* This is a naked function. */
+static void vPortEnableVFP( void )
+{
+    __asm volatile
+    (
+        "    ldr.w r0, =0xE000ED88        \n" /* The FPU enable bits are in the CPACR. */
+        "    ldr r1, [r0]                 \n"
+        "                                 \n"
+        "    orr r1, r1, #( 0xf << 20 )   \n" /* Enable CP10 and CP11 coprocessors, then save back.
*/ + " str r1, [r0] \n" + " bx r14 " + ); +} +/*-----------------------------------------------------------*/ + +#if( configASSERT_DEFINED == 1 ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile( "mrs %0, ipsr" : "=r"( ulCurrentInterrupt ) :: "memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + an interrupt that has been assigned a priority above + configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + function. ISR safe FreeRTOS API functions must *only* be called + from interrupts that have been assigned a priority at or below + configMAX_SYSCALL_INTERRUPT_PRIORITY. + + Numerically low interrupt priority numbers represent logically high + interrupt priorities, therefore the priority of the interrupt must + be set to a value equal to or numerically *higher* than + configMAX_SYSCALL_INTERRUPT_PRIORITY. + + Interrupts that use the FreeRTOS API must not be left at their + default priority of zero as that is the highest possible priority, + which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + and therefore also guaranteed to be invalid. + + FreeRTOS maintains separate thread and ISR API functions to ensure + interrupt entry is as fast and simple as possible. + + The following links provide detailed information: + http://www.freertos.org/RTOS-Cortex-M3-M4.html + http://www.freertos.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + that define each interrupt's priority to be split between bits that + define the interrupt's pre-emption priority bits and bits that define + the interrupt's sub-priority. For simplicity all bits must be defined + to be pre-emption priority bits. The following assertion will fail if + this is not the case (if some bits represent a sub-priority). + + If the application only uses CMSIS libraries for interrupt + configuration then the correct setting can be achieved on all Cortex-M + devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + scheduler. Note however that some vendor specific peripheral libraries + assume a non-zero priority group setting, in which cases using a value + of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* configASSERT_DEFINED */ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h new file mode 100644 index 0000000..1d7d653 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/GCC/ARM_CM7/r0p1/portmacro.h @@ -0,0 +1,247 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*----------------------------------------------------------- + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the + * given hardware and compiler. + * + * These settings should not be altered. + *----------------------------------------------------------- + */ + +/* Type definitions. */ +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + + /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 +#endif +/*-----------------------------------------------------------*/ + +/* Architecture specifics. */ +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +/*-----------------------------------------------------------*/ + +/* Scheduler utilities. */ +#define portYIELD() \ +{ \ + /* Set a PendSV to request a context switch. */ \ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; \ + \ + /* Barriers are normally not required but do ensure the code is completely \ + within the specified behaviour for the architecture. */ \ + __asm volatile( "dsb" ::: "memory" ); \ + __asm volatile( "isb" ); \ +} + +#define portNVIC_INT_CTRL_REG ( * ( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired != pdFALSE ) portYIELD() +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/* Critical section management. 
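+
+   Editor's usage sketch (ulSharedCounter is a hypothetical variable, not
+   upstream code): task-level code wraps short guarded regions as
+
+       taskENTER_CRITICAL();
+       ulSharedCounter++;    safe against other tasks and against interrupts at
+                             or below configMAX_SYSCALL_INTERRUPT_PRIORITY
+       taskEXIT_CRITICAL();
+
+   which resolves to the vPortEnterCritical()/vPortExitCritical() pair and the
+   BASEPRI macros declared below.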
*/
+extern void vPortEnterCritical( void );
+extern void vPortExitCritical( void );
+#define portSET_INTERRUPT_MASK_FROM_ISR()      ulPortRaiseBASEPRI()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x)   vPortSetBASEPRI(x)
+#define portDISABLE_INTERRUPTS()               vPortRaiseBASEPRI()
+#define portENABLE_INTERRUPTS()                vPortSetBASEPRI(0)
+#define portENTER_CRITICAL()                   vPortEnterCritical()
+#define portEXIT_CRITICAL()                    vPortExitCritical()
+
+/*-----------------------------------------------------------*/
+
+/* Task function macros as described on the FreeRTOS.org WEB site.  These are
+not necessary to use this port.  They are defined so the common demo files
+(which build with all the ports) will build. */
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )
+/*-----------------------------------------------------------*/
+
+/* Tickless idle/low power functionality. */
+#ifndef portSUPPRESS_TICKS_AND_SLEEP
+    extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime );
+    #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime )
+#endif
+/*-----------------------------------------------------------*/
+
+/* Architecture specific optimisations. */
+#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION
+    #define configUSE_PORT_OPTIMISED_TASK_SELECTION 1
+#endif
+
+#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
+
+    /* Generic helper function. */
+    __attribute__( ( always_inline ) ) static inline uint8_t ucPortCountLeadingZeros( uint32_t ulBitmap )
+    {
+    uint8_t ucReturn;
+
+        __asm volatile ( "clz %0, %1" : "=r" ( ucReturn ) : "r" ( ulBitmap ) : "memory" );
+        return ucReturn;
+    }
+
+    /* Check the configuration. */
+    #if( configMAX_PRIORITIES > 32 )
+        #error configUSE_PORT_OPTIMISED_TASK_SELECTION can only be set to 1 when configMAX_PRIORITIES is less than or equal to 32.  It is very rare that a system requires more than 10 to 15 different priorities as tasks that share a priority will time slice.
+    #endif
+
+    /* Store/clear the ready priorities in a bit map. */
+    #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )
+    #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )
+
+    /*-----------------------------------------------------------*/
+
+    #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31UL - ( uint32_t ) ucPortCountLeadingZeros( ( uxReadyPriorities ) ) )
+
+#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
+
+/*-----------------------------------------------------------*/
+
+#ifdef configASSERT
+    void vPortValidateInterruptPriority( void );
+    #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID()    vPortValidateInterruptPriority()
+#endif
+
+/* portNOP() is not required by this port. */
+#define portNOP()
+
+#define portINLINE    __inline
+
+#ifndef portFORCE_INLINE
+    #define portFORCE_INLINE inline __attribute__(( always_inline))
+#endif
+
+portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void )
+{
+uint32_t ulCurrentInterrupt;
+BaseType_t xReturn;
+
+    /* Obtain the number of the currently executing interrupt.
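+
+    (Editor's sketch with hypothetical names xQueue, xData and xWoken: code
+    that may run from either context can use this function to select the
+    correct API variant, e.g.
+
+        if( xPortIsInsideInterrupt() == pdTRUE )
+        {
+            xQueueSendFromISR( xQueue, &xData, &xWoken );
+        }
+        else
+        {
+            xQueueSend( xQueue, &xData, portMAX_DELAY );
+        }
+    )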
*/
+    __asm volatile( "mrs %0, ipsr" : "=r"( ulCurrentInterrupt ) :: "memory" );
+
+    if( ulCurrentInterrupt == 0 )
+    {
+        xReturn = pdFALSE;
+    }
+    else
+    {
+        xReturn = pdTRUE;
+    }
+
+    return xReturn;
+}
+
+/*-----------------------------------------------------------*/
+
+portFORCE_INLINE static void vPortRaiseBASEPRI( void )
+{
+uint32_t ulNewBASEPRI;
+
+    __asm volatile
+    (
+        "    mov %0, %1        \n" \
+        "    cpsid i           \n" \
+        "    msr basepri, %0   \n" \
+        "    isb               \n" \
+        "    dsb               \n" \
+        "    cpsie i           \n" \
+        :"=r" (ulNewBASEPRI) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
+    );
+}
+
+/*-----------------------------------------------------------*/
+
+portFORCE_INLINE static uint32_t ulPortRaiseBASEPRI( void )
+{
+uint32_t ulOriginalBASEPRI, ulNewBASEPRI;
+
+    __asm volatile
+    (
+        "    mrs %0, basepri   \n" \
+        "    mov %1, %2        \n" \
+        "    cpsid i           \n" \
+        "    msr basepri, %1   \n" \
+        "    isb               \n" \
+        "    dsb               \n" \
+        "    cpsie i           \n" \
+        :"=r" (ulOriginalBASEPRI), "=r" (ulNewBASEPRI) : "i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory"
+    );
+
+    /* This return will not be reached but is necessary to prevent compiler
+    warnings. */
+    return ulOriginalBASEPRI;
+}
+/*-----------------------------------------------------------*/
+
+portFORCE_INLINE static void vPortSetBASEPRI( uint32_t ulNewMaskValue )
+{
+    __asm volatile
+    (
+        "    msr basepri, %0 " :: "r" ( ulNewMaskValue ) : "memory"
+    );
+}
+/*-----------------------------------------------------------*/
+
+#define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" )
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PORTMACRO_H */
+
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c
new file mode 100644
index 0000000..23714eb
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/portable/MemMang/heap_4.c
@@ -0,0 +1,436 @@
+/*
+ * FreeRTOS Kernel V10.2.1
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+/*
+ * A sample implementation of pvPortMalloc() and vPortFree() that combines
+ * (coalesces) adjacent memory blocks as they are freed, and in so doing
+ * limits memory fragmentation.
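+ *
+ * Editor's usage sketch (pxBuffer is a hypothetical pointer, not upstream
+ * code):
+ *
+ *     void *pxBuffer = pvPortMalloc( 100 );
+ *     if( pxBuffer != NULL )
+ *     {
+ *         ...use the 100 bytes...
+ *         vPortFree( pxBuffer );  - the freed block is merged with any
+ *                                   free block immediately before or after it
+ *     }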
+ *
+ * See heap_1.c, heap_2.c and heap_3.c for alternative implementations, and the
+ * memory management pages of http://www.FreeRTOS.org for more information.
+ */
+#include <stdlib.h>
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+all the API functions to use the MPU wrappers.  That should only be done when
+task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+#include "FreeRTOS.h"
+#include "task.h"
+
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+#if( configSUPPORT_DYNAMIC_ALLOCATION == 0 )
+    #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0
+#endif
+
+/* Block sizes must not get too small. */
+#define heapMINIMUM_BLOCK_SIZE    ( ( size_t ) ( xHeapStructSize << 1 ) )
+
+/* Assumes 8bit bytes! */
+#define heapBITS_PER_BYTE         ( ( size_t ) 8 )
+
+/* Allocate the memory for the heap. */
+#if( configAPPLICATION_ALLOCATED_HEAP == 1 )
+    /* The application writer has already defined the array used for the RTOS
+    heap - probably so it can be placed in a special segment or address. */
+    extern uint8_t ucHeap[ configTOTAL_HEAP_SIZE ];
+#else
+    static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ];
+#endif /* configAPPLICATION_ALLOCATED_HEAP */
+
+/* Define the linked list structure.  This is used to link free blocks in order
+of their memory address. */
+typedef struct A_BLOCK_LINK
+{
+    struct A_BLOCK_LINK *pxNextFreeBlock;    /*<< The next free block in the list. */
+    size_t xBlockSize;                       /*<< The size of the free block. */
+} BlockLink_t;
+
+/*-----------------------------------------------------------*/
+
+/*
+ * Inserts a block of memory that is being freed into the correct position in
+ * the list of free memory blocks.  The block being freed will be merged with
+ * the block in front of it and/or the block behind it if the memory blocks are
+ * adjacent to each other.
+ */
+static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert );
+
+/*
+ * Called automatically to setup the required heap structures the first time
+ * pvPortMalloc() is called.
+ */
+static void prvHeapInit( void );
+
+/*-----------------------------------------------------------*/
+
+/* The size of the structure placed at the beginning of each allocated memory
+block must be correctly byte aligned. */
+static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK );
+
+/* Create a couple of list links to mark the start and end of the list. */
+static BlockLink_t xStart, *pxEnd = NULL;
+
+/* Keeps track of the number of free bytes remaining, but says nothing about
+fragmentation. */
+static size_t xFreeBytesRemaining = 0U;
+static size_t xMinimumEverFreeBytesRemaining = 0U;
+
+/* Gets set to the top bit of a size_t type.  When this bit in the xBlockSize
+member of a BlockLink_t structure is set then the block belongs to the
+application.  When the bit is free the block is still part of the free heap
+space. */
+static size_t xBlockAllocatedBit = 0;
+
+/*-----------------------------------------------------------*/
+
+void *pvPortMalloc( size_t xWantedSize )
+{
+BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink;
+void *pvReturn = NULL;
+
+    vTaskSuspendAll();
+    {
+        /* If this is the first call to malloc then the heap will require
+        initialisation to setup the list of free blocks. */
+        if( pxEnd == NULL )
+        {
+            prvHeapInit();
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        /* Check the requested block size is not so large that the top bit is
+        set.
The top bit of the block size member of the BlockLink_t structure + is used to determine who owns the block - the application or the + kernel, so it must be free. */ + if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) + { + /* The wanted size is increased so it can contain a BlockLink_t + structure in addition to the requested amount of bytes. */ + if( xWantedSize > 0 ) + { + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number + of bytes. */ + if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. */ + xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); + configASSERT( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) == 0 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) + { + /* Traverse the list from the start (lowest address) block until + one of adequate size is found. */ + pxPreviousBlock = &xStart; + pxBlock = xStart.pxNextFreeBlock; + while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) + { + pxPreviousBlock = pxBlock; + pxBlock = pxBlock->pxNextFreeBlock; + } + + /* If the end marker was reached then a block of adequate size + was not found. */ + if( pxBlock != pxEnd ) + { + /* Return the memory space pointed to - jumping over the + BlockLink_t structure at its start. */ + pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); + + /* This block is being returned for use so must be taken out + of the list of free blocks. */ + pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; + + /* If the block is larger than required it can be split into + two. */ + if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) + { + /* This block is to be split into two. Create a new + block following the number of bytes requested. The void + cast is used to prevent byte alignment warnings from the + compiler. */ + pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); + configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Calculate the sizes of two blocks split from the + single block. */ + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; + pxBlock->xBlockSize = xWantedSize; + + /* Insert the new block into the list of free blocks. */ + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xFreeBytesRemaining -= pxBlock->xBlockSize; + + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The block is being returned - it is allocated and owned + by the application and has no "next" block. 
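+
+                    (Editor's worked note: on a 32-bit size_t,
+                    xBlockAllocatedBit is bit 31, so a 100-byte request that
+                    grew to a total size of 0x70 after the header and
+                    alignment adjustments above is stored as 0x80000070 once
+                    allocated.)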
*/ + pxBlock->xBlockSize |= xBlockAllocatedBit; + pxBlock->pxNextFreeBlock = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceMALLOC( pvReturn, xWantedSize ); + } + ( void ) xTaskResumeAll(); + + #if( configUSE_MALLOC_FAILED_HOOK == 1 ) + { + if( pvReturn == NULL ) + { + extern void vApplicationMallocFailedHook( void ); + vApplicationMallocFailedHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif + + configASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vPortFree( void *pv ) +{ +uint8_t *puc = ( uint8_t * ) pv; +BlockLink_t *pxLink; + + if( pv != NULL ) + { + /* The memory being freed will have an BlockLink_t structure immediately + before it. */ + puc -= xHeapStructSize; + + /* This casting is to keep the compiler from issuing warnings. */ + pxLink = ( void * ) puc; + + /* Check the block is actually allocated. */ + configASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ); + configASSERT( pxLink->pxNextFreeBlock == NULL ); + + if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ) + { + if( pxLink->pxNextFreeBlock == NULL ) + { + /* The block is being returned to the heap - it is no longer + allocated. */ + pxLink->xBlockSize &= ~xBlockAllocatedBit; + + vTaskSuspendAll(); + { + /* Add this block to the list of free blocks. */ + xFreeBytesRemaining += pxLink->xBlockSize; + traceFREE( pv, pxLink->xBlockSize ); + prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); + } + ( void ) xTaskResumeAll(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +} +/*-----------------------------------------------------------*/ + +size_t xPortGetFreeHeapSize( void ) +{ + return xFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +size_t xPortGetMinimumEverFreeHeapSize( void ) +{ + return xMinimumEverFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +void vPortInitialiseBlocks( void ) +{ + /* This just exists to keep the linker quiet. */ +} +/*-----------------------------------------------------------*/ + +static void prvHeapInit( void ) +{ +BlockLink_t *pxFirstFreeBlock; +uint8_t *pucAlignedHeap; +size_t uxAddress; +size_t xTotalHeapSize = configTOTAL_HEAP_SIZE; + + /* Ensure the heap starts on a correctly aligned boundary. */ + uxAddress = ( size_t ) ucHeap; + + if( ( uxAddress & portBYTE_ALIGNMENT_MASK ) != 0 ) + { + uxAddress += ( portBYTE_ALIGNMENT - 1 ); + uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + xTotalHeapSize -= uxAddress - ( size_t ) ucHeap; + } + + pucAlignedHeap = ( uint8_t * ) uxAddress; + + /* xStart is used to hold a pointer to the first item in the list of free + blocks. The void cast is used to prevent compiler warnings. */ + xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; + xStart.xBlockSize = ( size_t ) 0; + + /* pxEnd is used to mark the end of the list of free blocks and is inserted + at the end of the heap space. */ + uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress -= xHeapStructSize; + uxAddress &= ~( ( size_t ) portBYTE_ALIGNMENT_MASK ); + pxEnd = ( void * ) uxAddress; + pxEnd->xBlockSize = 0; + pxEnd->pxNextFreeBlock = NULL; + + /* To start with there is a single free block that is sized to take up the + entire heap space, minus the space taken by pxEnd. 
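+
+    (Editor's worked example, assuming configTOTAL_HEAP_SIZE of 1024 and a
+    ucHeap array that is already 8-byte aligned: pxEnd occupies the last 8
+    bytes of the heap, so the single starting block created below reports
+    xBlockSize == 1016.)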
*/
+    pxFirstFreeBlock = ( void * ) pucAlignedHeap;
+    pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock;
+    pxFirstFreeBlock->pxNextFreeBlock = pxEnd;
+
+    /* Only one block exists - and it covers the entire usable heap space. */
+    xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
+    xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
+
+    /* Work out the position of the top bit in a size_t variable. */
+    xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 );
+}
+/*-----------------------------------------------------------*/
+
+static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert )
+{
+BlockLink_t *pxIterator;
+uint8_t *puc;
+
+    /* Iterate through the list until a block is found that has a higher address
+    than the block being inserted. */
+    for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock )
+    {
+        /* Nothing to do here, just iterate to the right position. */
+    }
+
+    /* Do the block being inserted, and the block it is being inserted after
+    make a contiguous block of memory? */
+    puc = ( uint8_t * ) pxIterator;
+    if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
+    {
+        pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
+        pxBlockToInsert = pxIterator;
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    /* Do the block being inserted, and the block it is being inserted before
+    make a contiguous block of memory? */
+    puc = ( uint8_t * ) pxBlockToInsert;
+    if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock )
+    {
+        if( pxIterator->pxNextFreeBlock != pxEnd )
+        {
+            /* Form one big block from the two blocks. */
+            pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
+            pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
+        }
+        else
+        {
+            pxBlockToInsert->pxNextFreeBlock = pxEnd;
+        }
+    }
+    else
+    {
+        pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
+    }
+
+    /* If the block being inserted plugged a gap, so was merged with the block
+    before and the block after, then its pxNextFreeBlock pointer will have
+    already been set, and should not be set here as that would make it point
+    to itself. */
+    if( pxIterator != pxBlockToInsert )
+    {
+        pxIterator->pxNextFreeBlock = pxBlockToInsert;
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
+}
+
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/queue.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/queue.c
new file mode 100644
index 0000000..6f79c92
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/queue.c
@@ -0,0 +1,2941 @@
+/*
+ * FreeRTOS Kernel V10.2.1
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+all the API functions to use the MPU wrappers.  That should only be done when
+task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+#include "FreeRTOS.h"
+#include "task.h"
+#include "queue.h"
+
+#if ( configUSE_CO_ROUTINES == 1 )
+	#include "croutine.h"
+#endif
+
+/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
+because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
+for the header files above, but not in this file, in order to generate the
+correct privileged vs unprivileged linkage and placement. */
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
+
+
+/* Constants used with the cRxLock and cTxLock structure members. */
+#define queueUNLOCKED					( ( int8_t ) -1 )
+#define queueLOCKED_UNMODIFIED			( ( int8_t ) 0 )
+
+/* When the Queue_t structure is used to represent a base queue its pcHead and
+pcTail members are used as pointers into the queue storage area.  When the
+Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
+not necessary, and the pcHead pointer is set to NULL to indicate that the
+structure instead holds a pointer to the mutex holder (if any).  Map alternative
+names to the pcHead structure member to ensure the readability of the code
+is maintained.  The QueuePointers_t and SemaphoreData_t types are used to form
+a union as their usage is mutually exclusive dependent on what the queue is
+being used for. */
+#define uxQueueType						pcHead
+#define queueQUEUE_IS_MUTEX				NULL
+
+typedef struct QueuePointers
+{
+	int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items, this is used as a marker. */
+	int8_t *pcReadFrom;				/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
+} QueuePointers_t;
+
+typedef struct SemaphoreData
+{
+	TaskHandle_t xMutexHolder;		 /*< The handle of the task that holds the mutex. */
+	UBaseType_t uxRecursiveCallCount; /*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
+} SemaphoreData_t;
+
+/* Semaphores do not actually store or copy data, so have an item size of
+zero. */
+#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
+#define queueMUTEX_GIVE_BLOCK_TIME		 ( ( TickType_t ) 0U )
+
+#if( configUSE_PREEMPTION == 0 )
+	/* If the cooperative scheduler is being used then a yield should not be
+	performed just because a higher priority task has been woken. */
+	#define queueYIELD_IF_USING_PREEMPTION()
+#else
+	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
+#endif
+
+/*
+ * Definition of the queue used by the scheduler.
+ * Items are queued by copy, not reference.  See the following link for the
+ * rationale: https://www.freertos.org/Embedded-RTOS-Queues.html
+ */
+typedef struct QueueDefinition 		/* The old naming convention is used to prevent breaking kernel aware debuggers. */
+{
+	int8_t *pcHead;					/*< Points to the beginning of the queue storage area. */
+	int8_t *pcWriteTo;				/*< Points to the next free place in the storage area. */
+
+	union
+	{
+		QueuePointers_t xQueue;		/*< Data required exclusively when this structure is used as a queue. */
+		SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */
+	} u;
+
+	List_t xTasksWaitingToSend;		/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
+	List_t xTasksWaitingToReceive;	/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */
+
+	volatile UBaseType_t uxMessagesWaiting; /*< The number of items currently in the queue. */
+	UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
+	UBaseType_t uxItemSize;			/*< The size of each item that the queue will hold. */
+
+	volatile int8_t cRxLock;		/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
+	volatile int8_t cTxLock;		/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
+
+	#if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
+		uint8_t ucStaticallyAllocated;	/*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
+	#endif
+
+	#if ( configUSE_QUEUE_SETS == 1 )
+		struct QueueDefinition *pxQueueSetContainer;
+	#endif
+
+	#if ( configUSE_TRACE_FACILITY == 1 )
+		UBaseType_t uxQueueNumber;
+		uint8_t ucQueueType;
+	#endif
+
+} xQUEUE;
+
+/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
+name below to enable the use of older kernel aware debuggers. */
+typedef xQUEUE Queue_t;
+
+/*-----------------------------------------------------------*/
+
+/*
+ * The queue registry is just a means for kernel aware debuggers to locate
+ * queue structures.  It has no other purpose so is an optional component.
+ */
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+	/* The type stored within the queue registry array.  This allows a name
+	to be assigned to each queue making kernel aware debugging a little
+	more user friendly. */
+	typedef struct QUEUE_REGISTRY_ITEM
+	{
+		const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+		QueueHandle_t xHandle;
+	} xQueueRegistryItem;
+
+	/* The old xQueueRegistryItem name is maintained above then typedefed to
+	the new QueueRegistryItem_t name below to enable the use of older kernel
+	aware debuggers. */
+	typedef xQueueRegistryItem QueueRegistryItem_t;
+
+	/* The queue registry is simply an array of QueueRegistryItem_t structures.
+	The pcQueueName member of a structure being NULL is indicative of the
+	array position being vacant. */
+	PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];
+
+#endif /* configQUEUE_REGISTRY_SIZE */
+
+/*
+ * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
+ * prevent an ISR from adding items to or removing items from the queue, but
+ * does prevent an ISR from removing tasks from the queue event lists.  If an
+ * ISR finds a queue is locked it will instead increment the appropriate queue
+ * lock count to indicate that a task may require unblocking.  When the queue
+ * is unlocked these lock counts are inspected, and the appropriate action
+ * taken.
+ */
+static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
+
+/*
+ * Uses a critical section to determine if there is any data in a queue.
+ *
+ * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
+ */
+static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
+
+/*
+ * Uses a critical section to determine if there is any space in a queue.
+ *
+ * @return pdTRUE if there is no space, otherwise pdFALSE.
+ */
+static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
+
+/*
+ * Copies an item into the queue, either at the front of the queue or the
+ * back of the queue.
+ */
+static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
+
+/*
+ * Copies an item out of a queue.
+ */
+static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;
+
+#if ( configUSE_QUEUE_SETS == 1 )
+	/*
+	 * Checks to see if a queue is a member of a queue set, and if so, notifies
+	 * the queue set that the queue contains data.
+	 */
+	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
+#endif
+
+/*
+ * Called after a Queue_t structure has been allocated either statically or
+ * dynamically to fill in the structure's members.
+ */
+static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
+
+/*
+ * Mutexes are a special type of queue.  When a mutex is created, first the
+ * queue is created, then prvInitialiseMutex() is called to configure the queue
+ * as a mutex.
+ */
+#if( configUSE_MUTEXES == 1 )
+	static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
+#endif
+
+#if( configUSE_MUTEXES == 1 )
+	/*
+	 * If a task waiting for a mutex causes the mutex holder to inherit a
+	 * priority, but the waiting task times out, then the holder should
+	 * disinherit the priority - but only down to the highest priority of any
+	 * other tasks that are waiting for the same mutex.  This function returns
+	 * that priority.
+	 */
+	static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
+#endif
+/*-----------------------------------------------------------*/
+
+/*
+ * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
+ * accessing the queue event lists.
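+ *
+ * For orientation, the blocking paths later in this file pair this macro
+ * with prvUnlockQueue() in roughly the following shape (a simplified sketch
+ * of the code below, not a verbatim excerpt):
+ *
+ *     vTaskSuspendAll();
+ *     prvLockQueue( pxQueue );
+ *     if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
+ *     {
+ *         vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
+ *     }
+ *     prvUnlockQueue( pxQueue );
+ *     ( void ) xTaskResumeAll();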
+ */ +#define prvLockQueue( pxQueue ) \ + taskENTER_CRITICAL(); \ + { \ + if( ( pxQueue )->cRxLock == queueUNLOCKED ) \ + { \ + ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \ + } \ + if( ( pxQueue )->cTxLock == queueUNLOCKED ) \ + { \ + ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \ + } \ + } \ + taskEXIT_CRITICAL() +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) +{ +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + + taskENTER_CRITICAL(); + { + pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ + pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U; + pxQueue->pcWriteTo = pxQueue->pcHead; + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ + pxQueue->cRxLock = queueUNLOCKED; + pxQueue->cTxLock = queueUNLOCKED; + + if( xNewQueue == pdFALSE ) + { + /* If there are tasks blocked waiting to read from the queue, then + the tasks will remain blocked as after this function exits the queue + will still be empty. If there are tasks blocked waiting to write to + the queue, then one should be unblocked as after this function exits + it will be possible to write to it. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Ensure the event queues start in the correct state. */ + vListInitialise( &( pxQueue->xTasksWaitingToSend ) ); + vListInitialise( &( pxQueue->xTasksWaitingToReceive ) ); + } + } + taskEXIT_CRITICAL(); + + /* A value is returned for calling semantic consistency with previous + versions. */ + return pdPASS; +} +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ) + { + Queue_t *pxNewQueue; + + configASSERT( uxQueueLength > ( UBaseType_t ) 0 ); + + /* The StaticQueue_t structure and the queue storage area must be + supplied. */ + configASSERT( pxStaticQueue != NULL ); + + /* A queue storage area should be provided if the item size is not 0, and + should not be provided if the item size is 0. */ + configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) ); + configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) ); + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticQueue_t or StaticSemaphore_t equals the size of + the real queue and semaphore structures. */ + volatile size_t xSize = sizeof( StaticQueue_t ); + configASSERT( xSize == sizeof( Queue_t ) ); + ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */ + } + #endif /* configASSERT_DEFINED */ + + /* The address of a statically allocated queue was passed in, use it. + The address of a statically allocated storage area was also passed in + but is already set. 
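+
+		Applications normally reach this function through the
+		xQueueCreateStatic() macro in queue.h.  A hedged application-side
+		sketch (the names and sizes are illustrative, not part of this file):
+
+			static StaticQueue_t xQueueBuffer;
+			static uint8_t ucQueueStorage[ 10 * sizeof( uint32_t ) ];
+			QueueHandle_t xQueue = xQueueCreateStatic( 10, sizeof( uint32_t ),
+					ucQueueStorage, &xQueueBuffer );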
+		*/
+		pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
+
+		if( pxNewQueue != NULL )
+		{
+			#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+			{
+				/* Queues can be allocated either statically or dynamically, so
+				note this queue was allocated statically in case the queue is
+				later deleted. */
+				pxNewQueue->ucStaticallyAllocated = pdTRUE;
+			}
+			#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+
+			prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
+		}
+		else
+		{
+			traceQUEUE_CREATE_FAILED( ucQueueType );
+			mtCOVERAGE_TEST_MARKER();
+		}
+
+		return pxNewQueue;
+	}
+
+#endif /* configSUPPORT_STATIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+
+	QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
+	{
+	Queue_t *pxNewQueue;
+	size_t xQueueSizeInBytes;
+	uint8_t *pucQueueStorage;
+
+		configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
+
+		if( uxItemSize == ( UBaseType_t ) 0 )
+		{
+			/* There is not going to be a queue storage area. */
+			xQueueSizeInBytes = ( size_t ) 0;
+		}
+		else
+		{
+			/* Allocate enough space to hold the maximum number of items that
+			can be in the queue at any time. */
+			xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+		}
+
+		/* Allocate the queue and storage area.  Justification for MISRA
+		deviation as follows:  pvPortMalloc() always ensures returned memory
+		blocks are aligned per the requirements of the MCU stack.  In this case
+		pvPortMalloc() must return a pointer that is guaranteed to meet the
+		alignment requirements of the Queue_t structure - which in this case
+		is an int8_t *.  Therefore, whenever the stack alignment requirements
+		are greater than or equal to the pointer to char requirements the cast
+		is safe.  In other cases alignment requirements are not strict (one or
+		two bytes). */
+		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */
+
+		if( pxNewQueue != NULL )
+		{
+			/* Jump past the queue structure to find the location of the queue
+			storage area. */
+			pucQueueStorage = ( uint8_t * ) pxNewQueue;
+			pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
+
+			#if( configSUPPORT_STATIC_ALLOCATION == 1 )
+			{
+				/* Queues can be created either statically or dynamically, so
+				note this queue was created dynamically in case it is later
+				deleted. */
+				pxNewQueue->ucStaticallyAllocated = pdFALSE;
+			}
+			#endif /* configSUPPORT_STATIC_ALLOCATION */
+
+			prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
+		}
+		else
+		{
+			traceQUEUE_CREATE_FAILED( ucQueueType );
+			mtCOVERAGE_TEST_MARKER();
+		}
+
+		return pxNewQueue;
+	}
+
+#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
+/*-----------------------------------------------------------*/
+
+static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
+{
+	/* Remove compiler warnings about unused parameters should
+	configUSE_TRACE_FACILITY not be set to 1.
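+
+	For reference, the dynamic creation path above is normally reached through
+	the xQueueCreate() macro in queue.h (a hedged application-side sketch; the
+	length and item type are illustrative):
+
+		QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );
+		configASSERT( xQueue != NULL );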
*/ + ( void ) ucQueueType; + + if( uxItemSize == ( UBaseType_t ) 0 ) + { + /* No RAM was allocated for the queue storage area, but PC head cannot + be set to NULL because NULL is used as a key to say the queue is used as + a mutex. Therefore just set pcHead to point to the queue as a benign + value that is known to be within the memory map. */ + pxNewQueue->pcHead = ( int8_t * ) pxNewQueue; + } + else + { + /* Set the head to the start of the queue storage area. */ + pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage; + } + + /* Initialise the queue members as described where the queue type is + defined. */ + pxNewQueue->uxLength = uxQueueLength; + pxNewQueue->uxItemSize = uxItemSize; + ( void ) xQueueGenericReset( pxNewQueue, pdTRUE ); + + #if ( configUSE_TRACE_FACILITY == 1 ) + { + pxNewQueue->ucQueueType = ucQueueType; + } + #endif /* configUSE_TRACE_FACILITY */ + + #if( configUSE_QUEUE_SETS == 1 ) + { + pxNewQueue->pxQueueSetContainer = NULL; + } + #endif /* configUSE_QUEUE_SETS */ + + traceQUEUE_CREATE( pxNewQueue ); +} +/*-----------------------------------------------------------*/ + +#if( configUSE_MUTEXES == 1 ) + + static void prvInitialiseMutex( Queue_t *pxNewQueue ) + { + if( pxNewQueue != NULL ) + { + /* The queue create function will set all the queue structure members + correctly for a generic queue, but this function is creating a + mutex. Overwrite those members that need to be set differently - + in particular the information required for priority inheritance. */ + pxNewQueue->u.xSemaphore.xMutexHolder = NULL; + pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX; + + /* In case this is a recursive mutex. */ + pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0; + + traceCREATE_MUTEX( pxNewQueue ); + + /* Start with the semaphore in the expected state. */ + ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK ); + } + else + { + traceCREATE_MUTEX_FAILED(); + } + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType ) + { + QueueHandle_t xNewQueue; + const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0; + + xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType ); + prvInitialiseMutex( ( Queue_t * ) xNewQueue ); + + return xNewQueue; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ) + { + QueueHandle_t xNewQueue; + const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0; + + /* Prevent compiler warnings about unused parameters if + configUSE_TRACE_FACILITY does not equal 1. 
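+
+		For reference, the mutex creation paths above are normally reached
+		through the semphr.h wrappers (a hedged application-side sketch; the
+		names and the 10ms timeout are illustrative):
+
+			SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();
+			if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 10 ) ) == pdTRUE )
+			{
+				xSemaphoreGive( xMutex );
+			}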
*/ + ( void ) ucQueueType; + + xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType ); + prvInitialiseMutex( ( Queue_t * ) xNewQueue ); + + return xNewQueue; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore ) + { + TaskHandle_t pxReturn; + Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore; + + /* This function is called by xSemaphoreGetMutexHolder(), and should not + be called directly. Note: This is a good way of determining if the + calling task is the mutex holder, but not a good way of determining the + identity of the mutex holder, as the holder may change between the + following critical section exiting and the function returning. */ + taskENTER_CRITICAL(); + { + if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX ) + { + pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder; + } + else + { + pxReturn = NULL; + } + } + taskEXIT_CRITICAL(); + + return pxReturn; + } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */ + +#endif +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) + { + TaskHandle_t pxReturn; + + configASSERT( xSemaphore ); + + /* Mutexes cannot be used in interrupt service routines, so the mutex + holder should not change in an ISR, and therefore a critical section is + not required here. */ + if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX ) + { + pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder; + } + else + { + pxReturn = NULL; + } + + return pxReturn; + } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */ + +#endif +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex ) + { + BaseType_t xReturn; + Queue_t * const pxMutex = ( Queue_t * ) xMutex; + + configASSERT( pxMutex ); + + /* If this is the task that holds the mutex then xMutexHolder will not + change outside of this task. If this task does not hold the mutex then + pxMutexHolder can never coincidentally equal the tasks handle, and as + this is the only condition we are interested in it does not matter if + pxMutexHolder is accessed simultaneously by another task. Therefore no + mutual exclusion is required to test the pxMutexHolder variable. */ + if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() ) + { + traceGIVE_MUTEX_RECURSIVE( pxMutex ); + + /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to + the task handle, therefore no underflow check is required. Also, + uxRecursiveCallCount is only modified by the mutex holder, and as + there can only be one, no mutual exclusion is required to modify the + uxRecursiveCallCount member. */ + ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--; + + /* Has the recursive call count unwound to 0? */ + if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 ) + { + /* Return the mutex. This will automatically unblock any other + task that might be waiting to access the mutex. 
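+				A hedged sketch of the matching application-side pattern,
+				assuming xMutex was created with
+				xSemaphoreCreateRecursiveMutex(): every successful take must
+				be balanced by a give before the mutex is released here.
+
+					xSemaphoreTakeRecursive( xMutex, portMAX_DELAY );
+					xSemaphoreTakeRecursive( xMutex, portMAX_DELAY );
+					xSemaphoreGiveRecursive( xMutex );
+					xSemaphoreGiveRecursive( xMutex ); // count unwinds to 0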
*/ + ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xReturn = pdPASS; + } + else + { + /* The mutex cannot be given because the calling task is not the + holder. */ + xReturn = pdFAIL; + + traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ); + } + + return xReturn; + } + +#endif /* configUSE_RECURSIVE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxMutex = ( Queue_t * ) xMutex; + + configASSERT( pxMutex ); + + /* Comments regarding mutual exclusion as per those within + xQueueGiveMutexRecursive(). */ + + traceTAKE_MUTEX_RECURSIVE( pxMutex ); + + if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() ) + { + ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++; + xReturn = pdPASS; + } + else + { + xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait ); + + /* pdPASS will only be returned if the mutex was successfully + obtained. The calling task may have entered the Blocked state + before reaching here. */ + if( xReturn != pdFAIL ) + { + ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++; + } + else + { + traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex ); + } + } + + return xReturn; + } + +#endif /* configUSE_RECURSIVE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ) + { + QueueHandle_t xHandle; + + configASSERT( uxMaxCount != 0 ); + configASSERT( uxInitialCount <= uxMaxCount ); + + xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE ); + + if( xHandle != NULL ) + { + ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount; + + traceCREATE_COUNTING_SEMAPHORE(); + } + else + { + traceCREATE_COUNTING_SEMAPHORE_FAILED(); + } + + return xHandle; + } + +#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) + { + QueueHandle_t xHandle; + + configASSERT( uxMaxCount != 0 ); + configASSERT( uxInitialCount <= uxMaxCount ); + + xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE ); + + if( xHandle != NULL ) + { + ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount; + + traceCREATE_COUNTING_SEMAPHORE(); + } + else + { + traceCREATE_COUNTING_SEMAPHORE_FAILED(); + } + + return xHandle; + } + +#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ) +{ +BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired; +TimeOut_t xTimeOut; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvItemToQueue 
== NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) ); + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /*lint -save -e904 This function relaxes the coding standard somewhat to + allow return statements within the function itself. This is done in the + interest of execution time efficiency. */ + for( ;; ) + { + taskENTER_CRITICAL(); + { + /* Is there room on the queue now? The running task must be the + highest priority task wanting to access the queue. If the head item + in the queue is to be overwritten then it does not matter if the + queue is full. */ + if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) + { + traceQUEUE_SEND( pxQueue ); + + #if ( configUSE_QUEUE_SETS == 1 ) + { + UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting; + + xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); + + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) ) + { + /* Do not notify the queue set as an existing item + was overwritten in the queue so the number of items + in the queue has not changed. */ + mtCOVERAGE_TEST_MARKER(); + } + else if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting + to the queue set caused a higher priority task to + unblock. A context switch is required. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* If there was a task waiting for data to arrive on the + queue then unblock it now. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The unblocked task has a priority higher than + our own so yield immediately. Yes it is ok to + do this from within the critical section - the + kernel takes care of that. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else if( xYieldRequired != pdFALSE ) + { + /* This path is a special case that will only get + executed if the task was holding multiple mutexes + and the mutexes were given back in an order that is + different to that in which they were taken. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); + + /* If there was a task waiting for data to arrive on the + queue then unblock it now. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The unblocked task has a priority higher than + our own so yield immediately. Yes it is ok to do + this from within the critical section - the kernel + takes care of that. 
*/ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else if( xYieldRequired != pdFALSE ) + { + /* This path is a special case that will only get + executed if the task was holding multiple mutexes and + the mutexes were given back in an order that is + different to that in which they were taken. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_QUEUE_SETS */ + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was full and no block time is specified (or + the block time has expired) so leave now. */ + taskEXIT_CRITICAL(); + + /* Return to the original privilege level before exiting + the function. */ + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was full and a block time was specified so + configure the timeout structure. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + if( prvIsQueueFull( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_SEND( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); + + /* Unlocking the queue means queue events can effect the + event list. It is possible that interrupts occurring now + remove this task from the event list again - but as the + scheduler is suspended the task will go onto the pending + ready last instead of the actual ready list. */ + prvUnlockQueue( pxQueue ); + + /* Resuming the scheduler will move tasks from the pending + ready list into the ready list - so it is feasible that this + task is already in a ready list before it yields - in which + case the yield will not cause a context switch unless there + is also a higher priority task in the pending ready list. */ + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + } + else + { + /* Try again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* The timeout has expired. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. 
If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + /* Similar to xQueueGenericSend, except without blocking if there is no room + in the queue. Also don't directly wake a task that was blocked on a queue + read, instead return a flag to say whether a context switch is required or + not (i.e. has a task with a higher priority than us been woken by this + post). */ + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) + { + const int8_t cTxLock = pxQueue->cTxLock; + + traceQUEUE_SEND_FROM_ISR( pxQueue ); + + /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a + semaphore or mutex. That means prvCopyDataToQueue() cannot result + in a task disinheriting a priority and prvCopyDataToQueue() can be + called here even though the disinherit function does not check if + the scheduler is suspended before accessing the ready lists. */ + ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); + + /* The event list is not altered if the queue is locked. This will + be done when the queue is unlocked later. */ + if( cTxLock == queueUNLOCKED ) + { + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting + to the queue set caused a higher priority task to + unblock. A context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so + record that a context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + context switch is required. 
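+
+							An ISR using this function typically follows the
+							standard pattern below (a hedged sketch; xQueue and
+							ulValue are illustrative, and the final yield macro
+							is port-specific):
+
+								BaseType_t xWoken = pdFALSE;
+								xQueueSendFromISR( xQueue, &ulValue, &xWoken );
+								portYIELD_FROM_ISR( xWoken );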
*/ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_QUEUE_SETS */ + } + else + { + /* Increment the lock count so the task that unlocks the queue + knows that data was posted while it was locked. */ + pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); + } + + xReturn = pdPASS; + } + else + { + traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); + xReturn = errQUEUE_FULL; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +Queue_t * const pxQueue = xQueue; + + /* Similar to xQueueGenericSendFromISR() but used with semaphores where the + item size is 0. Don't directly wake a task that was blocked on a queue + read, instead return a flag to say whether a context switch is required or + not (i.e. has a task with a higher priority than us been woken by this + post). */ + + configASSERT( pxQueue ); + + /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR() + if the item size is not 0. */ + configASSERT( pxQueue->uxItemSize == 0 ); + + /* Normally a mutex would not be given from an interrupt, especially if + there is a mutex holder, as priority inheritance makes no sense for an + interrupts, only tasks. */ + configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* When the queue is used to implement a semaphore no data is ever + moved through the queue but it is still valid to see if the queue 'has + space'. */ + if( uxMessagesWaiting < pxQueue->uxLength ) + { + const int8_t cTxLock = pxQueue->cTxLock; + + traceQUEUE_SEND_FROM_ISR( pxQueue ); + + /* A task can only have an inherited priority if it is a mutex + holder - and if there is a mutex holder then the mutex cannot be + given from an ISR. As this is the ISR version of the function it + can be assumed there is no mutex holder and no need to determine if + priority disinheritance is needed. 
Simply increase the count of + messages (semaphores) available. */ + pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1; + + /* The event list is not altered if the queue is locked. This will + be done when the queue is unlocked later. */ + if( cTxLock == queueUNLOCKED ) + { + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE ) + { + /* The semaphore is a member of a queue set, and + posting to the queue set caused a higher priority + task to unblock. A context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so + record that a context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + context switch is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_QUEUE_SETS */ + } + else + { + /* Increment the lock count so the task that unlocks the queue + knows that data was posted while it was locked. */ + pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); + } + + xReturn = pdPASS; + } + else + { + traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); + xReturn = errQUEUE_FULL; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) +{ +BaseType_t xEntryTimeSet = pdFALSE; +TimeOut_t xTimeOut; +Queue_t * const pxQueue = xQueue; + + /* Check the pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* The buffer into which data is received can only be NULL if the data size + is zero (so no data is copied into the buffer. */ + configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /*lint -save -e904 This function relaxes the coding standard somewhat to + allow return statements within the function itself. This is done in the + interest of execution time efficiency. */ + for( ;; ) + { + taskENTER_CRITICAL(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + must be the highest priority task wanting to access the queue. 
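+
+			For orientation, the matching application-side call is (a hedged
+			sketch; the names and the 100ms timeout are illustrative):
+
+				uint32_t ulReceived;
+				if( xQueueReceive( xQueue, &ulReceived, pdMS_TO_TICKS( 100 ) ) == pdPASS )
+				{
+					// ulReceived now holds a copy of the item.
+				}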
*/ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Data available, remove one item. */ + prvCopyDataFromQueue( pxQueue, pvBuffer ); + traceQUEUE_RECEIVE( pxQueue ); + pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1; + + /* There is now space in the queue, were any tasks waiting to + post to the queue? If so, unblock the highest priority waiting + task. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was empty and no block time is specified (or + the block time has expired) so leave now. */ + taskEXIT_CRITICAL(); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was empty and a block time was specified so + configure the timeout structure. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* The timeout has not expired. If the queue is still empty place + the task on the list of tasks waiting to receive from the queue. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The queue contains data again. Loop back to try and read the + data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. If there is no data in the queue exit, otherwise loop + back and attempt to read the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) +{ +BaseType_t xEntryTimeSet = pdFALSE; +TimeOut_t xTimeOut; +Queue_t * const pxQueue = xQueue; + +#if( configUSE_MUTEXES == 1 ) + BaseType_t xInheritanceOccurred = pdFALSE; +#endif + + /* Check the queue pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* Check this really is a semaphore, in which case the item size will be + 0. */ + configASSERT( pxQueue->uxItemSize == 0 ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /*lint -save -e904 This function relaxes the coding standard somewhat to allow return + statements within the function itself. 
This is done in the interest + of execution time efficiency. */ + for( ;; ) + { + taskENTER_CRITICAL(); + { + /* Semaphores are queues with an item size of 0, and where the + number of messages in the queue is the semaphore's count value. */ + const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + must be the highest priority task wanting to access the queue. */ + if( uxSemaphoreCount > ( UBaseType_t ) 0 ) + { + traceQUEUE_RECEIVE( pxQueue ); + + /* Semaphores are queues with a data size of zero and where the + messages waiting is the semaphore's count. Reduce the count. */ + pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1; + + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + /* Record the information required to implement + priority inheritance should it become necessary. */ + pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_MUTEXES */ + + /* Check to see if other tasks are blocked waiting to give the + semaphore, and if so, unblock the highest priority such task. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* For inheritance to have occurred there must have been an + initial timeout, and an adjusted timeout cannot become 0, as + if it were 0 the function would have exited. */ + #if( configUSE_MUTEXES == 1 ) + { + configASSERT( xInheritanceOccurred == pdFALSE ); + } + #endif /* configUSE_MUTEXES */ + + /* The semaphore count was 0 and no block time is specified + (or the block time has expired) so exit now. */ + taskEXIT_CRITICAL(); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The semaphore count was 0 and a block time was specified + so configure the timeout structure ready to block. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can give to and take from the semaphore + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* A block time is specified and not expired. If the semaphore + count is 0 then enter the Blocked state to wait for a semaphore to + become available. As semaphores are implemented with queues the + queue being empty is equivalent to the semaphore count being 0. 
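+
+			A hedged sketch of an application-side pattern that exercises this
+			path (the counting semaphore and 50ms timeout are illustrative):
+
+				SemaphoreHandle_t xSem = xSemaphoreCreateCounting( 5, 0 );
+				if( xSemaphoreTake( xSem, pdMS_TO_TICKS( 50 ) ) == pdTRUE )
+				{
+					// The count was, or became, non-zero within the timeout.
+				}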
*/ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + taskENTER_CRITICAL(); + { + xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); + } + taskEXIT_CRITICAL(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif + + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* There was no timeout and the semaphore count was not 0, so + attempt to take the semaphore again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + /* If the semaphore count is 0 exit now as the timeout has + expired. Otherwise return to attempt to take the semaphore that is + known to be available. As semaphores are implemented by queues the + queue being empty is equivalent to the semaphore count being 0. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + #if ( configUSE_MUTEXES == 1 ) + { + /* xInheritanceOccurred could only have be set if + pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to + test the mutex type again to check it is actually a mutex. */ + if( xInheritanceOccurred != pdFALSE ) + { + taskENTER_CRITICAL(); + { + UBaseType_t uxHighestWaitingPriority; + + /* This task blocking on the mutex caused another + task to inherit this task's priority. Now this task + has timed out the priority should be disinherited + again, but only as low as the next highest priority + task that is waiting for the same mutex. */ + uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); + vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); + } + taskEXIT_CRITICAL(); + } + } + #endif /* configUSE_MUTEXES */ + + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait ) +{ +BaseType_t xEntryTimeSet = pdFALSE; +TimeOut_t xTimeOut; +int8_t *pcOriginalReadPosition; +Queue_t * const pxQueue = xQueue; + + /* Check the pointer is not NULL. */ + configASSERT( ( pxQueue ) ); + + /* The buffer into which data is received can only be NULL if the data size + is zero (so no data is copied into the buffer. */ + configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* Cannot block if the scheduler is suspended. */ + #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + { + configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); + } + #endif + + + /*lint -save -e904 This function relaxes the coding standard somewhat to + allow return statements within the function itself. This is done in the + interest of execution time efficiency. */ + for( ;; ) + { + taskENTER_CRITICAL(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Is there data in the queue now? To be running the calling task + must be the highest priority task wanting to access the queue. 
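+
+			For orientation, peeking copies an item out but leaves it on the
+			queue (a hedged application-side sketch; names are illustrative):
+
+				uint32_t ulValue;
+				if( xQueuePeek( xQueue, &ulValue, 0 ) == pdPASS )
+				{
+					// The item remains queued for a later xQueueReceive().
+				}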
*/ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Remember the read position so it can be reset after the data + is read from the queue as this function is only peeking the + data, not removing it. */ + pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom; + + prvCopyDataFromQueue( pxQueue, pvBuffer ); + traceQUEUE_PEEK( pxQueue ); + + /* The data is not being removed, so reset the read pointer. */ + pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition; + + /* The data is being left in the queue, so see if there are + any other tasks waiting for the data. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority than this task. */ + queueYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); + return pdPASS; + } + else + { + if( xTicksToWait == ( TickType_t ) 0 ) + { + /* The queue was empty and no block time is specified (or + the block time has expired) so leave now. */ + taskEXIT_CRITICAL(); + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else if( xEntryTimeSet == pdFALSE ) + { + /* The queue was empty and a block time was specified so + configure the timeout structure ready to enter the blocked + state. */ + vTaskInternalSetTimeOutState( &xTimeOut ); + xEntryTimeSet = pdTRUE; + } + else + { + /* Entry time was already set. */ + mtCOVERAGE_TEST_MARKER(); + } + } + } + taskEXIT_CRITICAL(); + + /* Interrupts and other tasks can send to and receive from the queue + now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* Timeout has not expired yet, check to see if there is data in the + queue now, and if not enter the Blocked state to wait for data. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* There is data in the queue now, so don't enter the blocked + state, instead return to try and obtain the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* The timeout has expired. If there is still no data in the queue + exit, otherwise go back and try to read the data again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } /*lint -restore */ +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. 
Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + /* Cannot block in an ISR, so check there is data available. */ + if( uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + const int8_t cRxLock = pxQueue->cRxLock; + + traceQUEUE_RECEIVE_FROM_ISR( pxQueue ); + + prvCopyDataFromQueue( pxQueue, pvBuffer ); + pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1; + + /* If the queue is locked the event list will not be modified. + Instead update the lock count so the task that unlocks the queue + will know that an ISR has removed data while the queue was + locked. */ + if( cRxLock == queueUNLOCKED ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + /* The task waiting has a higher priority than us so + force a context switch. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Increment the lock count so the task that unlocks the queue + knows that data was removed while it was locked. */ + pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 ); + } + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) +{ +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; +int8_t *pcOriginalReadPosition; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) ); + configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */ + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. 
If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Cannot block in an ISR, so check there is data available. */ + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + traceQUEUE_PEEK_FROM_ISR( pxQueue ); + + /* Remember the read position so it can be reset as nothing is + actually being removed from the queue. */ + pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom; + prvCopyDataFromQueue( pxQueue, pvBuffer ); + pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition; + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) +{ +UBaseType_t uxReturn; + + configASSERT( xQueue ); + + taskENTER_CRITICAL(); + { + uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting; + } + taskEXIT_CRITICAL(); + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) +{ +UBaseType_t uxReturn; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + + taskENTER_CRITICAL(); + { + uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting; + } + taskEXIT_CRITICAL(); + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) +{ +UBaseType_t uxReturn; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + uxReturn = pxQueue->uxMessagesWaiting; + + return uxReturn; +} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ +/*-----------------------------------------------------------*/ + +void vQueueDelete( QueueHandle_t xQueue ) +{ +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + traceQUEUE_DELETE( pxQueue ); + + #if ( configQUEUE_REGISTRY_SIZE > 0 ) + { + vQueueUnregisterQueue( pxQueue ); + } + #endif + + #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) + { + /* The queue can only have been allocated dynamically - free it + again. */ + vPortFree( pxQueue ); + } + #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + { + /* The queue could have been allocated statically or dynamically, so + check before attempting to free the memory. 
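+
+        As an illustrative sketch (names and sizes are hypothetical), the two
+        creation paths the ucStaticallyAllocated check below distinguishes
+        between are:
+
+            QueueHandle_t xDynamic = xQueueCreate( 8, sizeof( uint32_t ) );
+
+            static StaticQueue_t xQueueBuffer;
+            static uint8_t ucStorage[ 8 * sizeof( uint32_t ) ];
+            QueueHandle_t xStatic = xQueueCreateStatic( 8, sizeof( uint32_t ), ucStorage, &xQueueBuffer );
+
+        Only the dynamically created queue owns heap memory for vPortFree()
+        to release here.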
*/ + if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE ) + { + vPortFree( pxQueue ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else + { + /* The queue must have been statically allocated, so is not going to be + deleted. Avoid compiler warnings about the unused parameter. */ + ( void ) pxQueue; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) + { + return ( ( Queue_t * ) xQueue )->uxQueueNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) + { + ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) + { + return ( ( Queue_t * ) xQueue )->ucQueueType; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if( configUSE_MUTEXES == 1 ) + + static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) + { + UBaseType_t uxHighestPriorityOfWaitingTasks; + + /* If a task waiting for a mutex causes the mutex holder to inherit a + priority, but the waiting task times out, then the holder should + disinherit the priority - but only down to the highest priority of any + other tasks that are waiting for the same mutex. For this purpose, + return the priority of the highest priority task that is waiting for the + mutex. */ + if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U ) + { + uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) ); + } + else + { + uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY; + } + + return uxHighestPriorityOfWaitingTasks; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) +{ +BaseType_t xReturn = pdFALSE; +UBaseType_t uxMessagesWaiting; + + /* This function is called from a critical section. */ + + uxMessagesWaiting = pxQueue->uxMessagesWaiting; + + if( pxQueue->uxItemSize == ( UBaseType_t ) 0 ) + { + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + /* The mutex is no longer being held. */ + xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder ); + pxQueue->u.xSemaphore.xMutexHolder = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_MUTEXES */ + } + else if( xPosition == queueSEND_TO_BACK ) + { + ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. 
*/
+        pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
+        if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
+        {
+            pxQueue->pcWriteTo = pxQueue->pcHead;
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+    else
+    {
+        ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes.  Assert checks null pointer only used when length is 0. */
+        pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
+        if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
+        {
+            pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        if( xPosition == queueOVERWRITE )
+        {
+            if( uxMessagesWaiting > ( UBaseType_t ) 0 )
+            {
+                /* An item is not being added but overwritten, so subtract
+                one from the recorded number of items in the queue so when
+                one is added again below the number of recorded items remains
+                correct. */
+                --uxMessagesWaiting;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+
+    pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;
+
+    return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
+{
+    if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
+    {
+        pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
+        if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solution. */
+        {
+            pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+        ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
+    }
+}
+/*-----------------------------------------------------------*/
+
+static void prvUnlockQueue( Queue_t * const pxQueue )
+{
+    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
+
+    /* The lock counts contain the number of extra data items placed or
+    removed from the queue while the queue was locked.  When a queue is
+    locked items can be added or removed, but the event lists cannot be
+    updated. */
+    taskENTER_CRITICAL();
+    {
+        int8_t cTxLock = pxQueue->cTxLock;
+
+        /* See if data was added to the queue while it was locked. */
+        while( cTxLock > queueLOCKED_UNMODIFIED )
+        {
+            /* Data was posted while the queue was locked.  Are any tasks
+            blocked waiting for data to become available?
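+
+            As a worked illustration (an added example, not from the original
+            comment): if two interrupts post to the queue while it is locked,
+            cTxLock is stepped from queueLOCKED_UNMODIFIED to 2, and this
+            loop then tries to unblock one waiting task per count before the
+            lock is returned to queueUNLOCKED.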
*/ + #if ( configUSE_QUEUE_SETS == 1 ) + { + if( pxQueue->pxQueueSetContainer != NULL ) + { + if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting to + the queue set caused a higher priority task to unblock. + A context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* Tasks that are removed from the event list will get + added to the pending ready list as the scheduler is still + suspended. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } + } + } + #else /* configUSE_QUEUE_SETS */ + { + /* Tasks that are removed from the event list will get added to + the pending ready list as the scheduler is still suspended. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that + a context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } + } + #endif /* configUSE_QUEUE_SETS */ + + --cTxLock; + } + + pxQueue->cTxLock = queueUNLOCKED; + } + taskEXIT_CRITICAL(); + + /* Do the same for the Rx lock. */ + taskENTER_CRITICAL(); + { + int8_t cRxLock = pxQueue->cRxLock; + + while( cRxLock > queueLOCKED_UNMODIFIED ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + --cRxLock; + } + else + { + break; + } + } + + pxQueue->cRxLock = queueUNLOCKED; + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) +{ +BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) +{ +BaseType_t xReturn; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} /*lint !e818 xQueue could not be pointer to const because it is a typedef. 
*/ +/*-----------------------------------------------------------*/ + +static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) +{ +BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) +{ +BaseType_t xReturn; +Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* If the queue is already full we may have to block. A critical section + is required to prevent an interrupt removing something from the queue + between the check to see if the queue is full and blocking on the queue. */ + portDISABLE_INTERRUPTS(); + { + if( prvIsQueueFull( pxQueue ) != pdFALSE ) + { + /* The queue is full - do we want to block or just leave without + posting? */ + if( xTicksToWait > ( TickType_t ) 0 ) + { + /* As this is called from a coroutine we cannot block directly, but + return indicating that we need to block. */ + vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) ); + portENABLE_INTERRUPTS(); + return errQUEUE_BLOCKED; + } + else + { + portENABLE_INTERRUPTS(); + return errQUEUE_FULL; + } + } + } + portENABLE_INTERRUPTS(); + + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) + { + /* There is room in the queue, copy the data into the queue. */ + prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK ); + xReturn = pdPASS; + + /* Were any co-routines waiting for data to become available? */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + /* In this instance the co-routine could be placed directly + into the ready list as we are within a critical section. + Instead the same pending ready list mechanism is used as if + the event were caused from within an interrupt. */ + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The co-routine waiting has a higher priority so record + that a yield might be appropriate. */ + xReturn = errQUEUE_YIELD; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xReturn = errQUEUE_FULL; + } + } + portENABLE_INTERRUPTS(); + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* If the queue is already empty we may have to block. A critical section + is required to prevent an interrupt adding something to the queue + between the check to see if the queue is empty and blocking on the queue. 
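+
+    As an illustrative note (a sketch; xCoRoutineQueue, ulReceived, xResult
+    and vProcessValue() are hypothetical): application code is expected to
+    reach this function through the crQUEUE_RECEIVE() macro, called between
+    crSTART() and crEND() inside a co-routine, which deals with the
+    errQUEUE_BLOCKED return by yielding the co-routine:
+
+        static BaseType_t xResult;
+        static uint32_t ulReceived;
+
+        crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &ulReceived, pdMS_TO_TICKS( 10 ), &xResult );
+        if( xResult == pdPASS )
+        {
+            vProcessValue( ulReceived );
+        }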
*/ + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + /* There are no messages in the queue, do we want to block or just + leave with nothing? */ + if( xTicksToWait > ( TickType_t ) 0 ) + { + /* As this is a co-routine we cannot block directly, but return + indicating that we need to block. */ + vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) ); + portENABLE_INTERRUPTS(); + return errQUEUE_BLOCKED; + } + else + { + portENABLE_INTERRUPTS(); + return errQUEUE_FULL; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + portENABLE_INTERRUPTS(); + + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Data is available from the queue. */ + pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; + if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) + { + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + --( pxQueue->uxMessagesWaiting ); + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize ); + + xReturn = pdPASS; + + /* Were any co-routines waiting for space to become available? */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + /* In this instance the co-routine could be placed directly + into the ready list as we are within a critical section. + Instead the same pending ready list mechanism is used as if + the event were caused from within an interrupt. */ + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + xReturn = errQUEUE_YIELD; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xReturn = pdFAIL; + } + } + portENABLE_INTERRUPTS(); + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken ) + { + Queue_t * const pxQueue = xQueue; + + /* Cannot block within an ISR so if there is no space on the queue then + exit without doing anything. */ + if( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) + { + prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK ); + + /* We only want to wake one co-routine per ISR, so check that a + co-routine has not already been woken. */ + if( xCoRoutinePreviouslyWoken == pdFALSE ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + return pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xCoRoutinePreviouslyWoken; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* We cannot block from an ISR, so check there is data available. If + not then just leave without doing anything. */ + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Copy the data from the queue. 
*/
+            pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
+            if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
+            {
+                pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+            --( pxQueue->uxMessagesWaiting );
+            ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
+
+            if( ( *pxCoRoutineWoken ) == pdFALSE )
+            {
+                if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
+                {
+                    if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
+                    {
+                        *pxCoRoutineWoken = pdTRUE;
+                    }
+                    else
+                    {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+
+            xReturn = pdPASS;
+        }
+        else
+        {
+            xReturn = pdFAIL;
+        }
+
+        return xReturn;
+    }
+
+#endif /* configUSE_CO_ROUTINES */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+    void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+    {
+    UBaseType_t ux;
+
+        /* See if there is an empty space in the registry.  A NULL name denotes
+        a free slot. */
+        for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
+        {
+            if( xQueueRegistry[ ux ].pcQueueName == NULL )
+            {
+                /* Store the information on this queue. */
+                xQueueRegistry[ ux ].pcQueueName = pcQueueName;
+                xQueueRegistry[ ux ].xHandle = xQueue;
+
+                traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
+                break;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+    }
+
+#endif /* configQUEUE_REGISTRY_SIZE */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+    const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+    {
+    UBaseType_t ux;
+    const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+
+        /* Note there is nothing here to protect against another task adding or
+        removing entries from the registry while it is being searched. */
+        for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
+        {
+            if( xQueueRegistry[ ux ].xHandle == xQueue )
+            {
+                pcReturn = xQueueRegistry[ ux ].pcQueueName;
+                break;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+
+        return pcReturn;
+    } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */
+
+#endif /* configQUEUE_REGISTRY_SIZE */
+/*-----------------------------------------------------------*/
+
+#if ( configQUEUE_REGISTRY_SIZE > 0 )
+
+    void vQueueUnregisterQueue( QueueHandle_t xQueue )
+    {
+    UBaseType_t ux;
+
+        /* See if the handle of the queue being unregistered is actually in the
+        registry. */
+        for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
+        {
+            if( xQueueRegistry[ ux ].xHandle == xQueue )
+            {
+                /* Set the name to NULL to show that this slot is free again. */
+                xQueueRegistry[ ux ].pcQueueName = NULL;
+
+                /* Set the handle to NULL to ensure the same queue handle cannot
+                appear in the registry twice if it is added, removed, then
+                added again. */
+                xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
+                break;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+
+    } /*lint !e818 xQueue could not be pointer to const because it is a typedef.
*/ + +#endif /* configQUEUE_REGISTRY_SIZE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + + void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) + { + Queue_t * const pxQueue = xQueue; + + /* This function should not be called by application code hence the + 'Restricted' in its name. It is not part of the public API. It is + designed for use by kernel code, and has special calling requirements. + It can result in vListInsert() being called on a list that can only + possibly ever have one item in it, so the list will be fast, but even + so it should be called with the scheduler locked and not from a critical + section. */ + + /* Only do anything if there are no messages in the queue. This function + will not actually cause the task to block, just place it on a blocked + list. It will not block until the scheduler is unlocked - at which + time a yield will be performed. If an item is added to the queue while + the queue is locked, and the calling task blocks on the queue, then the + calling task will be immediately unblocked when the queue is unlocked. */ + prvLockQueue( pxQueue ); + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U ) + { + /* There is nothing in the queue, block for the specified period. */ + vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + prvUnlockQueue( pxQueue ); + } + +#endif /* configUSE_TIMERS */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength ) + { + QueueSetHandle_t pxQueue; + + pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET ); + + return pxQueue; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) + { + BaseType_t xReturn; + + taskENTER_CRITICAL(); + { + if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL ) + { + /* Cannot add a queue/semaphore to more than one queue set. */ + xReturn = pdFAIL; + } + else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 ) + { + /* Cannot add a queue/semaphore to a queue set if there are already + items in the queue/semaphore. */ + xReturn = pdFAIL; + } + else + { + ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet; + xReturn = pdPASS; + } + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_QUEUE_SETS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) + { + BaseType_t xReturn; + Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore; + + if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet ) + { + /* The queue was not a member of the set. */ + xReturn = pdFAIL; + } + else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 ) + { + /* It is dangerous to remove a queue from a set when the queue is + not empty because the queue set will still hold pending events for + the queue. 
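+
+        As an illustrative sketch (xQueue, xSet, xActivated and ulItem are
+        hypothetical), one way to empty a member before removing it is to
+        keep servicing the set until no events remain:
+
+            QueueSetMemberHandle_t xActivated;
+            uint32_t ulItem;
+
+            while( ( xActivated = xQueueSelectFromSet( xSet, 0 ) ) != NULL )
+            {
+                ( void ) xQueueReceive( xActivated, &ulItem, 0 );
+            }
+
+            ( void ) xQueueRemoveFromSet( xQueue, xSet );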
*/
+            xReturn = pdFAIL;
+        }
+        else
+        {
+            taskENTER_CRITICAL();
+            {
+                /* The queue is no longer contained in the set. */
+                pxQueueOrSemaphore->pxQueueSetContainer = NULL;
+            }
+            taskEXIT_CRITICAL();
+            xReturn = pdPASS;
+        }
+
+        return xReturn;
+    } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */
+
+#endif /* configUSE_QUEUE_SETS */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+    QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
+    {
+    QueueSetMemberHandle_t xReturn = NULL;
+
+        ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */
+        return xReturn;
+    }
+
+#endif /* configUSE_QUEUE_SETS */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+    QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
+    {
+    QueueSetMemberHandle_t xReturn = NULL;
+
+        ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
+        return xReturn;
+    }
+
+#endif /* configUSE_QUEUE_SETS */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_QUEUE_SETS == 1 )
+
+    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
+    {
+    Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
+    BaseType_t xReturn = pdFALSE;
+
+        /* This function must be called from a critical section. */
+
+        configASSERT( pxQueueSetContainer );
+        configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
+
+        if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
+        {
+            const int8_t cTxLock = pxQueueSetContainer->cTxLock;
+
+            traceQUEUE_SEND( pxQueueSetContainer );
+
+            /* The data copied is the handle of the queue that contains data. */
+            xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
+
+            if( cTxLock == queueUNLOCKED )
+            {
+                if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
+                {
+                    if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
+                    {
+                        /* The task waiting has a higher priority. */
+                        xReturn = pdTRUE;
+                    }
+                    else
+                    {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+            else
+            {
+                pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        return xReturn;
+    }
+
+#endif /* configUSE_QUEUE_SETS */
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c
new file mode 100644
index 0000000..8cda238
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/stream_buffer.c
@@ -0,0 +1,1263 @@
+/*
+ * FreeRTOS Kernel V10.2.1
+ * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+#include <string.h>
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+all the API functions to use the MPU wrappers.  That should only be done when
+task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* FreeRTOS includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "stream_buffer.h"
+
+#if( configUSE_TASK_NOTIFICATIONS != 1 )
+    #error configUSE_TASK_NOTIFICATIONS must be set to 1 to build stream_buffer.c
+#endif
+
+/* Lint e961, e9021 and e750 are suppressed as a MISRA exception justified
+because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
+for the header files above, but not in this file, in order to generate the
+correct privileged Vs unprivileged linkage and placement. */
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
+
+/* If the user has not provided application specific Rx notification macros,
+or #defined the notification macros away, then provide default implementations
+that use task notifications. */
+/*lint -save -e9026 Function like macros allowed and needed here so they can be overridden.
*/
+#ifndef sbRECEIVE_COMPLETED
+    #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
+        vTaskSuspendAll(); \
+        { \
+            if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
+            { \
+                ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \
+                                      ( uint32_t ) 0, \
+                                      eNoAction ); \
+                ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
+            } \
+        } \
+        ( void ) xTaskResumeAll();
+#endif /* sbRECEIVE_COMPLETED */
+
+#ifndef sbRECEIVE_COMPLETED_FROM_ISR
+    #define sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \
+                                          pxHigherPriorityTaskWoken ) \
+    { \
+    UBaseType_t uxSavedInterruptStatus; \
+ \
+        uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \
+        { \
+            if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
+            { \
+                ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToSend, \
+                                             ( uint32_t ) 0, \
+                                             eNoAction, \
+                                             pxHigherPriorityTaskWoken ); \
+                ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
+            } \
+        } \
+        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \
+    }
+#endif /* sbRECEIVE_COMPLETED_FROM_ISR */
+
+/* If the user has not provided an application specific Tx notification macro,
+or #defined the notification macro away, then provide a default implementation
+that uses task notifications. */
+#ifndef sbSEND_COMPLETED
+    #define sbSEND_COMPLETED( pxStreamBuffer ) \
+        vTaskSuspendAll(); \
+        { \
+            if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
+            { \
+                ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \
+                                      ( uint32_t ) 0, \
+                                      eNoAction ); \
+                ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
+            } \
+        } \
+        ( void ) xTaskResumeAll();
+#endif /* sbSEND_COMPLETED */
+
+#ifndef sbSEND_COMPLETE_FROM_ISR
+    #define sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \
+    { \
+    UBaseType_t uxSavedInterruptStatus; \
+ \
+        uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \
+        { \
+            if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
+            { \
+                ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToReceive, \
+                                             ( uint32_t ) 0, \
+                                             eNoAction, \
+                                             pxHigherPriorityTaskWoken ); \
+                ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
+            } \
+        } \
+        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \
+    }
+#endif /* sbSEND_COMPLETE_FROM_ISR */
+/*lint -restore (9026) */
+
+/* The number of bytes used to hold the length of a message in the buffer. */
+#define sbBYTES_TO_STORE_MESSAGE_LENGTH ( sizeof( configMESSAGE_BUFFER_LENGTH_TYPE ) )
+
+/* Bits stored in the ucFlags field of the stream buffer. */
+#define sbFLAGS_IS_MESSAGE_BUFFER        ( ( uint8_t ) 1 ) /* Set if the stream buffer was created as a message buffer, in which case it holds discrete messages rather than a stream. */
+#define sbFLAGS_IS_STATICALLY_ALLOCATED  ( ( uint8_t ) 2 ) /* Set if the stream buffer was created using statically allocated memory. */
+
+/*-----------------------------------------------------------*/
+
+/* Structure that holds state information on the buffer. */
+typedef struct StreamBufferDef_t /*lint !e9058 Style convention uses tag. */
+{
+    volatile size_t xTail;               /* Index to the next item to read within the buffer. */
+    volatile size_t xHead;               /* Index to the next item to write within the buffer. */
+    size_t xLength;                      /* The length of the buffer pointed to by pucBuffer. */
+    size_t xTriggerLevelBytes;           /* The number of bytes that must be in the stream buffer before a task that is waiting for data is unblocked.
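+                                            For illustration (an added note):
+                                            with a trigger level of 10, a
+                                            reader blocked in
+                                            xStreamBufferReceive() is not
+                                            woken by the first byte to
+                                            arrive, only once at least 10
+                                            bytes are buffered or its block
+                                            time expires, whichever happens
+                                            first.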
*/ + volatile TaskHandle_t xTaskWaitingToReceive; /* Holds the handle of a task waiting for data, or NULL if no tasks are waiting. */ + volatile TaskHandle_t xTaskWaitingToSend; /* Holds the handle of a task waiting to send data to a message buffer that is full. */ + uint8_t *pucBuffer; /* Points to the buffer itself - that is - the RAM that stores the data passed through the buffer. */ + uint8_t ucFlags; + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxStreamBufferNumber; /* Used for tracing purposes. */ + #endif +} StreamBuffer_t; + +/* + * The number of bytes available to be read from the buffer. + */ +static size_t prvBytesInBuffer( const StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; + +/* + * Add xCount bytes from pucData into the pxStreamBuffer message buffer. + * Returns the number of bytes written, which will either equal xCount in the + * success case, or 0 if there was not enough space in the buffer (in which case + * no data is written into the buffer). + */ +static size_t prvWriteBytesToBuffer( StreamBuffer_t * const pxStreamBuffer, const uint8_t *pucData, size_t xCount ) PRIVILEGED_FUNCTION; + +/* + * If the stream buffer is being used as a message buffer, then reads an entire + * message out of the buffer. If the stream buffer is being used as a stream + * buffer then read as many bytes as possible from the buffer. + * prvReadBytesFromBuffer() is called to actually extract the bytes from the + * buffer's data storage area. + */ +static size_t prvReadMessageFromBuffer( StreamBuffer_t *pxStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + size_t xBytesAvailable, + size_t xBytesToStoreMessageLength ) PRIVILEGED_FUNCTION; + +/* + * If the stream buffer is being used as a message buffer, then writes an entire + * message to the buffer. If the stream buffer is being used as a stream + * buffer then write as many bytes as possible to the buffer. + * prvWriteBytestoBuffer() is called to actually send the bytes to the buffer's + * data storage area. + */ +static size_t prvWriteMessageToBuffer( StreamBuffer_t * const pxStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + size_t xSpace, + size_t xRequiredSpace ) PRIVILEGED_FUNCTION; + +/* + * Read xMaxCount bytes from the pxStreamBuffer message buffer and write them + * to pucData. + */ +static size_t prvReadBytesFromBuffer( StreamBuffer_t *pxStreamBuffer, + uint8_t *pucData, + size_t xMaxCount, + size_t xBytesAvailable ) PRIVILEGED_FUNCTION; + +/* + * Called by both pxStreamBufferCreate() and pxStreamBufferCreateStatic() to + * initialise the members of the newly created stream buffer structure. + */ +static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, + uint8_t * const pucBuffer, + size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + uint8_t ucFlags ) PRIVILEGED_FUNCTION; + +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer ) + { + uint8_t *pucAllocatedMemory; + uint8_t ucFlags; + + /* In case the stream buffer is going to be used as a message buffer + (that is, it will hold discrete messages with a little meta data that + says how big the next message is) check the buffer will be large enough + to hold at least one message. */ + if( xIsMessageBuffer == pdTRUE ) + { + /* Is a message buffer but not statically allocated. 
*/ + ucFlags = sbFLAGS_IS_MESSAGE_BUFFER; + configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); + } + else + { + /* Not a message buffer and not statically allocated. */ + ucFlags = 0; + configASSERT( xBufferSizeBytes > 0 ); + } + configASSERT( xTriggerLevelBytes <= xBufferSizeBytes ); + + /* A trigger level of 0 would cause a waiting task to unblock even when + the buffer was empty. */ + if( xTriggerLevelBytes == ( size_t ) 0 ) + { + xTriggerLevelBytes = ( size_t ) 1; + } + + /* A stream buffer requires a StreamBuffer_t structure and a buffer. + Both are allocated in a single call to pvPortMalloc(). The + StreamBuffer_t structure is placed at the start of the allocated memory + and the buffer follows immediately after. The requested size is + incremented so the free space is returned as the user would expect - + this is a quirk of the implementation that means otherwise the free + space would be reported as one byte smaller than would be logically + expected. */ + xBufferSizeBytes++; + pucAllocatedMemory = ( uint8_t * ) pvPortMalloc( xBufferSizeBytes + sizeof( StreamBuffer_t ) ); /*lint !e9079 malloc() only returns void*. */ + + if( pucAllocatedMemory != NULL ) + { + prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */ + pucAllocatedMemory + sizeof( StreamBuffer_t ), /* Storage area follows. */ /*lint !e9016 Indexing past structure valid for uint8_t pointer, also storage area has no alignment requirement. */ + xBufferSizeBytes, + xTriggerLevelBytes, + ucFlags ); + + traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer ); + } + else + { + traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ); + } + + return ( StreamBufferHandle_t ) pucAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */ + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + StreamBufferHandle_t xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer, + uint8_t * const pucStreamBufferStorageArea, + StaticStreamBuffer_t * const pxStaticStreamBuffer ) + { + StreamBuffer_t * const pxStreamBuffer = ( StreamBuffer_t * ) pxStaticStreamBuffer; /*lint !e740 !e9087 Safe cast as StaticStreamBuffer_t is opaque Streambuffer_t. */ + StreamBufferHandle_t xReturn; + uint8_t ucFlags; + + configASSERT( pucStreamBufferStorageArea ); + configASSERT( pxStaticStreamBuffer ); + configASSERT( xTriggerLevelBytes <= xBufferSizeBytes ); + + /* A trigger level of 0 would cause a waiting task to unblock even when + the buffer was empty. */ + if( xTriggerLevelBytes == ( size_t ) 0 ) + { + xTriggerLevelBytes = ( size_t ) 1; + } + + if( xIsMessageBuffer != pdFALSE ) + { + /* Statically allocated message buffer. */ + ucFlags = sbFLAGS_IS_MESSAGE_BUFFER | sbFLAGS_IS_STATICALLY_ALLOCATED; + } + else + { + /* Statically allocated stream buffer. */ + ucFlags = sbFLAGS_IS_STATICALLY_ALLOCATED; + } + + /* In case the stream buffer is going to be used as a message buffer + (that is, it will hold discrete messages with a little meta data that + says how big the next message is) check the buffer will be large enough + to hold at least one message. 
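+
+        As a worked illustration (sizes here are assumptions): with the
+        default configMESSAGE_BUFFER_LENGTH_TYPE of size_t, 4 bytes on a
+        32-bit port, each message is stored as a 4 byte length field followed
+        by the message payload, so a 10 byte message occupies 14 bytes of the
+        buffer, and even a 1 byte message needs more than
+        sbBYTES_TO_STORE_MESSAGE_LENGTH bytes of storage.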
*/
+        configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH );
+
+        #if( configASSERT_DEFINED == 1 )
+        {
+            /* Sanity check that the size of the structure used to declare a
+            variable of type StaticStreamBuffer_t equals the size of the real
+            message buffer structure. */
+            volatile size_t xSize = sizeof( StaticStreamBuffer_t );
+            configASSERT( xSize == sizeof( StreamBuffer_t ) );
+        } /*lint !e529 xSize is referenced if configASSERT() is defined. */
+        #endif /* configASSERT_DEFINED */
+
+        if( ( pucStreamBufferStorageArea != NULL ) && ( pxStaticStreamBuffer != NULL ) )
+        {
+            prvInitialiseNewStreamBuffer( pxStreamBuffer,
+                                          pucStreamBufferStorageArea,
+                                          xBufferSizeBytes,
+                                          xTriggerLevelBytes,
+                                          ucFlags );
+
+            /* Remember this was statically allocated in case it is ever deleted
+            again. */
+            pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED;
+
+            traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer );
+
+            xReturn = ( StreamBufferHandle_t ) pxStaticStreamBuffer; /*lint !e9087 Data hiding requires cast to opaque type. */
+        }
+        else
+        {
+            xReturn = NULL;
+            traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer );
+        }
+
+        return xReturn;
+    }
+
+#endif /* ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
+/*-----------------------------------------------------------*/
+
+void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer )
+{
+StreamBuffer_t * pxStreamBuffer = xStreamBuffer;
+
+    configASSERT( pxStreamBuffer );
+
+    traceSTREAM_BUFFER_DELETE( xStreamBuffer );
+
+    if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_STATICALLY_ALLOCATED ) == ( uint8_t ) pdFALSE )
+    {
+        #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+        {
+            /* Both the structure and the buffer were allocated using a single call
+            to pvPortMalloc(), hence only one call to vPortFree() is required. */
+            vPortFree( ( void * ) pxStreamBuffer ); /*lint !e9087 Standard free() semantics require void *, plus pxStreamBuffer was allocated by pvPortMalloc(). */
+        }
+        #else
+        {
+            /* Should not be possible to get here, ucFlags must be corrupt.
+            Force an assert. */
+            configASSERT( xStreamBuffer == ( StreamBufferHandle_t ) ~0 );
+        }
+        #endif
+    }
+    else
+    {
+        /* The structure and buffer were not allocated dynamically and cannot be
+        freed - just scrub the structure so future use will assert. */
+        ( void ) memset( pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) );
+    }
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
+{
+StreamBuffer_t * const pxStreamBuffer = xStreamBuffer;
+BaseType_t xReturn = pdFAIL;
+
+#if( configUSE_TRACE_FACILITY == 1 )
+    UBaseType_t uxStreamBufferNumber;
+#endif
+
+    configASSERT( pxStreamBuffer );
+
+    #if( configUSE_TRACE_FACILITY == 1 )
+    {
+        /* Store the stream buffer number so it can be restored after the
+        reset. */
+        uxStreamBufferNumber = pxStreamBuffer->uxStreamBufferNumber;
+    }
+    #endif
+
+    /* Can only reset a message buffer if there are no tasks blocked on it.
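+
+    Illustrative usage (an added sketch; xStreamBuffer is whatever handle the
+    application holds and vHandleBufferBusy() is hypothetical): the return
+    value tells the caller whether the reset really happened, e.g.
+
+        if( xStreamBufferReset( xStreamBuffer ) != pdPASS )
+        {
+            vHandleBufferBusy();
+        }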
*/ + taskENTER_CRITICAL(); + { + if( pxStreamBuffer->xTaskWaitingToReceive == NULL ) + { + if( pxStreamBuffer->xTaskWaitingToSend == NULL ) + { + prvInitialiseNewStreamBuffer( pxStreamBuffer, + pxStreamBuffer->pucBuffer, + pxStreamBuffer->xLength, + pxStreamBuffer->xTriggerLevelBytes, + pxStreamBuffer->ucFlags ); + xReturn = pdPASS; + + #if( configUSE_TRACE_FACILITY == 1 ) + { + pxStreamBuffer->uxStreamBufferNumber = uxStreamBufferNumber; + } + #endif + + traceSTREAM_BUFFER_RESET( xStreamBuffer ); + } + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +BaseType_t xReturn; + + configASSERT( pxStreamBuffer ); + + /* It is not valid for the trigger level to be 0. */ + if( xTriggerLevel == ( size_t ) 0 ) + { + xTriggerLevel = ( size_t ) 1; + } + + /* The trigger level is the number of bytes that must be in the stream + buffer before a task that is waiting for data is unblocked. */ + if( xTriggerLevel <= pxStreamBuffer->xLength ) + { + pxStreamBuffer->xTriggerLevelBytes = xTriggerLevel; + xReturn = pdPASS; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) +{ +const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xSpace; + + configASSERT( pxStreamBuffer ); + + xSpace = pxStreamBuffer->xLength + pxStreamBuffer->xTail; + xSpace -= pxStreamBuffer->xHead; + xSpace -= ( size_t ) 1; + + if( xSpace >= pxStreamBuffer->xLength ) + { + xSpace -= pxStreamBuffer->xLength; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xSpace; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) +{ +const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReturn; + + configASSERT( pxStreamBuffer ); + + xReturn = prvBytesInBuffer( pxStreamBuffer ); + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReturn, xSpace = 0; +size_t xRequiredSpace = xDataLengthBytes; +TimeOut_t xTimeOut; + + configASSERT( pvTxData ); + configASSERT( pxStreamBuffer ); + + /* This send function is used to write to both message buffers and stream + buffers. If this is a message buffer then the space needed must be + increased by the amount of bytes needed to store the length of the + message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xRequiredSpace += sbBYTES_TO_STORE_MESSAGE_LENGTH; + + /* Overflow? */ + configASSERT( xRequiredSpace > xDataLengthBytes ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xTicksToWait != ( TickType_t ) 0 ) + { + vTaskSetTimeOutState( &xTimeOut ); + + do + { + /* Wait until the required number of bytes are free in the message + buffer. */ + taskENTER_CRITICAL(); + { + xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); + + if( xSpace < xRequiredSpace ) + { + /* Clear notification state as going to wait for space. */ + ( void ) xTaskNotifyStateClear( NULL ); + + /* Should only be one writer. 
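+
+    Illustrative note (an added sketch; the names are hypothetical): stream
+    buffers assume a single writing task or interrupt and a single reading
+    task or interrupt.  The intended pattern is one task calling
+
+        ( void ) xStreamBufferSend( xStreamBuffer, pcTxData, xTxLen, pdMS_TO_TICKS( 20 ) );
+
+    while exactly one other task calls
+
+        xRxLen = xStreamBufferReceive( xStreamBuffer, pcRxBuf, sizeof( pcRxBuf ), portMAX_DELAY );
+
+    Multiple writers or readers need external locking around their calls.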
*/ + configASSERT( pxStreamBuffer->xTaskWaitingToSend == NULL ); + pxStreamBuffer->xTaskWaitingToSend = xTaskGetCurrentTaskHandle(); + } + else + { + taskEXIT_CRITICAL(); + break; + } + } + taskEXIT_CRITICAL(); + + traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ); + ( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait ); + pxStreamBuffer->xTaskWaitingToSend = NULL; + + } while( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xSpace == ( size_t ) 0 ) + { + xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xReturn = prvWriteMessageToBuffer( pxStreamBuffer, pvTxData, xDataLengthBytes, xSpace, xRequiredSpace ); + + if( xReturn > ( size_t ) 0 ) + { + traceSTREAM_BUFFER_SEND( xStreamBuffer, xReturn ); + + /* Was a task waiting for the data? */ + if( prvBytesInBuffer( pxStreamBuffer ) >= pxStreamBuffer->xTriggerLevelBytes ) + { + sbSEND_COMPLETED( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + traceSTREAM_BUFFER_SEND_FAILED( xStreamBuffer ); + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, + const void *pvTxData, + size_t xDataLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReturn, xSpace; +size_t xRequiredSpace = xDataLengthBytes; + + configASSERT( pvTxData ); + configASSERT( pxStreamBuffer ); + + /* This send function is used to write to both message buffers and stream + buffers. If this is a message buffer then the space needed must be + increased by the amount of bytes needed to store the length of the + message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xRequiredSpace += sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); + xReturn = prvWriteMessageToBuffer( pxStreamBuffer, pvTxData, xDataLengthBytes, xSpace, xRequiredSpace ); + + if( xReturn > ( size_t ) 0 ) + { + /* Was a task waiting for the data? */ + if( prvBytesInBuffer( pxStreamBuffer ) >= pxStreamBuffer->xTriggerLevelBytes ) + { + sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceSTREAM_BUFFER_SEND_FROM_ISR( xStreamBuffer, xReturn ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +static size_t prvWriteMessageToBuffer( StreamBuffer_t * const pxStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + size_t xSpace, + size_t xRequiredSpace ) +{ + BaseType_t xShouldWrite; + size_t xReturn; + + if( xSpace == ( size_t ) 0 ) + { + /* Doesn't matter if this is a stream buffer or a message buffer, there + is no space to write. */ + xShouldWrite = pdFALSE; + } + else if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) == ( uint8_t ) 0 ) + { + /* This is a stream buffer, as opposed to a message buffer, so writing a + stream of bytes rather than discrete messages. Write as many bytes as + possible. 
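+
+        Illustrative note (an addition, not from the original comment): this
+        is why, for a stream buffer, the amount actually written - the return
+        value of xStreamBufferSend() - can be less than the amount requested
+        when the call times out, so a caller should treat
+
+            xSent = xStreamBufferSend( xStreamBuffer, pcData, xLen, xTicks );
+
+        as having written exactly xSent bytes, not xLen (all names here are
+        hypothetical).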
*/
+        xShouldWrite = pdTRUE;
+        xDataLengthBytes = configMIN( xDataLengthBytes, xSpace );
+    }
+    else if( xSpace >= xRequiredSpace )
+    {
+        /* This is a message buffer, as opposed to a stream buffer, and there
+        is enough space to write both the message length and the message itself
+        into the buffer.  Start by writing the length of the data, the data
+        itself will be written later in this function. */
+        xShouldWrite = pdTRUE;
+        ( void ) prvWriteBytesToBuffer( pxStreamBuffer, ( const uint8_t * ) &( xDataLengthBytes ), sbBYTES_TO_STORE_MESSAGE_LENGTH );
+    }
+    else
+    {
+        /* There is some space available, but not enough for the whole
+        message. */
+        xShouldWrite = pdFALSE;
+    }
+
+    if( xShouldWrite != pdFALSE )
+    {
+        /* Writes the data itself. */
+        xReturn = prvWriteBytesToBuffer( pxStreamBuffer, ( const uint8_t * ) pvTxData, xDataLengthBytes ); /*lint !e9079 Storage buffer is implemented as uint8_t for ease of sizing, alignment and access. */
+    }
+    else
+    {
+        xReturn = 0;
+    }
+
+    return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
+                             void *pvRxData,
+                             size_t xBufferLengthBytes,
+                             TickType_t xTicksToWait )
+{
+StreamBuffer_t * const pxStreamBuffer = xStreamBuffer;
+size_t xReceivedLength = 0, xBytesAvailable, xBytesToStoreMessageLength;
+
+    configASSERT( pvRxData );
+    configASSERT( pxStreamBuffer );
+
+    /* This receive function is used by both message buffers, which store
+    discrete messages, and stream buffers, which store a continuous stream of
+    bytes.  Discrete messages include an additional
+    sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the
+    message. */
+    if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 )
+    {
+        xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH;
+    }
+    else
+    {
+        xBytesToStoreMessageLength = 0;
+    }
+
+    if( xTicksToWait != ( TickType_t ) 0 )
+    {
+        /* Checking if there is data and clearing the notification state must be
+        performed atomically. */
+        taskENTER_CRITICAL();
+        {
+            xBytesAvailable = prvBytesInBuffer( pxStreamBuffer );
+
+            /* If this function was invoked by a message buffer read then
+            xBytesToStoreMessageLength holds the number of bytes used to hold
+            the length of the next discrete message.  If this function was
+            invoked by a stream buffer read then xBytesToStoreMessageLength will
+            be 0. */
+            if( xBytesAvailable <= xBytesToStoreMessageLength )
+            {
+                /* Clear notification state as going to wait for data. */
+                ( void ) xTaskNotifyStateClear( NULL );
+
+                /* Should only be one reader. */
+                configASSERT( pxStreamBuffer->xTaskWaitingToReceive == NULL );
+                pxStreamBuffer->xTaskWaitingToReceive = xTaskGetCurrentTaskHandle();
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        taskEXIT_CRITICAL();
+
+        if( xBytesAvailable <= xBytesToStoreMessageLength )
+        {
+            /* Wait for data to be available. */
+            traceBLOCKING_ON_STREAM_BUFFER_RECEIVE( xStreamBuffer );
+            ( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait );
+            pxStreamBuffer->xTaskWaitingToReceive = NULL;
+
+            /* Recheck the data available after blocking.
*/ + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + } + + /* Whether receiving a discrete message (where xBytesToStoreMessageLength + holds the number of bytes used to store the message length) or a stream of + bytes (where xBytesToStoreMessageLength is zero), the number of bytes + available must be greater than xBytesToStoreMessageLength to be able to + read bytes from the buffer. */ + if( xBytesAvailable > xBytesToStoreMessageLength ) + { + xReceivedLength = prvReadMessageFromBuffer( pxStreamBuffer, pvRxData, xBufferLengthBytes, xBytesAvailable, xBytesToStoreMessageLength ); + + /* Was a task waiting for space in the buffer? */ + if( xReceivedLength != ( size_t ) 0 ) + { + traceSTREAM_BUFFER_RECEIVE( xStreamBuffer, xReceivedLength ); + sbRECEIVE_COMPLETED( pxStreamBuffer ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + traceSTREAM_BUFFER_RECEIVE_FAILED( xStreamBuffer ); + mtCOVERAGE_TEST_MARKER(); + } + + return xReceivedLength; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReturn, xBytesAvailable, xOriginalTail; +configMESSAGE_BUFFER_LENGTH_TYPE xTempReturn; + + configASSERT( pxStreamBuffer ); + + /* Ensure the stream buffer is being used as a message buffer. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + if( xBytesAvailable > sbBYTES_TO_STORE_MESSAGE_LENGTH ) + { + /* The number of bytes available is greater than the number of bytes + required to hold the length of the next message, so another message + is available. Return its length without removing the length bytes + from the buffer. A copy of the tail is stored so the buffer can be + returned to its prior state as the message is not actually being + removed from the buffer. */ + xOriginalTail = pxStreamBuffer->xTail; + ( void ) prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) &xTempReturn, sbBYTES_TO_STORE_MESSAGE_LENGTH, xBytesAvailable ); + xReturn = ( size_t ) xTempReturn; + pxStreamBuffer->xTail = xOriginalTail; + } + else + { + /* The minimum amount of bytes in a message buffer is + ( sbBYTES_TO_STORE_MESSAGE_LENGTH + 1 ), so if xBytesAvailable is + less than sbBYTES_TO_STORE_MESSAGE_LENGTH the only other valid + value is 0. */ + configASSERT( xBytesAvailable == 0 ); + xReturn = 0; + } + } + else + { + xReturn = 0; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +size_t xReceivedLength = 0, xBytesAvailable, xBytesToStoreMessageLength; + + configASSERT( pvRxData ); + configASSERT( pxStreamBuffer ); + + /* This receive function is used by both message buffers, which store + discrete messages, and stream buffers, which store a continuous stream of + bytes. Discrete messages include an additional + sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the + message. 
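A short sketch of how the peek function above can be used to size a read before committing to it (illustrative only; xMsgBuf is a hypothetical message buffer handle created elsewhere):

    #include "FreeRTOS.h"
    #include "stream_buffer.h"

    extern StreamBufferHandle_t xMsgBuf; /* Hypothetical message buffer handle. */

    size_t xReadNextMessage( uint8_t *pucDest, size_t xDestSize )
    {
        /* Peek at the length of the next message without removing it. */
        size_t xNextLen = xStreamBufferNextMessageLengthBytes( xMsgBuf );

        if( ( xNextLen == 0 ) || ( xNextLen > xDestSize ) )
        {
            return 0; /* Nothing queued, or the caller's buffer is too small. */
        }

        /* The whole message is known to fit, so this read cannot fail part way. */
        return xStreamBufferReceive( xMsgBuf, pucDest, xDestSize, 0 );
    }
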
*/ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + xBytesToStoreMessageLength = 0; + } + + xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); + + /* Whether receiving a discrete message (where xBytesToStoreMessageLength + holds the number of bytes used to store the message length) or a stream of + bytes (where xBytesToStoreMessageLength is zero), the number of bytes + available must be greater than xBytesToStoreMessageLength to be able to + read bytes from the buffer. */ + if( xBytesAvailable > xBytesToStoreMessageLength ) + { + xReceivedLength = prvReadMessageFromBuffer( pxStreamBuffer, pvRxData, xBufferLengthBytes, xBytesAvailable, xBytesToStoreMessageLength ); + + /* Was a task waiting for space in the buffer? */ + if( xReceivedLength != ( size_t ) 0 ) + { + sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceSTREAM_BUFFER_RECEIVE_FROM_ISR( xStreamBuffer, xReceivedLength ); + + return xReceivedLength; +} +/*-----------------------------------------------------------*/ + +static size_t prvReadMessageFromBuffer( StreamBuffer_t *pxStreamBuffer, + void *pvRxData, + size_t xBufferLengthBytes, + size_t xBytesAvailable, + size_t xBytesToStoreMessageLength ) +{ +size_t xOriginalTail, xReceivedLength, xNextMessageLength; +configMESSAGE_BUFFER_LENGTH_TYPE xTempNextMessageLength; + + if( xBytesToStoreMessageLength != ( size_t ) 0 ) + { + /* A discrete message is being received. First receive the length + of the message. A copy of the tail is stored so the buffer can be + returned to its prior state if the length of the message is too + large for the provided buffer. */ + xOriginalTail = pxStreamBuffer->xTail; + ( void ) prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) &xTempNextMessageLength, xBytesToStoreMessageLength, xBytesAvailable ); + xNextMessageLength = ( size_t ) xTempNextMessageLength; + + /* Reduce the number of bytes available by the number of bytes just + read out. */ + xBytesAvailable -= xBytesToStoreMessageLength; + + /* Check there is enough space in the buffer provided by the + user. */ + if( xNextMessageLength > xBufferLengthBytes ) + { + /* The user has provided insufficient space to read the message + so return the buffer to its previous state (so the length of + the message is in the buffer again). */ + pxStreamBuffer->xTail = xOriginalTail; + xNextMessageLength = 0; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* A stream of bytes is being received (as opposed to a discrete + message), so read as many bytes as possible. */ + xNextMessageLength = xBufferLengthBytes; + } + + /* Read the actual data. */ + xReceivedLength = prvReadBytesFromBuffer( pxStreamBuffer, ( uint8_t * ) pvRxData, xNextMessageLength, xBytesAvailable ); /*lint !e9079 Data storage area is implemented as uint8_t array for ease of sizing, indexing and alignment. */ + + return xReceivedLength; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) +{ +const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +BaseType_t xReturn; +size_t xTail; + + configASSERT( pxStreamBuffer ); + + /* True if no bytes are available. 
*/ + xTail = pxStreamBuffer->xTail; + if( pxStreamBuffer->xHead == xTail ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) +{ +BaseType_t xReturn; +size_t xBytesToStoreMessageLength; +const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; + + configASSERT( pxStreamBuffer ); + + /* This generic version of the receive function is used by both message + buffers, which store discrete messages, and stream buffers, which store a + continuous stream of bytes. Discrete messages include an additional + sbBYTES_TO_STORE_MESSAGE_LENGTH bytes that hold the length of the message. */ + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ) != ( uint8_t ) 0 ) + { + xBytesToStoreMessageLength = sbBYTES_TO_STORE_MESSAGE_LENGTH; + } + else + { + xBytesToStoreMessageLength = 0; + } + + /* True if the available space equals zero. */ + if( xStreamBufferSpacesAvailable( xStreamBuffer ) <= xBytesToStoreMessageLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; + + configASSERT( pxStreamBuffer ); + + uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) + { + ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToReceive, + ( uint32_t ) 0, + eNoAction, + pxHigherPriorityTaskWoken ); + ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t *pxHigherPriorityTaskWoken ) +{ +StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; +BaseType_t xReturn; +UBaseType_t uxSavedInterruptStatus; + + configASSERT( pxStreamBuffer ); + + uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) + { + ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToSend, + ( uint32_t ) 0, + eNoAction, + pxHigherPriorityTaskWoken ); + ( pxStreamBuffer )->xTaskWaitingToSend = NULL; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +static size_t prvWriteBytesToBuffer( StreamBuffer_t * const pxStreamBuffer, const uint8_t *pucData, size_t xCount ) +{ +size_t xNextHead, xFirstLength; + + configASSERT( xCount > ( size_t ) 0 ); + + xNextHead = pxStreamBuffer->xHead; + + /* Calculate the number of bytes that can be added in the first write - + which may be less than the total number of bytes that need to be added if + the buffer will wrap back to the beginning. */ + xFirstLength = configMIN( pxStreamBuffer->xLength - xNextHead, xCount ); + + /* Write as many bytes as can be written in the first write. 
*/ + configASSERT( ( xNextHead + xFirstLength ) <= pxStreamBuffer->xLength ); + ( void ) memcpy( ( void* ) ( &( pxStreamBuffer->pucBuffer[ xNextHead ] ) ), ( const void * ) pucData, xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + + /* If the number of bytes written was less than the number that could be + written in the first write... */ + if( xCount > xFirstLength ) + { + /* ...then write the remaining bytes to the start of the buffer. */ + configASSERT( ( xCount - xFirstLength ) <= pxStreamBuffer->xLength ); + ( void ) memcpy( ( void * ) pxStreamBuffer->pucBuffer, ( const void * ) &( pucData[ xFirstLength ] ), xCount - xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xNextHead += xCount; + if( xNextHead >= pxStreamBuffer->xLength ) + { + xNextHead -= pxStreamBuffer->xLength; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxStreamBuffer->xHead = xNextHead; + + return xCount; +} +/*-----------------------------------------------------------*/ + +static size_t prvReadBytesFromBuffer( StreamBuffer_t *pxStreamBuffer, uint8_t *pucData, size_t xMaxCount, size_t xBytesAvailable ) +{ +size_t xCount, xFirstLength, xNextTail; + + /* Use the minimum of the wanted bytes and the available bytes. */ + xCount = configMIN( xBytesAvailable, xMaxCount ); + + if( xCount > ( size_t ) 0 ) + { + xNextTail = pxStreamBuffer->xTail; + + /* Calculate the number of bytes that can be read - which may be + less than the number wanted if the data wraps around to the start of + the buffer. */ + xFirstLength = configMIN( pxStreamBuffer->xLength - xNextTail, xCount ); + + /* Obtain the number of bytes it is possible to obtain in the first + read. Asserts check bounds of read and write. */ + configASSERT( xFirstLength <= xMaxCount ); + configASSERT( ( xNextTail + xFirstLength ) <= pxStreamBuffer->xLength ); + ( void ) memcpy( ( void * ) pucData, ( const void * ) &( pxStreamBuffer->pucBuffer[ xNextTail ] ), xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + + /* If the total number of wanted bytes is greater than the number + that could be read in the first read... */ + if( xCount > xFirstLength ) + { + /*...then read the remaining bytes from the start of the buffer. */ + configASSERT( xCount <= xMaxCount ); + ( void ) memcpy( ( void * ) &( pucData[ xFirstLength ] ), ( void * ) ( pxStreamBuffer->pucBuffer ), xCount - xFirstLength ); /*lint !e9087 memcpy() requires void *. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Move the tail pointer to effectively remove the data read from + the buffer. */ + xNextTail += xCount; + + if( xNextTail >= pxStreamBuffer->xLength ) + { + xNextTail -= pxStreamBuffer->xLength; + } + + pxStreamBuffer->xTail = xNextTail; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xCount; +} +/*-----------------------------------------------------------*/ + +static size_t prvBytesInBuffer( const StreamBuffer_t * const pxStreamBuffer ) +{ +/* Returns the distance between xTail and xHead. 
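The head/tail arithmetic used by prvWriteBytesToBuffer() and prvBytesInBuffer() can be shown in isolation. A standalone sketch (not kernel code) of the same count calculation:

    #include <stddef.h>

    /* With xLength = 10, xHead = 2 and xTail = 7 the buffer holds
    ( 10 + 2 ) - 7 = 5 bytes (indices 7, 8, 9, 0 and 1), and no wrap
    correction is needed because 5 < 10. */
    static size_t prvExampleCount( size_t xLength, size_t xHead, size_t xTail )
    {
        size_t xCount = xLength + xHead; /* Bias by xLength so the subtraction... */
        xCount -= xTail;                 /* ...cannot underflow when xTail leads. */

        if( xCount >= xLength )
        {
            xCount -= xLength;           /* Remove the bias again. */
        }

        return xCount;
    }
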
*/ +size_t xCount; + + xCount = pxStreamBuffer->xLength + pxStreamBuffer->xHead; + xCount -= pxStreamBuffer->xTail; + if ( xCount >= pxStreamBuffer->xLength ) + { + xCount -= pxStreamBuffer->xLength; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xCount; +} +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, + uint8_t * const pucBuffer, + size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + uint8_t ucFlags ) +{ + /* Assert here is deliberately writing to the entire buffer to ensure it can + be written to without generating exceptions, and is setting the buffer to a + known value to assist in development/debugging. */ + #if( configASSERT_DEFINED == 1 ) + { + /* The value written just has to be identifiable when looking at the + memory. Don't use 0xA5 as that is the stack fill value and could + result in confusion as to what is actually being observed. */ + const BaseType_t xWriteValue = 0x55; + configASSERT( memset( pucBuffer, ( int ) xWriteValue, xBufferSizeBytes ) == pucBuffer ); + } /*lint !e529 !e438 xWriteValue is only used if configASSERT() is defined. */ + #endif + + ( void ) memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */ + pxStreamBuffer->pucBuffer = pucBuffer; + pxStreamBuffer->xLength = xBufferSizeBytes; + pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes; + pxStreamBuffer->ucFlags = ucFlags; +} + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxStreamBufferGetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer ) + { + return xStreamBuffer->uxStreamBufferNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vStreamBufferSetStreamBufferNumber( StreamBufferHandle_t xStreamBuffer, UBaseType_t uxStreamBufferNumber ) + { + xStreamBuffer->uxStreamBufferNumber = uxStreamBufferNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + uint8_t ucStreamBufferGetStreamBufferType( StreamBufferHandle_t xStreamBuffer ) + { + return ( xStreamBuffer->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER ); + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/tasks.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/tasks.c new file mode 100644 index 0000000..f9c4eb6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/tasks.c @@ -0,0 +1,5214 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * http://www.FreeRTOS.org
+ * http://aws.amazon.com/freertos
+ *
+ * 1 tab == 4 spaces!
+ */
+
+/* Standard includes. */
+#include <stdlib.h>
+#include <string.h>
+
+/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
+all the API functions to use the MPU wrappers.  That should only be done when
+task.h is included from an application file. */
+#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
+
+/* FreeRTOS includes. */
+#include "FreeRTOS.h"
+#include "task.h"
+#include "timers.h"
+#include "stack_macros.h"
+
+/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
+because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
+for the header files above, but not in this file, in order to generate the
+correct privileged Vs unprivileged linkage and placement. */
+#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
+
+/* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
+functions but without including stdio.h here. */
+#if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
+	/* At the bottom of this file are two optional functions that can be used
+	to generate human readable text from the raw data generated by the
+	uxTaskGetSystemState() function.  Note the formatting functions are provided
+	for convenience only, and are NOT considered part of the kernel. */
+	#include <stdio.h>
+#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 */
+
+#if( configUSE_PREEMPTION == 0 )
+	/* If the cooperative scheduler is being used then a yield should not be
+	performed just because a higher priority task has been woken. */
+	#define taskYIELD_IF_USING_PREEMPTION()
+#else
+	#define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
+#endif
+
+/* Values that can be assigned to the ucNotifyState member of the TCB. */
+#define taskNOT_WAITING_NOTIFICATION	( ( uint8_t ) 0 )
+#define taskWAITING_NOTIFICATION		( ( uint8_t ) 1 )
+#define taskNOTIFICATION_RECEIVED		( ( uint8_t ) 2 )
+
+/*
+ * The value used to fill the stack of a task when the task is created.  This
+ * is used purely for checking the high water mark for tasks.
+ */
+#define tskSTACK_FILL_BYTE	( 0xa5U )
+
+/* Bits used to record how a task's stack and TCB were allocated. */
+#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB		( ( uint8_t ) 0 )
+#define tskSTATICALLY_ALLOCATED_STACK_ONLY			( ( uint8_t ) 1 )
+#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB		( ( uint8_t ) 2 )
+
+/* If any of the following are set then task stacks are filled with a known
+value so the high water mark can be determined.  If none of the following are
+set then don't fill the stack so there is no unnecessary dependency on memset. */
+#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
+	#define tskSET_NEW_STACKS_TO_KNOWN_VALUE	1
+#else
+	#define tskSET_NEW_STACKS_TO_KNOWN_VALUE	0
+#endif
+
+/*
+ * Macros used by vTaskList to indicate which state a task is in. 
+ */ +#define tskRUNNING_CHAR ( 'X' ) +#define tskBLOCKED_CHAR ( 'B' ) +#define tskREADY_CHAR ( 'R' ) +#define tskDELETED_CHAR ( 'D' ) +#define tskSUSPENDED_CHAR ( 'S' ) + +/* + * Some kernel aware debuggers require the data the debugger needs access to be + * global, rather than file scope. + */ +#ifdef portREMOVE_STATIC_QUALIFIER + #define static +#endif + +/* The name allocated to the Idle task. This can be overridden by defining +configIDLE_TASK_NAME in FreeRTOSConfig.h. */ +#ifndef configIDLE_TASK_NAME + #define configIDLE_TASK_NAME "IDLE" +#endif + +#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) + + /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is + performed in a generic way that is not optimised to any particular + microcontroller architecture. */ + + /* uxTopReadyPriority holds the priority of the highest priority ready + state task. */ + #define taskRECORD_READY_PRIORITY( uxPriority ) \ + { \ + if( ( uxPriority ) > uxTopReadyPriority ) \ + { \ + uxTopReadyPriority = ( uxPriority ); \ + } \ + } /* taskRECORD_READY_PRIORITY */ + + /*-----------------------------------------------------------*/ + + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + { \ + UBaseType_t uxTopPriority = uxTopReadyPriority; \ + \ + /* Find the highest priority queue that contains ready tasks. */ \ + while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \ + { \ + configASSERT( uxTopPriority ); \ + --uxTopPriority; \ + } \ + \ + /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \ + the same priority get an equal share of the processor time. */ \ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ + uxTopReadyPriority = uxTopPriority; \ + } /* taskSELECT_HIGHEST_PRIORITY_TASK */ + + /*-----------------------------------------------------------*/ + + /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as + they are only required when a port optimised method of task selection is + being used. */ + #define taskRESET_READY_PRIORITY( uxPriority ) + #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority ) + +#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + + /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is + performed in a way that is tailored to the particular microcontroller + architecture being used. */ + + /* A port optimised version is provided. Call the port defined macros. */ + #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority ) + + /*-----------------------------------------------------------*/ + + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + { \ + UBaseType_t uxTopPriority; \ + \ + /* Find the highest priority list that contains ready tasks. */ \ + portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \ + configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ + } /* taskSELECT_HIGHEST_PRIORITY_TASK() */ + + /*-----------------------------------------------------------*/ + + /* A port optimised version is provided, call it only if the TCB being reset + is being referenced from a ready list. If it is referenced from a delayed + or suspended list then it won't be in a ready list. 
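On ports that set configUSE_PORT_OPTIMISED_TASK_SELECTION to 1, the port macros typically keep one bit per ready priority and locate the highest set bit with a count leading zeros instruction. A simplified sketch of that idea using the GCC __builtin_clz intrinsic (illustrative names, not the actual port macros):

    #include <stdint.h>

    static uint32_t uxReadyBitmap; /* Bit n is set when priority n has a ready task. */

    static void vExampleRecordReady( uint32_t uxPriority )
    {
        uxReadyBitmap |= ( 1UL << uxPriority );
    }

    static uint32_t uxExampleHighestReady( void )
    {
        /* 31 minus the number of leading zeros gives the index of the highest
        set bit in constant time.  Requires uxReadyBitmap to be non-zero. */
        return 31UL - ( uint32_t ) __builtin_clz( uxReadyBitmap );
    }
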
*/
+	#define taskRESET_READY_PRIORITY( uxPriority ) \
+	{ \
+		if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
+		{ \
+			portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \
+		} \
+	}
+
+#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
+
+/*-----------------------------------------------------------*/
+
+/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
+count overflows. */
+#define taskSWITCH_DELAYED_LISTS() \
+{ \
+	List_t *pxTemp; \
+ \
+	/* The delayed tasks list should be empty when the lists are switched. */ \
+	configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) ); \
+ \
+	pxTemp = pxDelayedTaskList; \
+	pxDelayedTaskList = pxOverflowDelayedTaskList; \
+	pxOverflowDelayedTaskList = pxTemp; \
+	xNumOfOverflows++; \
+	prvResetNextTaskUnblockTime(); \
+}
+
+/*-----------------------------------------------------------*/
+
+/*
+ * Place the task represented by pxTCB into the appropriate ready list for
+ * the task.  It is inserted at the end of the list.
+ */
+#define prvAddTaskToReadyList( pxTCB ) \
+	traceMOVED_TASK_TO_READY_STATE( pxTCB ); \
+	taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
+	vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
+	tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB )
+/*-----------------------------------------------------------*/
+
+/*
+ * Several functions take a TaskHandle_t parameter that can optionally be NULL,
+ * where NULL is used to indicate that the handle of the currently executing
+ * task should be used in place of the parameter.  This macro simply checks to
+ * see if the parameter is NULL and returns a pointer to the appropriate TCB.
+ */
+#define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) )
+
+/* The item value of the event list item is normally used to hold the priority
+of the task to which it belongs (coded to allow it to be held in reverse
+priority order).  However, it is occasionally borrowed for other purposes.  It
+is important its value is not updated due to a task priority change while it is
+being used for another purpose.  The following bit definition is used to inform
+the scheduler that the value should not be changed - in which case it is the
+responsibility of whichever module is using the value to ensure it gets set back
+to its original value when it is released. */
+#if( configUSE_16_BIT_TICKS == 1 )
+	#define taskEVENT_LIST_ITEM_VALUE_IN_USE	0x8000U
+#else
+	#define taskEVENT_LIST_ITEM_VALUE_IN_USE	0x80000000UL
+#endif
+
+/*
+ * Task control block.  A task control block (TCB) is allocated for each task,
+ * and stores task state information, including a pointer to the task's context
+ * (the task's run time environment, including register values).
+ */
+typedef struct tskTaskControlBlock 			/* The old naming convention is used to prevent breaking kernel aware debuggers. */
+{
+	volatile StackType_t	*pxTopOfStack;	/*< Points to the location of the last item placed on the tasks stack.  THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
+
+	#if ( portUSING_MPU_WRAPPERS == 1 )
+		xMPU_SETTINGS	xMPUSettings;		/*< The MPU settings are defined as part of the port layer.  THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
+	#endif
+
+	ListItem_t			xStateListItem;	/*< The list that the state list item of a task is referenced from denotes the state of that task (Ready, Blocked, Suspended). */
+	ListItem_t			xEventListItem;		/*< Used to reference a task from an event list. */
+	UBaseType_t			uxPriority;			/*< The priority of the task.  0 is the lowest priority. */
+	StackType_t			*pxStack;			/*< Points to the start of the stack. */
+	char				pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created.  Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+
+	#if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
+		StackType_t		*pxEndOfStack;		/*< Points to the highest valid address for the stack. */
+	#endif
+
+	#if ( portCRITICAL_NESTING_IN_TCB == 1 )
+		UBaseType_t		uxCriticalNesting;	/*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
+	#endif
+
+	#if ( configUSE_TRACE_FACILITY == 1 )
+		UBaseType_t		uxTCBNumber;		/*< Stores a number that increments each time a TCB is created.  It allows debuggers to determine when a task has been deleted and then recreated. */
+		UBaseType_t		uxTaskNumber;		/*< Stores a number specifically for use by third party trace code. */
+	#endif
+
+	#if ( configUSE_MUTEXES == 1 )
+		UBaseType_t		uxBasePriority;		/*< The priority last assigned to the task - used by the priority inheritance mechanism. */
+		UBaseType_t		uxMutexesHeld;
+	#endif
+
+	#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+		TaskHookFunction_t pxTaskTag;
+	#endif
+
+	#if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
+		void			*pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
+	#endif
+
+	#if( configGENERATE_RUN_TIME_STATS == 1 )
+		uint32_t		ulRunTimeCounter;	/*< Stores the amount of time the task has spent in the Running state. */
+	#endif
+
+	#if ( configUSE_NEWLIB_REENTRANT == 1 )
+		/* Allocate a Newlib reent structure that is specific to this task.
+		Note Newlib support has been included by popular demand, but is not
+		used by the FreeRTOS maintainers themselves.  FreeRTOS is not
+		responsible for resulting newlib operation.  User must be familiar with
+		newlib and must provide system-wide implementations of the necessary
+		stubs.  Be warned that (at the time of writing) the current newlib design
+		implements a system-wide malloc() that must be provided with locks. */
+		struct	_reent xNewLib_reent;
+	#endif
+
+	#if( configUSE_TASK_NOTIFICATIONS == 1 )
+		volatile uint32_t ulNotifiedValue;
+		volatile uint8_t ucNotifyState;
+	#endif
+
+	/* See the comments in FreeRTOS.h with the definition of
+	tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
+	#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
+		uint8_t	ucStaticallyAllocated; 		/*< Set to pdTRUE if the task was statically allocated, to ensure no attempt is made to free the memory. */
+	#endif
+
+	#if( INCLUDE_xTaskAbortDelay == 1 )
+		uint8_t ucDelayAborted;
+	#endif
+
+	#if( configUSE_POSIX_ERRNO == 1 )
+		int iTaskErrno;
+	#endif
+
+} tskTCB;
+
+/* The old tskTCB name is maintained above then typedefed to the new TCB_t name
+below to enable the use of older kernel aware debuggers. */
+typedef tskTCB TCB_t;
+
+/*lint -save -e956 A manual analysis and inspection has been used to determine
+which static variables must be declared volatile. */
+PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL;
+
+/* Lists for ready and blocked tasks. --------------------
+xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
+doing so breaks some kernel aware debuggers and debuggers that rely on removing
+the static qualifier. */
+PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ] = { 0 };/*< Prioritised ready tasks. */
+PRIVILEGED_DATA static List_t xDelayedTaskList1 = { 0 };					/*< Delayed tasks. */
+PRIVILEGED_DATA static List_t xDelayedTaskList2 = { 0 };					/*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count). */
+PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList = NULL;			/*< Points to the delayed task list currently being used. */
+PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList = NULL;	/*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
+PRIVILEGED_DATA static List_t xPendingReadyList = { 0 };					/*< Tasks that have been readied while the scheduler was suspended.  They will be moved to the ready list when the scheduler is resumed. */
+
+#if( INCLUDE_vTaskDelete == 1 )
+
+	PRIVILEGED_DATA static List_t xTasksWaitingTermination = { 0 };	/*< Tasks that have been deleted - but their memory not yet freed. */
+	PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
+
+#endif
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+	PRIVILEGED_DATA static List_t xSuspendedTaskList = { 0 };	/*< Tasks that are currently suspended. */
+
+#endif
+
+/* Global POSIX errno.  Its value is changed upon context switching to match
+the errno of the currently running task. */
+#if ( configUSE_POSIX_ERRNO == 1 )
+	int FreeRTOS_errno = 0;
+#endif
+
+/* Other file private variables. --------------------------------*/
+PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
+PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
+PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
+PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
+PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U;
+PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE;
+PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
+PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
+PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
+PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL;	/*< Holds the handle of the idle task.  The idle task is created automatically when the scheduler is started. */
+
+/* Context switches are held pending while the scheduler is suspended.  Also,
+interrupts must not manipulate the xStateListItem of a TCB, or any of the
+lists the xStateListItem can be referenced from, if the scheduler is suspended.
+If an interrupt needs to unblock a task while the scheduler is suspended then it
+moves the task's event list item into the xPendingReadyList, ready for the
+kernel to move the task from the pending ready list into the real ready list
+when the scheduler is unsuspended.  The pending ready list itself can only be
+accessed from a critical section. 
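A worked example of why the two delayed lists above are needed, using a 16-bit tick type for brevity (illustrative values):

    #include <stdint.h>

    void vExampleOverflow( void )
    {
        uint16_t usNow   = 0xFFF0U;          /* Current tick count. */
        uint16_t usDelay = 0x0020U;          /* Requested block time. */
        uint16_t usWake  = usNow + usDelay;  /* Wraps around to 0x0010. */

        /* usWake < usNow, so the wake time belongs to the next epoch of the
        tick count.  Such tasks go on pxOverflowDelayedTaskList, and the two
        lists are exchanged by taskSWITCH_DELAYED_LISTS() when xTickCount
        itself wraps, at which point the wake time is directly comparable
        with the tick count again. */
        ( void ) usWake;
    }
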
*/ +PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE; + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + + /* Do not move these variables to function scope as doing so prevents the + code working with debuggers that need to remove the static qualifier. */ + PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */ + PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */ + +#endif + +/*lint -restore */ + +/*-----------------------------------------------------------*/ + +/* Callback function prototypes. --------------------------*/ +#if( configCHECK_FOR_STACK_OVERFLOW > 0 ) + + extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName ); + +#endif + +#if( configUSE_TICK_HOOK > 0 ) + + extern void vApplicationTickHook( void ); /*lint !e526 Symbol not defined as it is an application callback. */ + +#endif + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + extern void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ); /*lint !e526 Symbol not defined as it is an application callback. */ + +#endif + +/* File private functions. --------------------------------*/ + +/** + * Utility task that simply returns pdTRUE if the task referenced by xTask is + * currently in the Suspended state, or pdFALSE if the task referenced by xTask + * is in any other state. + */ +#if ( INCLUDE_vTaskSuspend == 1 ) + + static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +#endif /* INCLUDE_vTaskSuspend */ + +/* + * Utility to ready all the lists used by the scheduler. This is called + * automatically upon the creation of the first task. + */ +static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; + +/* + * The idle task, which as all tasks is implemented as a never ending loop. + * The idle task is automatically created and added to the ready lists upon + * creation of the first user task. + * + * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific + * language extensions. The equivalent prototype for this function is: + * + * void prvIdleTask( void *pvParameters ); + * + */ +static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ); + +/* + * Utility to free all memory allocated by the scheduler to hold a TCB, + * including the stack pointed to by the TCB. + * + * This does not free memory allocated by the task itself (i.e. memory + * allocated by calls to pvPortMalloc from within the tasks application code). + */ +#if ( INCLUDE_vTaskDelete == 1 ) + + static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Used only by the idle task. This checks to see if anything has been placed + * in the list of tasks waiting to be deleted. If so the task is cleaned up + * and its TCB deleted. + */ +static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION; + +/* + * The currently executing task is entering the Blocked state. Add the task to + * either the current or the overflow delayed task list. 
+ */ +static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION; + +/* + * Fills an TaskStatus_t structure with information on each task that is + * referenced from the pxList list (which may be a ready list, a delayed list, + * a suspended list, etc.). + * + * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM + * NORMAL APPLICATION CODE. + */ +#if ( configUSE_TRACE_FACILITY == 1 ) + + static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Searches pxList for a task with name pcNameToQuery - returning a handle to + * the task if it is found, or NULL if the task is not found. + */ +#if ( INCLUDE_xTaskGetHandle == 1 ) + + static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) PRIVILEGED_FUNCTION; + +#endif + +/* + * When a task is created, the stack of the task is filled with a known value. + * This function determines the 'high water mark' of the task stack by + * determining how much of the stack remains at the original preset value. + */ +#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) + + static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Return the amount of time, in ticks, that will pass before the kernel will + * next move a task from the Blocked state to the Running state. + * + * This conditional compilation should use inequality to 0, not equality to 1. + * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user + * defined low power mode implementations require configUSE_TICKLESS_IDLE to be + * set to a value other than 1. + */ +#if ( configUSE_TICKLESS_IDLE != 0 ) + + static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Set xNextTaskUnblockTime to the time at which the next Blocked state task + * will exit the Blocked state. + */ +static void prvResetNextTaskUnblockTime( void ); + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) + + /* + * Helper function used to pad task names with spaces when printing out + * human readable tables of task information. + */ + static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Called after a Task_t structure has been allocated either statically or + * dynamically to fill in the structure's members. + */ +static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask, + TCB_t *pxNewTCB, + const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION; + +/* + * Called after a new task has been created and initialised to place the task + * under the control of the scheduler. + */ +static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION; + +/* + * freertos_tasks_c_additions_init() should only be called if the user definable + * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro + * called by the function. 
+ */ +#ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + + static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION; + +#endif + +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) + { + TCB_t *pxNewTCB; + TaskHandle_t xReturn; + + configASSERT( puxStackBuffer != NULL ); + configASSERT( pxTaskBuffer != NULL ); + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticTask_t equals the size of the real task + structure. */ + volatile size_t xSize = sizeof( StaticTask_t ); + configASSERT( xSize == sizeof( TCB_t ) ); + ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */ + } + #endif /* configASSERT_DEFINED */ + + + if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) ) + { + /* The memory used for the task's TCB and stack are passed into this + function - use them. */ + pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ + pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer; + + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ + { + /* Tasks can be created statically or dynamically, so note this + task was created statically in case the task is later deleted. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL ); + prvAddNewTaskToReadyList( pxNewTCB ); + } + else + { + xReturn = NULL; + } + + return xReturn; + } + +#endif /* SUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) + { + TCB_t *pxNewTCB; + BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + + configASSERT( pxTaskDefinition->puxStackBuffer != NULL ); + configASSERT( pxTaskDefinition->pxTaskBuffer != NULL ); + + if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) ) + { + /* Allocate space for the TCB. Where the memory comes from depends + on the implementation of the port malloc function and whether or + not static allocation is being used. */ + pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer; + + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; + + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + { + /* Tasks can be created statically or dynamically, so note this + task was created statically in case the task is later deleted. 
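A typical call into the function above, with both buffers supplied by the application so nothing is drawn from the FreeRTOS heap (illustrative names):

    #include "FreeRTOS.h"
    #include "task.h"

    #define exSTACK_DEPTH 200
    static StackType_t uxExampleStack[ exSTACK_DEPTH ];
    static StaticTask_t xExampleTCB;

    extern void vExampleTaskCode( void *pvParameters ); /* Hypothetical task body. */

    void vStartExampleTask( void )
    {
        /* Returns the task handle on success, or NULL if either buffer is NULL. */
        ( void ) xTaskCreateStatic( vExampleTaskCode, "EXMP", exSTACK_DEPTH, NULL,
                                    tskIDLE_PRIORITY + 1, uxExampleStack, &xExampleTCB );
    }
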
*/ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, + pxTaskDefinition->pcName, + ( uint32_t ) pxTaskDefinition->usStackDepth, + pxTaskDefinition->pvParameters, + pxTaskDefinition->uxPriority, + pxCreatedTask, pxNewTCB, + pxTaskDefinition->xRegions ); + + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + + return xReturn; + } + +#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + +#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) + { + TCB_t *pxNewTCB; + BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + + configASSERT( pxTaskDefinition->puxStackBuffer ); + + if( pxTaskDefinition->puxStackBuffer != NULL ) + { + /* Allocate space for the TCB. Where the memory comes from depends + on the implementation of the port malloc function and whether or + not static allocation is being used. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); + + if( pxNewTCB != NULL ) + { + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; + + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + { + /* Tasks can be created statically or dynamically, so note + this task had a statically allocated stack in case it is + later deleted. The TCB was allocated dynamically. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, + pxTaskDefinition->pcName, + ( uint32_t ) pxTaskDefinition->usStackDepth, + pxTaskDefinition->pvParameters, + pxTaskDefinition->uxPriority, + pxCreatedTask, pxNewTCB, + pxTaskDefinition->xRegions ); + + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + } + + return xReturn; + } + +#endif /* portUSING_MPU_WRAPPERS */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + BaseType_t xTaskCreate( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask ) + { + TCB_t *pxNewTCB; + BaseType_t xReturn; + + /* If the stack grows down then allocate the stack then the TCB so the stack + does not grow into the TCB. Likewise if the stack grows up then allocate + the TCB then the stack. */ + #if( portSTACK_GROWTH > 0 ) + { + /* Allocate space for the TCB. Where the memory comes from depends on + the implementation of the port malloc function and whether or not static + allocation is being used. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); + + if( pxNewTCB != NULL ) + { + /* Allocate space for the stack used by the task being created. + The base of the stack memory stored in the TCB so the task can + be deleted later if required. */ + pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. 
*/ + + if( pxNewTCB->pxStack == NULL ) + { + /* Could not allocate the stack. Delete the allocated TCB. */ + vPortFree( pxNewTCB ); + pxNewTCB = NULL; + } + } + } + #else /* portSTACK_GROWTH */ + { + StackType_t *pxStack; + + /* Allocate space for the stack used by the task being created. */ + pxStack = pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */ + + if( pxStack != NULL ) + { + /* Allocate space for the TCB. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */ + + if( pxNewTCB != NULL ) + { + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxStack; + } + else + { + /* The stack cannot be used as the TCB was not created. Free + it again. */ + vPortFree( pxStack ); + } + } + else + { + pxNewTCB = NULL; + } + } + #endif /* portSTACK_GROWTH */ + + if( pxNewTCB != NULL ) + { + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */ + { + /* Tasks can be created statically or dynamically, so note this + task was created dynamically in case it is later deleted. */ + pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL ); + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + else + { + xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + } + + return xReturn; + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask, + TCB_t *pxNewTCB, + const MemoryRegion_t * const xRegions ) +{ +StackType_t *pxTopOfStack; +UBaseType_t x; + + #if( portUSING_MPU_WRAPPERS == 1 ) + /* Should the task be created in privileged mode? */ + BaseType_t xRunPrivileged; + if( ( uxPriority & portPRIVILEGE_BIT ) != 0U ) + { + xRunPrivileged = pdTRUE; + } + else + { + xRunPrivileged = pdFALSE; + } + uxPriority &= ~portPRIVILEGE_BIT; + #endif /* portUSING_MPU_WRAPPERS == 1 */ + + /* Avoid dependency on memset() if it is not required. */ + #if( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 ) + { + /* Fill the stack with a known value to assist debugging. */ + ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) ); + } + #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */ + + /* Calculate the top of stack address. This depends on whether the stack + grows from high memory to low (as per the 80x86) or vice versa. + portSTACK_GROWTH is used to make the result positive or negative as required + by the port. 
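The masking step above only ever rounds the top of stack address down. A worked example assuming portBYTE_ALIGNMENT is 8, so portBYTE_ALIGNMENT_MASK is 0x0007:

    #include <stdint.h>

    void vExampleAlign( void )
    {
        uintptr_t uxRawTop = 0x2000FFF6U;  /* Address of pxStack[ depth - 1 ]. */
        uintptr_t uxAligned = uxRawTop & ~( uintptr_t ) 0x0007U;

        /* uxAligned is 0x2000FFF0: rounded down to the 8-byte boundary, so the
        aligned top of stack always remains inside the allocated stack area. */
        ( void ) uxAligned;
    }
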
*/ + #if( portSTACK_GROWTH < 0 ) + { + pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] ); + pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. Checked by assert(). */ + + /* Check the alignment of the calculated top of stack is correct. */ + configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) ); + + #if( configRECORD_STACK_HIGH_ADDRESS == 1 ) + { + /* Also record the stack's high address, which may assist + debugging. */ + pxNewTCB->pxEndOfStack = pxTopOfStack; + } + #endif /* configRECORD_STACK_HIGH_ADDRESS */ + } + #else /* portSTACK_GROWTH */ + { + pxTopOfStack = pxNewTCB->pxStack; + + /* Check the alignment of the stack buffer is correct. */ + configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) ); + + /* The other extreme of the stack space is required if stack checking is + performed. */ + pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 ); + } + #endif /* portSTACK_GROWTH */ + + /* Store the task name in the TCB. */ + if( pcName != NULL ) + { + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + { + pxNewTCB->pcTaskName[ x ] = pcName[ x ]; + + /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than + configMAX_TASK_NAME_LEN characters just in case the memory after the + string is not accessible (extremely unlikely). */ + if( pcName[ x ] == ( char ) 0x00 ) + { + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Ensure the name string is terminated in the case that the string length + was greater or equal to configMAX_TASK_NAME_LEN. */ + pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0'; + } + else + { + /* The task has not been given a name, so just ensure there is a NULL + terminator when it is read out. */ + pxNewTCB->pcTaskName[ 0 ] = 0x00; + } + + /* This is used as an array index so must ensure it's not too large. First + remove the privilege bit if one is present. */ + if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES ) + { + uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxNewTCB->uxPriority = uxPriority; + #if ( configUSE_MUTEXES == 1 ) + { + pxNewTCB->uxBasePriority = uxPriority; + pxNewTCB->uxMutexesHeld = 0; + } + #endif /* configUSE_MUTEXES */ + + vListInitialiseItem( &( pxNewTCB->xStateListItem ) ); + vListInitialiseItem( &( pxNewTCB->xEventListItem ) ); + + /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get + back to the containing TCB from a generic item in a list. */ + listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB ); + + /* Event lists are always in priority order. */ + listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. 
*/ + listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB ); + + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + { + pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U; + } + #endif /* portCRITICAL_NESTING_IN_TCB */ + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + { + pxNewTCB->pxTaskTag = NULL; + } + #endif /* configUSE_APPLICATION_TASK_TAG */ + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + pxNewTCB->ulRunTimeCounter = 0UL; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + #if ( portUSING_MPU_WRAPPERS == 1 ) + { + vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth ); + } + #else + { + /* Avoid compiler warning about unreferenced parameter. */ + ( void ) xRegions; + } + #endif + + #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + { + for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ ) + { + pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL; + } + } + #endif + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + { + pxNewTCB->ulNotifiedValue = 0; + pxNewTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + } + #endif + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Initialise this task's Newlib reent structure. */ + _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) ); + } + #endif + + #if( INCLUDE_xTaskAbortDelay == 1 ) + { + pxNewTCB->ucDelayAborted = pdFALSE; + } + #endif + + /* Initialize the TCB stack to look as if the task was already running, + but had been interrupted by the scheduler. The return address is set + to the start of the task function. Once the stack has been initialised + the top of stack variable is updated. */ + #if( portUSING_MPU_WRAPPERS == 1 ) + { + /* If the port has capability to detect stack overflow, + pass the stack end address to the stack initialization + function as well. */ + #if( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + { + #if( portSTACK_GROWTH < 0 ) + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #else /* portSTACK_GROWTH */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #endif /* portSTACK_GROWTH */ + } + #else /* portHAS_STACK_OVERFLOW_CHECKING */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #endif /* portHAS_STACK_OVERFLOW_CHECKING */ + } + #else /* portUSING_MPU_WRAPPERS */ + { + /* If the port has capability to detect stack overflow, + pass the stack end address to the stack initialization + function as well. */ + #if( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + { + #if( portSTACK_GROWTH < 0 ) + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters ); + } + #else /* portSTACK_GROWTH */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters ); + } + #endif /* portSTACK_GROWTH */ + } + #else /* portHAS_STACK_OVERFLOW_CHECKING */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters ); + } + #endif /* portHAS_STACK_OVERFLOW_CHECKING */ + } + #endif /* portUSING_MPU_WRAPPERS */ + + if( pxCreatedTask != NULL ) + { + /* Pass the handle out in an anonymous way. 
The handle can be used to + change the created task's priority, delete the created task, etc.*/ + *pxCreatedTask = ( TaskHandle_t ) pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) +{ + /* Ensure interrupts don't access the task lists while the lists are being + updated. */ + taskENTER_CRITICAL(); + { + uxCurrentNumberOfTasks++; + if( pxCurrentTCB == NULL ) + { + /* There are no other tasks, or all the other tasks are in + the suspended state - make this the current task. */ + pxCurrentTCB = pxNewTCB; + + if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) + { + /* This is the first task to be created so do the preliminary + initialisation required. We will not recover if this call + fails, but we will report the failure. */ + prvInitialiseTaskLists(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* If the scheduler is not already running, make this task the + current task if it is the highest priority task to be created + so far. */ + if( xSchedulerRunning == pdFALSE ) + { + if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) + { + pxCurrentTCB = pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + uxTaskNumber++; + + #if ( configUSE_TRACE_FACILITY == 1 ) + { + /* Add a counter into the TCB for tracing only. */ + pxNewTCB->uxTCBNumber = uxTaskNumber; + } + #endif /* configUSE_TRACE_FACILITY */ + traceTASK_CREATE( pxNewTCB ); + + prvAddTaskToReadyList( pxNewTCB ); + + portSETUP_TCB( pxNewTCB ); + } + taskEXIT_CRITICAL(); + + if( xSchedulerRunning != pdFALSE ) + { + /* If the created task is of a higher priority than the current task + then it should run now. */ + if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelete == 1 ) + + void vTaskDelete( TaskHandle_t xTaskToDelete ) + { + TCB_t *pxTCB; + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the calling task that is + being deleted. */ + pxTCB = prvGetTCBFromHandle( xTaskToDelete ); + + /* Remove task from the ready list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Is the task waiting on an event also? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Increment the uxTaskNumber also so kernel aware debuggers can + detect that the task lists need re-generating. This is done before + portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will + not return. */ + uxTaskNumber++; + + if( pxTCB == pxCurrentTCB ) + { + /* A task is deleting itself. This cannot complete within the + task itself, as a context switch to another task is required. + Place the task in the termination list. The idle task will + check the termination list and free up any memory allocated by + the scheduler for the TCB and stack of the deleted task. 
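+				Note that memory the task itself allocated, for example with
+				pvPortMalloc(), is not freed here and remains the
+				application's responsibility.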
*/
+				vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );
+
+				/* Increment the uxDeletedTasksWaitingCleanUp variable so the idle
+				task knows there is a task that has been deleted and that it should
+				therefore check the xTasksWaitingTermination list. */
+				++uxDeletedTasksWaitingCleanUp;
+
+				/* The pre-delete hook is primarily for the Windows simulator,
+				in which Windows specific clean up operations are performed,
+				after which it is not possible to yield away from this task -
+				hence xYieldPending is used to latch that a context switch is
+				required. */
+				portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending );
+			}
+			else
+			{
+				--uxCurrentNumberOfTasks;
+				prvDeleteTCB( pxTCB );
+
+				/* Reset the next expected unblock time in case it referred to
+				the task that has just been deleted. */
+				prvResetNextTaskUnblockTime();
+			}
+
+			traceTASK_DELETE( pxTCB );
+		}
+		taskEXIT_CRITICAL();
+
+		/* Force a reschedule if it is the currently running task that has just
+		been deleted. */
+		if( xSchedulerRunning != pdFALSE )
+		{
+			if( pxTCB == pxCurrentTCB )
+			{
+				configASSERT( uxSchedulerSuspended == 0 );
+				portYIELD_WITHIN_API();
+			}
+			else
+			{
+				mtCOVERAGE_TEST_MARKER();
+			}
+		}
+	}
+
+#endif /* INCLUDE_vTaskDelete */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelayUntil == 1 )
+
+	void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement )
+	{
+	TickType_t xTimeToWake;
+	BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;
+
+		configASSERT( pxPreviousWakeTime );
+		configASSERT( ( xTimeIncrement > 0U ) );
+		configASSERT( uxSchedulerSuspended == 0 );
+
+		vTaskSuspendAll();
+		{
+			/* Minor optimisation.  The tick count cannot change in this
+			block. */
+			const TickType_t xConstTickCount = xTickCount;
+
+			/* Generate the tick time at which the task wants to wake. */
+			xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
+
+			if( xConstTickCount < *pxPreviousWakeTime )
+			{
+				/* The tick count has overflowed since this function was
+				last called.  In this case the only time we should ever
+				actually delay is if the wake time has also overflowed,
+				and the wake time is greater than the tick time.  When this
+				is the case it is as if neither time had overflowed. */
+				if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
+				{
+					xShouldDelay = pdTRUE;
+				}
+				else
+				{
+					mtCOVERAGE_TEST_MARKER();
+				}
+			}
+			else
+			{
+				/* The tick time has not overflowed.  In this case we will
+				delay if either the wake time has overflowed, and/or the
+				tick time is less than the wake time. */
+				if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
+				{
+					xShouldDelay = pdTRUE;
+				}
+				else
+				{
+					mtCOVERAGE_TEST_MARKER();
+				}
+			}
+
+			/* Update the wake time ready for the next call. */
+			*pxPreviousWakeTime = xTimeToWake;
+
+			if( xShouldDelay != pdFALSE )
+			{
+				traceTASK_DELAY_UNTIL( xTimeToWake );
+
+				/* prvAddCurrentTaskToDelayedList() needs the block time, not
+				the time to wake, so subtract the current tick count. */
+				prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
+			}
+			else
+			{
+				mtCOVERAGE_TEST_MARKER();
+			}
+		}
+		xAlreadyYielded = xTaskResumeAll();
+
+		/* Force a reschedule if xTaskResumeAll has not already done so, as
+		we may have put ourselves to sleep.
*/
+		if( xAlreadyYielded == pdFALSE )
+		{
+			portYIELD_WITHIN_API();
+		}
+		else
+		{
+			mtCOVERAGE_TEST_MARKER();
+		}
+	}
+
+#endif /* INCLUDE_vTaskDelayUntil */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskDelay == 1 )
+
+	void vTaskDelay( const TickType_t xTicksToDelay )
+	{
+	BaseType_t xAlreadyYielded = pdFALSE;
+
+		/* A delay time of zero just forces a reschedule. */
+		if( xTicksToDelay > ( TickType_t ) 0U )
+		{
+			configASSERT( uxSchedulerSuspended == 0 );
+			vTaskSuspendAll();
+			{
+				traceTASK_DELAY();
+
+				/* A task that is removed from the event list while the
+				scheduler is suspended will not get placed in the ready
+				list or removed from the blocked list until the scheduler
+				is resumed.
+
+				This task cannot be in an event list as it is the currently
+				executing task. */
+				prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
+			}
+			xAlreadyYielded = xTaskResumeAll();
+		}
+		else
+		{
+			mtCOVERAGE_TEST_MARKER();
+		}
+
+		/* Force a reschedule if xTaskResumeAll has not already done so, as
+		we may have put ourselves to sleep. */
+		if( xAlreadyYielded == pdFALSE )
+		{
+			portYIELD_WITHIN_API();
+		}
+		else
+		{
+			mtCOVERAGE_TEST_MARKER();
+		}
+	}
+
+#endif /* INCLUDE_vTaskDelay */
+/*-----------------------------------------------------------*/
+
+#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )
+
+	eTaskState eTaskGetState( TaskHandle_t xTask )
+	{
+	eTaskState eReturn;
+	List_t const * pxStateList, *pxDelayedList, *pxOverflowedDelayedList;
+	const TCB_t * const pxTCB = xTask;
+
+		configASSERT( pxTCB );
+
+		if( pxTCB == pxCurrentTCB )
+		{
+			/* The task calling this function is querying its own state. */
+			eReturn = eRunning;
+		}
+		else
+		{
+			taskENTER_CRITICAL();
+			{
+				pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
+				pxDelayedList = pxDelayedTaskList;
+				pxOverflowedDelayedList = pxOverflowDelayedTaskList;
+			}
+			taskEXIT_CRITICAL();
+
+			if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
+			{
+				/* The task being queried is referenced from one of the Blocked
+				lists. */
+				eReturn = eBlocked;
+			}
+
+			#if ( INCLUDE_vTaskSuspend == 1 )
+				else if( pxStateList == &xSuspendedTaskList )
+				{
+					/* The task being queried is referenced from the suspended
+					list.  Is it genuinely suspended or is it blocked
+					indefinitely? */
+					if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
+					{
+						#if( configUSE_TASK_NOTIFICATIONS == 1 )
+						{
+							/* The task does not appear on the event list of
+							any of the RTOS objects, but could still be in the
+							blocked state if it is waiting on its notification
+							rather than waiting on an object. */
+							if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
+							{
+								eReturn = eBlocked;
+							}
+							else
+							{
+								eReturn = eSuspended;
+							}
+						}
+						#else
+						{
+							eReturn = eSuspended;
+						}
+						#endif
+					}
+					else
+					{
+						eReturn = eBlocked;
+					}
+				}
+			#endif
+
+			#if ( INCLUDE_vTaskDelete == 1 )
+				else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
+				{
+					/* The task being queried is referenced from the deleted
+					tasks list, or it is not referenced from any lists at
+					all. */
+					eReturn = eDeleted;
+				}
+			#endif
+
+			else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
+			{
+				/* If the task is not in any other state, it must be in the
+				Ready (including pending ready) state.
*/
+				eReturn = eReady;
+			}
+		}
+
+		return eReturn;
+	} /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
+
+#endif /* INCLUDE_eTaskGetState */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+	UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
+	{
+	TCB_t const *pxTCB;
+	UBaseType_t uxReturn;
+
+		taskENTER_CRITICAL();
+		{
+			/* If null is passed in here then it is the priority of the task
+			that called uxTaskPriorityGet() that is being queried. */
+			pxTCB = prvGetTCBFromHandle( xTask );
+			uxReturn = pxTCB->uxPriority;
+		}
+		taskEXIT_CRITICAL();
+
+		return uxReturn;
+	}
+
+#endif /* INCLUDE_uxTaskPriorityGet */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_uxTaskPriorityGet == 1 )
+
+	UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
+	{
+	TCB_t const *pxTCB;
+	UBaseType_t uxReturn, uxSavedInterruptState;
+
+		/* RTOS ports that support interrupt nesting have the concept of a
+		maximum system call (or maximum API call) interrupt priority.
+		Interrupts that are above the maximum system call priority are kept
+		permanently enabled, even when the RTOS kernel is in a critical section,
+		but cannot make any calls to FreeRTOS API functions.  If configASSERT()
+		is defined in FreeRTOSConfig.h then
+		portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+		failure if a FreeRTOS API function is called from an interrupt that has
+		been assigned a priority above the configured maximum system call
+		priority.  Only FreeRTOS functions that end in FromISR can be called
+		from interrupts that have been assigned a priority at or (logically)
+		below the maximum system call interrupt priority.  FreeRTOS maintains a
+		separate interrupt safe API to ensure interrupt entry is as fast and as
+		simple as possible.  More information (albeit Cortex-M specific) is
+		provided on the following link:
+		https://www.freertos.org/RTOS-Cortex-M3-M4.html */
+		portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
+
+		uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR();
+		{
+			/* If null is passed in here then it is the priority of the calling
+			task that is being queried. */
+			pxTCB = prvGetTCBFromHandle( xTask );
+			uxReturn = pxTCB->uxPriority;
+		}
+		portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState );
+
+		return uxReturn;
+	}
+
+#endif /* INCLUDE_uxTaskPriorityGet */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskPrioritySet == 1 )
+
+	void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority )
+	{
+	TCB_t *pxTCB;
+	UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
+	BaseType_t xYieldRequired = pdFALSE;
+
+		configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) );
+
+		/* Ensure the new priority is valid. */
+		if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
+		{
+			uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
+		}
+		else
+		{
+			mtCOVERAGE_TEST_MARKER();
+		}
+
+		taskENTER_CRITICAL();
+		{
+			/* If null is passed in here then it is the priority of the calling
+			task that is being changed.
*/
+			pxTCB = prvGetTCBFromHandle( xTask );
+
+			traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );
+
+			#if ( configUSE_MUTEXES == 1 )
+			{
+				uxCurrentBasePriority = pxTCB->uxBasePriority;
+			}
+			#else
+			{
+				uxCurrentBasePriority = pxTCB->uxPriority;
+			}
+			#endif
+
+			if( uxCurrentBasePriority != uxNewPriority )
+			{
+				/* The priority change may have readied a task of higher
+				priority than the calling task. */
+				if( uxNewPriority > uxCurrentBasePriority )
+				{
+					if( pxTCB != pxCurrentTCB )
+					{
+						/* The priority of a task other than the currently
+						running task is being raised.  Is the priority being
+						raised above that of the running task? */
+						if( uxNewPriority >= pxCurrentTCB->uxPriority )
+						{
+							xYieldRequired = pdTRUE;
+						}
+						else
+						{
+							mtCOVERAGE_TEST_MARKER();
+						}
+					}
+					else
+					{
+						/* The priority of the running task is being raised,
+						but the running task must already be the highest
+						priority task able to run so no yield is required. */
+					}
+				}
+				else if( pxTCB == pxCurrentTCB )
+				{
+					/* Setting the priority of the running task down means
+					there may now be another task of higher priority that
+					is ready to execute. */
+					xYieldRequired = pdTRUE;
+				}
+				else
+				{
+					/* Setting the priority of any other task down does not
+					require a yield as the running task must be above the
+					new priority of the task being modified. */
+				}
+
+				/* Remember the ready list the task might be referenced from
+				before its uxPriority member is changed so the
+				taskRESET_READY_PRIORITY() macro can function correctly. */
+				uxPriorityUsedOnEntry = pxTCB->uxPriority;
+
+				#if ( configUSE_MUTEXES == 1 )
+				{
+					/* Only change the priority being used if the task is not
+					currently using an inherited priority. */
+					if( pxTCB->uxBasePriority == pxTCB->uxPriority )
+					{
+						pxTCB->uxPriority = uxNewPriority;
+					}
+					else
+					{
+						mtCOVERAGE_TEST_MARKER();
+					}
+
+					/* The base priority gets set whatever. */
+					pxTCB->uxBasePriority = uxNewPriority;
+				}
+				#else
+				{
+					pxTCB->uxPriority = uxNewPriority;
+				}
+				#endif
+
+				/* Only reset the event list item value if the value is not
+				being used for anything else. */
+				if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
+				{
+					listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+				}
+				else
+				{
+					mtCOVERAGE_TEST_MARKER();
+				}
+
+				/* If the task is in the blocked or suspended list we need do
+				nothing more than change its priority variable. However, if
+				the task is in a ready list it needs to be removed and placed
+				in the list appropriate to its new priority. */
+				if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
+				{
+					/* The task is currently in its ready list - remove before
+					adding it to its new ready list.  As we are in a critical
+					section we can do this even if the scheduler is suspended. */
+					if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
+					{
+						/* It is known that the task is in its ready list so
+						there is no need to check again and the port level
+						reset macro can be called directly.
*/ + portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + prvAddTaskToReadyList( pxTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xYieldRequired != pdFALSE ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Remove compiler warning about unused variables when the port + optimised task selection is not being used. */ + ( void ) uxPriorityUsedOnEntry; + } + } + taskEXIT_CRITICAL(); + } + +#endif /* INCLUDE_vTaskPrioritySet */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + + void vTaskSuspend( TaskHandle_t xTaskToSuspend ) + { + TCB_t *pxTCB; + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the running task that is + being suspended. */ + pxTCB = prvGetTCBFromHandle( xTaskToSuspend ); + + traceTASK_SUSPEND( pxTCB ); + + /* Remove task from the ready/delayed list and place in the + suspended list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Is the task waiting on an event also? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ); + + #if( configUSE_TASK_NOTIFICATIONS == 1 ) + { + if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION ) + { + /* The task was blocked to wait for a notification, but is + now suspended, so no notification was received. */ + pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + } + } + #endif + } + taskEXIT_CRITICAL(); + + if( xSchedulerRunning != pdFALSE ) + { + /* Reset the next expected unblock time in case it referred to the + task that is now in the Suspended state. */ + taskENTER_CRITICAL(); + { + prvResetNextTaskUnblockTime(); + } + taskEXIT_CRITICAL(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( pxTCB == pxCurrentTCB ) + { + if( xSchedulerRunning != pdFALSE ) + { + /* The current task has just been suspended. */ + configASSERT( uxSchedulerSuspended == 0 ); + portYIELD_WITHIN_API(); + } + else + { + /* The scheduler is not running, but the task that was pointed + to by pxCurrentTCB has just been suspended and pxCurrentTCB + must be adjusted to point to a different task. */ + if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */ + { + /* No other tasks are ready, so set pxCurrentTCB back to + NULL so when the next task is created pxCurrentTCB will + be set to point to it no matter what its relative priority + is. */ + pxCurrentTCB = NULL; + } + else + { + vTaskSwitchContext(); + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* INCLUDE_vTaskSuspend */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + + static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) + { + BaseType_t xReturn = pdFALSE; + const TCB_t * const pxTCB = xTask; + + /* Accesses xPendingReadyList so must be called from a critical + section. */ + + /* It does not make sense to check if the calling task is suspended. */ + configASSERT( xTask ); + + /* Is the task being resumed actually in the suspended list? 
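+		Note that a task blocked with an infinite timeout (portMAX_DELAY when
+		INCLUDE_vTaskSuspend is 1) is also held in this list, which is why the
+		event list item is checked below to tell the two cases apart.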
*/
+		if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
+		{
+			/* Has the task already been resumed from within an ISR? */
+			if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
+			{
+				/* Is it in the suspended list because it is in the Suspended
+				state, or because it is blocked with no timeout? */
+				if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961.  The cast is only redundant when NULL is used. */
+				{
+					xReturn = pdTRUE;
+				}
+				else
+				{
+					mtCOVERAGE_TEST_MARKER();
+				}
+			}
+			else
+			{
+				mtCOVERAGE_TEST_MARKER();
+			}
+		}
+		else
+		{
+			mtCOVERAGE_TEST_MARKER();
+		}
+
+		return xReturn;
+	} /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
+
+#endif /* INCLUDE_vTaskSuspend */
+/*-----------------------------------------------------------*/
+
+#if ( INCLUDE_vTaskSuspend == 1 )
+
+	void vTaskResume( TaskHandle_t xTaskToResume )
+	{
+	TCB_t * const pxTCB = xTaskToResume;
+
+		/* It does not make sense to resume the calling task. */
+		configASSERT( xTaskToResume );
+
+		/* The parameter cannot be NULL as it is impossible to resume the
+		currently executing task. */
+		if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) )
+		{
+			taskENTER_CRITICAL();
+			{
+				if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
+				{
+					traceTASK_RESUME( pxTCB );
+
+					/* The ready list can be accessed even if the scheduler is
+					suspended because this is inside a critical section. */
+					( void ) uxListRemove( &( pxTCB->xStateListItem ) );
+					prvAddTaskToReadyList( pxTCB );
+
+					/* A higher priority task may have just been resumed. */
+					if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
+					{
+						/* This yield may not cause the task just resumed to run,
+						but will leave the lists in the correct state for the
+						next yield. */
+						taskYIELD_IF_USING_PREEMPTION();
+					}
+					else
+					{
+						mtCOVERAGE_TEST_MARKER();
+					}
+				}
+				else
+				{
+					mtCOVERAGE_TEST_MARKER();
+				}
+			}
+			taskEXIT_CRITICAL();
+		}
+		else
+		{
+			mtCOVERAGE_TEST_MARKER();
+		}
+	}
+
+#endif /* INCLUDE_vTaskSuspend */
+
+/*-----------------------------------------------------------*/
+
+#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
+
+	BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
+	{
+	BaseType_t xYieldRequired = pdFALSE;
+	TCB_t * const pxTCB = xTaskToResume;
+	UBaseType_t uxSavedInterruptStatus;
+
+		configASSERT( xTaskToResume );
+
+		/* RTOS ports that support interrupt nesting have the concept of a
+		maximum system call (or maximum API call) interrupt priority.
+		Interrupts that are above the maximum system call priority are kept
+		permanently enabled, even when the RTOS kernel is in a critical section,
+		but cannot make any calls to FreeRTOS API functions.  If configASSERT()
+		is defined in FreeRTOSConfig.h then
+		portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+		failure if a FreeRTOS API function is called from an interrupt that has
+		been assigned a priority above the configured maximum system call
+		priority.  Only FreeRTOS functions that end in FromISR can be called
+		from interrupts that have been assigned a priority at or (logically)
+		below the maximum system call interrupt priority.  FreeRTOS maintains a
+		separate interrupt safe API to ensure interrupt entry is as fast and as
+		simple as possible.
More information (albeit Cortex-M specific) is + provided on the following link: + https://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) + { + traceTASK_RESUME_FROM_ISR( pxTCB ); + + /* Check the ready lists can be accessed. */ + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + /* Ready lists can be accessed so move the task from the + suspended list to the ready list directly. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xYieldRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed or ready lists cannot be accessed so the task + is held in the pending ready list until the scheduler is + unsuspended. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xYieldRequired; + } + +#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */ +/*-----------------------------------------------------------*/ + +void vTaskStartScheduler( void ) +{ +BaseType_t xReturn; + + /* Add the idle task at the lowest priority. */ + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + StaticTask_t *pxIdleTaskTCBBuffer = NULL; + StackType_t *pxIdleTaskStackBuffer = NULL; + uint32_t ulIdleTaskStackSize; + + /* The Idle task is created using user provided RAM - obtain the + address of the RAM then create the idle task. */ + vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); + xIdleTaskHandle = xTaskCreateStatic( prvIdleTask, + configIDLE_TASK_NAME, + ulIdleTaskStackSize, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + pxIdleTaskStackBuffer, + pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + + if( xIdleTaskHandle != NULL ) + { + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + } + #else + { + /* The Idle task is being created using dynamically allocated RAM. */ + xReturn = xTaskCreate( prvIdleTask, + configIDLE_TASK_NAME, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + + #if ( configUSE_TIMERS == 1 ) + { + if( xReturn == pdPASS ) + { + xReturn = xTimerCreateTimerTask(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TIMERS */ + + if( xReturn == pdPASS ) + { + /* freertos_tasks_c_additions_init() should only be called if the user + definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is + the only macro called by the function. */ + #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + { + freertos_tasks_c_additions_init(); + } + #endif + + /* Interrupts are turned off here, to ensure a tick does not occur + before or during the call to xPortStartScheduler(). 
The stacks of + the created tasks contain a status word with interrupts switched on + so interrupts will automatically get re-enabled when the first task + starts to run. */ + portDISABLE_INTERRUPTS(); + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Switch Newlib's _impure_ptr variable to point to the _reent + structure specific to the task that will run first. */ + _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); + } + #endif /* configUSE_NEWLIB_REENTRANT */ + + xNextTaskUnblockTime = portMAX_DELAY; + xSchedulerRunning = pdTRUE; + xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; + + /* If configGENERATE_RUN_TIME_STATS is defined then the following + macro must be defined to configure the timer/counter used to generate + the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS + is set to 0 and the following line fails to build then ensure you do not + have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your + FreeRTOSConfig.h file. */ + portCONFIGURE_TIMER_FOR_RUN_TIME_STATS(); + + traceTASK_SWITCHED_IN(); + + /* Setting up the timer tick is hardware specific and thus in the + portable interface. */ + if( xPortStartScheduler() != pdFALSE ) + { + /* Should not reach here as if the scheduler is running the + function will not return. */ + } + else + { + /* Should only reach here if a task calls xTaskEndScheduler(). */ + } + } + else + { + /* This line will only be reached if the kernel could not be started, + because there was not enough FreeRTOS heap to create the idle task + or the timer task. */ + configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY ); + } + + /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0, + meaning xIdleTaskHandle is not used anywhere else. */ + ( void ) xIdleTaskHandle; +} +/*-----------------------------------------------------------*/ + +void vTaskEndScheduler( void ) +{ + /* Stop the scheduler interrupts and call the portable scheduler end + routine so the original ISRs can be restored if necessary. The port + layer must ensure interrupts enable bit is left in the correct state. */ + portDISABLE_INTERRUPTS(); + xSchedulerRunning = pdFALSE; + vPortEndScheduler(); +} +/*----------------------------------------------------------*/ + +void vTaskSuspendAll( void ) +{ + /* A critical section is not required as the variable is of type + BaseType_t. Please read Richard Barry's reply in the following link to a + post in the FreeRTOS support forum before reporting this as a bug! - + http://goo.gl/wu4acr */ + ++uxSchedulerSuspended; + portMEMORY_BARRIER(); +} +/*----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE != 0 ) + + static TickType_t prvGetExpectedIdleTime( void ) + { + TickType_t xReturn; + UBaseType_t uxHigherPriorityReadyTasks = pdFALSE; + + /* uxHigherPriorityReadyTasks takes care of the case where + configUSE_PREEMPTION is 0, so there may be tasks above the idle priority + task that are in the Ready state, even though the idle task is + running. */ + #if( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) + { + if( uxTopReadyPriority > tskIDLE_PRIORITY ) + { + uxHigherPriorityReadyTasks = pdTRUE; + } + } + #else + { + const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01; + + /* When port optimised task selection is used the uxTopReadyPriority + variable is used as a bit map. If bits other than the least + significant bit are set then there are tasks that have a priority + above the idle priority that are in the Ready state. 
This takes + care of the case where the co-operative scheduler is in use. */ + if( uxTopReadyPriority > uxLeastSignificantBit ) + { + uxHigherPriorityReadyTasks = pdTRUE; + } + } + #endif + + if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY ) + { + xReturn = 0; + } + else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 ) + { + /* There are other idle priority tasks in the ready state. If + time slicing is used then the very next tick interrupt must be + processed. */ + xReturn = 0; + } + else if( uxHigherPriorityReadyTasks != pdFALSE ) + { + /* There are tasks in the Ready state that have a priority above the + idle priority. This path can only be reached if + configUSE_PREEMPTION is 0. */ + xReturn = 0; + } + else + { + xReturn = xNextTaskUnblockTime - xTickCount; + } + + return xReturn; + } + +#endif /* configUSE_TICKLESS_IDLE */ +/*----------------------------------------------------------*/ + +BaseType_t xTaskResumeAll( void ) +{ +TCB_t *pxTCB = NULL; +BaseType_t xAlreadyYielded = pdFALSE; + + /* If uxSchedulerSuspended is zero then this function does not match a + previous call to vTaskSuspendAll(). */ + configASSERT( uxSchedulerSuspended ); + + /* It is possible that an ISR caused a task to be removed from an event + list while the scheduler was suspended. If this was the case then the + removed task will have been added to the xPendingReadyList. Once the + scheduler has been resumed it is safe to move all the pending ready + tasks from this list into their appropriate ready list. */ + taskENTER_CRITICAL(); + { + --uxSchedulerSuspended; + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U ) + { + /* Move any readied tasks from the pending list into the + appropriate ready list. */ + while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE ) + { + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* If the moved task has a priority higher than the current + task then a yield must be performed. */ + if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + { + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + if( pxTCB != NULL ) + { + /* A task was unblocked while the scheduler was suspended, + which may have prevented the next unblock time from being + re-calculated, in which case re-calculate it now. Mainly + important for low power tickless implementations, where + this can prevent an unnecessary exit from low power + state. */ + prvResetNextTaskUnblockTime(); + } + + /* If any ticks occurred while the scheduler was suspended then + they should be processed now. This ensures the tick count does + not slip, and that any delayed tasks are resumed at the correct + time. */ + { + UBaseType_t uxPendedCounts = uxPendedTicks; /* Non-volatile copy. 
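+					Copying the volatile to a local variable avoids re-reading
+					it on every iteration of the loop below.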
*/ + + if( uxPendedCounts > ( UBaseType_t ) 0U ) + { + do + { + if( xTaskIncrementTick() != pdFALSE ) + { + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + --uxPendedCounts; + } while( uxPendedCounts > ( UBaseType_t ) 0U ); + + uxPendedTicks = 0; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + if( xYieldPending != pdFALSE ) + { + #if( configUSE_PREEMPTION != 0 ) + { + xAlreadyYielded = pdTRUE; + } + #endif + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + return xAlreadyYielded; +} +/*-----------------------------------------------------------*/ + +TickType_t xTaskGetTickCount( void ) +{ +TickType_t xTicks; + + /* Critical section required if running on a 16 bit processor. */ + portTICK_TYPE_ENTER_CRITICAL(); + { + xTicks = xTickCount; + } + portTICK_TYPE_EXIT_CRITICAL(); + + return xTicks; +} +/*-----------------------------------------------------------*/ + +TickType_t xTaskGetTickCountFromISR( void ) +{ +TickType_t xReturn; +UBaseType_t uxSavedInterruptStatus; + + /* RTOS ports that support interrupt nesting have the concept of a maximum + system call (or maximum API call) interrupt priority. Interrupts that are + above the maximum system call priority are kept permanently enabled, even + when the RTOS kernel is in a critical section, but cannot make any calls to + FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has been + assigned a priority above the configured maximum system call priority. + Only FreeRTOS functions that end in FromISR can be called from interrupts + that have been assigned a priority at or (logically) below the maximum + system call interrupt priority. FreeRTOS maintains a separate interrupt + safe API to ensure interrupt entry is as fast and as simple as possible. + More information (albeit Cortex-M specific) is provided on the following + link: https://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); + { + xReturn = xTickCount; + } + portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxTaskGetNumberOfTasks( void ) +{ + /* A critical section is not required because the variables are of type + BaseType_t. */ + return uxCurrentNumberOfTasks; +} +/*-----------------------------------------------------------*/ + +char *pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +{ +TCB_t *pxTCB; + + /* If null is passed in here then the name of the calling task is being + queried. */ + pxTCB = prvGetTCBFromHandle( xTaskToQuery ); + configASSERT( pxTCB ); + return &( pxTCB->pcTaskName[ 0 ] ); +} +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetHandle == 1 ) + + static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) + { + TCB_t *pxNextTCB, *pxFirstTCB, *pxReturn = NULL; + UBaseType_t x; + char cNextChar; + BaseType_t xBreakLoop; + + /* This function is called with the scheduler suspended. 
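+		Suspension guarantees the list being searched cannot change while the
+		task names are being compared.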
*/ + + if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + { + listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + do + { + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + /* Check each character in the name looking for a match or + mismatch. */ + xBreakLoop = pdFALSE; + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + { + cNextChar = pxNextTCB->pcTaskName[ x ]; + + if( cNextChar != pcNameToQuery[ x ] ) + { + /* Characters didn't match. */ + xBreakLoop = pdTRUE; + } + else if( cNextChar == ( char ) 0x00 ) + { + /* Both strings terminated, a match must have been + found. */ + pxReturn = pxNextTCB; + xBreakLoop = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xBreakLoop != pdFALSE ) + { + break; + } + } + + if( pxReturn != NULL ) + { + /* The handle has been found. */ + break; + } + + } while( pxNextTCB != pxFirstTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return pxReturn; + } + +#endif /* INCLUDE_xTaskGetHandle */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetHandle == 1 ) + + TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + { + UBaseType_t uxQueue = configMAX_PRIORITIES; + TCB_t* pxTCB; + + /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */ + configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN ); + + vTaskSuspendAll(); + { + /* Search the ready lists. */ + do + { + uxQueue--; + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery ); + + if( pxTCB != NULL ) + { + /* Found the handle. */ + break; + } + + } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + /* Search the delayed lists. */ + if( pxTCB == NULL ) + { + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery ); + } + + if( pxTCB == NULL ) + { + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery ); + } + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + if( pxTCB == NULL ) + { + /* Search the suspended list. */ + pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery ); + } + } + #endif + + #if( INCLUDE_vTaskDelete == 1 ) + { + if( pxTCB == NULL ) + { + /* Search the deleted list. */ + pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery ); + } + } + #endif + } + ( void ) xTaskResumeAll(); + + return pxTCB; + } + +#endif /* INCLUDE_xTaskGetHandle */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime ) + { + UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES; + + vTaskSuspendAll(); + { + /* Is there a space in the array for each task in the system? 
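+			If not the array is left untouched and zero is returned.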
*/
+			if( uxArraySize >= uxCurrentNumberOfTasks )
+			{
+				/* Fill in a TaskStatus_t structure with information on each
+				task in the Ready state. */
+				do
+				{
+					uxQueue--;
+					uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
+
+				} while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+
+				/* Fill in a TaskStatus_t structure with information on each
+				task in the Blocked state. */
+				uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
+				uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );
+
+				#if( INCLUDE_vTaskDelete == 1 )
+				{
+					/* Fill in a TaskStatus_t structure with information on
+					each task that has been deleted but not yet cleaned up. */
+					uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
+				}
+				#endif
+
+				#if ( INCLUDE_vTaskSuspend == 1 )
+				{
+					/* Fill in a TaskStatus_t structure with information on
+					each task in the Suspended state. */
+					uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
+				}
+				#endif
+
+				#if ( configGENERATE_RUN_TIME_STATS == 1)
+				{
+					if( pulTotalRunTime != NULL )
+					{
+						#ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
+							portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
+						#else
+							*pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
+						#endif
+					}
+				}
+				#else
+				{
+					if( pulTotalRunTime != NULL )
+					{
+						*pulTotalRunTime = 0;
+					}
+				}
+				#endif
+			}
+			else
+			{
+				mtCOVERAGE_TEST_MARKER();
+			}
+		}
+		( void ) xTaskResumeAll();
+
+		return uxTask;
+	}
+
+#endif /* configUSE_TRACE_FACILITY */
+/*----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+
+	TaskHandle_t xTaskGetIdleTaskHandle( void )
+	{
+		/* If xTaskGetIdleTaskHandle() is called before the scheduler has been
+		started, then xIdleTaskHandle will be NULL. */
+		configASSERT( ( xIdleTaskHandle != NULL ) );
+		return xIdleTaskHandle;
+	}
+
+#endif /* INCLUDE_xTaskGetIdleTaskHandle */
+/*----------------------------------------------------------*/
+
+/* This conditional compilation should use inequality to 0, not equality to 1.
+This is to ensure vTaskStepTick() is available when user defined low power mode
+implementations require configUSE_TICKLESS_IDLE to be set to a value other than
+1. */
+#if ( configUSE_TICKLESS_IDLE != 0 )
+
+	void vTaskStepTick( const TickType_t xTicksToJump )
+	{
+		/* Correct the tick count value after a period during which the tick
+		was suppressed.  Note this does *not* call the tick hook function for
+		each stepped tick. */
+		configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
+		xTickCount += xTicksToJump;
+		traceINCREASE_TICK_COUNT( xTicksToJump );
+	}
+
+#endif /* configUSE_TICKLESS_IDLE */
+/*----------------------------------------------------------*/
+
+#if ( INCLUDE_xTaskAbortDelay == 1 )
+
+	BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
+	{
+	TCB_t *pxTCB = xTask;
+	BaseType_t xReturn;
+
+		configASSERT( pxTCB );
+
+		vTaskSuspendAll();
+		{
+			/* A task can only be prematurely removed from the Blocked state if
+			it is actually in the Blocked state. */
+			if( eTaskGetState( xTask ) == eBlocked )
+			{
+				xReturn = pdPASS;
+
+				/* Remove the reference to the task from the blocked list.
An + interrupt won't touch the xStateListItem because the + scheduler is suspended. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + + /* Is the task waiting on an event also? If so remove it from + the event list too. Interrupts can touch the event list item, + even though the scheduler is suspended, so a critical section + is used. */ + taskENTER_CRITICAL(); + { + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + pxTCB->ucDelayAborted = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + /* Place the unblocked task into the appropriate ready list. */ + prvAddTaskToReadyList( pxTCB ); + + /* A task being unblocked cannot cause an immediate context + switch if preemption is turned off. */ + #if ( configUSE_PREEMPTION == 1 ) + { + /* Preemption is on, but a context switch should only be + performed if the unblocked task has a priority that is + equal to or higher than the currently executing task. */ + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* Pend the yield to be performed when the scheduler + is unsuspended. */ + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_PREEMPTION */ + } + else + { + xReturn = pdFAIL; + } + } + ( void ) xTaskResumeAll(); + + return xReturn; + } + +#endif /* INCLUDE_xTaskAbortDelay */ +/*----------------------------------------------------------*/ + +BaseType_t xTaskIncrementTick( void ) +{ +TCB_t * pxTCB; +TickType_t xItemValue; +BaseType_t xSwitchRequired = pdFALSE; + + /* Called by the portable layer each time a tick interrupt occurs. + Increments the tick then checks to see if the new tick value will cause any + tasks to be unblocked. */ + traceTASK_INCREMENT_TICK( xTickCount ); + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + /* Minor optimisation. The tick count cannot change in this + block. */ + const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; + + /* Increment the RTOS tick, switching the delayed and overflowed + delayed lists if it wraps to 0. */ + xTickCount = xConstTickCount; + + if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ + { + taskSWITCH_DELAYED_LISTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* See if this tick has made a timeout expire. Tasks are stored in + the queue in the order of their wake time - meaning once one task + has been found whose block time has not expired there is no need to + look any further down the list. */ + if( xConstTickCount >= xNextTaskUnblockTime ) + { + for( ;; ) + { + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) + { + /* The delayed list is empty. Set xNextTaskUnblockTime + to the maximum possible value so it is extremely + unlikely that the + if( xTickCount >= xNextTaskUnblockTime ) test will pass + next time through. */ + xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + break; + } + else + { + /* The delayed list is not empty, get the value of the + item at the head of the delayed list. This is the time + at which the task at the head of the delayed list must + be removed from the Blocked state. */ + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. 
Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
+					xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
+
+					if( xConstTickCount < xItemValue )
+					{
+						/* It is not time to unblock this item yet, but the
+						item value is the time at which the task at the head
+						of the blocked list must be removed from the Blocked
+						state - so record the item value in
+						xNextTaskUnblockTime. */
+						xNextTaskUnblockTime = xItemValue;
+						break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
+					}
+					else
+					{
+						mtCOVERAGE_TEST_MARKER();
+					}
+
+					/* It is time to remove the item from the Blocked state. */
+					( void ) uxListRemove( &( pxTCB->xStateListItem ) );
+
+					/* Is the task waiting on an event also?  If so remove
+					it from the event list. */
+					if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
+					{
+						( void ) uxListRemove( &( pxTCB->xEventListItem ) );
+					}
+					else
+					{
+						mtCOVERAGE_TEST_MARKER();
+					}
+
+					/* Place the unblocked task into the appropriate ready
+					list. */
+					prvAddTaskToReadyList( pxTCB );
+
+					/* A task being unblocked cannot cause an immediate
+					context switch if preemption is turned off. */
+					#if ( configUSE_PREEMPTION == 1 )
+					{
+						/* Preemption is on, but a context switch should
+						only be performed if the unblocked task has a
+						priority that is equal to or higher than the
+						currently executing task. */
+						if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
+						{
+							xSwitchRequired = pdTRUE;
+						}
+						else
+						{
+							mtCOVERAGE_TEST_MARKER();
+						}
+					}
+					#endif /* configUSE_PREEMPTION */
+				}
+			}
+		}
+
+		/* Tasks of equal priority to the currently running task will share
+		processing time (time slice) if preemption is on, and the application
+		writer has not explicitly turned time slicing off. */
+		#if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
+		{
+			if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 )
+			{
+				xSwitchRequired = pdTRUE;
+			}
+			else
+			{
+				mtCOVERAGE_TEST_MARKER();
+			}
+		}
+		#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
+
+		#if ( configUSE_TICK_HOOK == 1 )
+		{
+			/* Guard against the tick hook being called when the pended tick
+			count is being unwound (when the scheduler is being unlocked). */
+			if( uxPendedTicks == ( UBaseType_t ) 0U )
+			{
+				vApplicationTickHook();
+			}
+			else
+			{
+				mtCOVERAGE_TEST_MARKER();
+			}
+		}
+		#endif /* configUSE_TICK_HOOK */
+	}
+	else
+	{
+		++uxPendedTicks;
+
+		/* The tick hook gets called at regular intervals, even if the
+		scheduler is locked. */
+		#if ( configUSE_TICK_HOOK == 1 )
+		{
+			vApplicationTickHook();
+		}
+		#endif
+	}
+
+	#if ( configUSE_PREEMPTION == 1 )
+	{
+		if( xYieldPending != pdFALSE )
+		{
+			xSwitchRequired = pdTRUE;
+		}
+		else
+		{
+			mtCOVERAGE_TEST_MARKER();
+		}
+	}
+	#endif /* configUSE_PREEMPTION */
+
+	return xSwitchRequired;
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+	void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
+	{
+	TCB_t *xTCB;
+
+		/* If xTask is NULL then it is the task hook of the calling task that is
+		getting set. */
+		if( xTask == NULL )
+		{
+			xTCB = ( TCB_t * ) pxCurrentTCB;
+		}
+		else
+		{
+			xTCB = xTask;
+		}
+
+		/* Save the hook function in the TCB.  A critical section is required as
+		the value can be accessed from an interrupt.
*/
+		taskENTER_CRITICAL();
+		{
+			xTCB->pxTaskTag = pxHookFunction;
+		}
+		taskEXIT_CRITICAL();
+	}
+
+#endif /* configUSE_APPLICATION_TASK_TAG */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+	TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
+	{
+	TCB_t *pxTCB;
+	TaskHookFunction_t xReturn;
+
+		/* If xTask is NULL then it is the hook of the calling task that is
+		being queried. */
+		pxTCB = prvGetTCBFromHandle( xTask );
+
+		/* Read the hook function from the TCB.  A critical section is required
+		as the value can be accessed from an interrupt. */
+		taskENTER_CRITICAL();
+		{
+			xReturn = pxTCB->pxTaskTag;
+		}
+		taskEXIT_CRITICAL();
+
+		return xReturn;
+	}
+
+#endif /* configUSE_APPLICATION_TASK_TAG */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+	TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
+	{
+	TCB_t *pxTCB;
+	TaskHookFunction_t xReturn;
+	UBaseType_t uxSavedInterruptStatus;
+
+		/* If xTask is NULL then it is the hook of the calling task that is
+		being queried. */
+		pxTCB = prvGetTCBFromHandle( xTask );
+
+		/* Read the hook function from the TCB.  A critical section is required
+		as the value can be accessed from an interrupt. */
+		uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+		{
+			xReturn = pxTCB->pxTaskTag;
+		}
+		portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+
+		return xReturn;
+	}
+
+#endif /* configUSE_APPLICATION_TASK_TAG */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_APPLICATION_TASK_TAG == 1 )
+
+	BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )
+	{
+	TCB_t *xTCB;
+	BaseType_t xReturn;
+
+		/* If xTask is NULL then we are calling our own task hook. */
+		if( xTask == NULL )
+		{
+			xTCB = pxCurrentTCB;
+		}
+		else
+		{
+			xTCB = xTask;
+		}
+
+		if( xTCB->pxTaskTag != NULL )
+		{
+			xReturn = xTCB->pxTaskTag( pvParameter );
+		}
+		else
+		{
+			xReturn = pdFAIL;
+		}
+
+		return xReturn;
+	}
+
+#endif /* configUSE_APPLICATION_TASK_TAG */
+/*-----------------------------------------------------------*/
+
+void vTaskSwitchContext( void )
+{
+	if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE )
+	{
+		/* The scheduler is currently suspended - do not allow a context
+		switch. */
+		xYieldPending = pdTRUE;
+	}
+	else
+	{
+		xYieldPending = pdFALSE;
+		traceTASK_SWITCHED_OUT();
+
+		#if ( configGENERATE_RUN_TIME_STATS == 1 )
+		{
+			#ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
+				portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
+			#else
+				ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
+			#endif
+
+			/* Add the amount of time the task has been running to the
+			accumulated time so far.  The time the task started running was
+			stored in ulTaskSwitchedInTime.  Note that there is no overflow
+			protection here so count values are only valid until the timer
+			overflows.  The guard against negative values is to protect
+			against suspect run time stat counter implementations - which
+			are provided by the application, not the kernel. */
+			if( ulTotalRunTime > ulTaskSwitchedInTime )
+			{
+				pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime );
+			}
+			else
+			{
+				mtCOVERAGE_TEST_MARKER();
+			}
+			ulTaskSwitchedInTime = ulTotalRunTime;
+		}
+		#endif /* configGENERATE_RUN_TIME_STATS */
+
+		/* Check for stack overflow, if configured. */
+		taskCHECK_FOR_STACK_OVERFLOW();
+
+		/* Before the currently running task is switched out, save its errno.
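+		With configUSE_POSIX_ERRNO set to 1 each task keeps a private copy, so
+		an errno value set by one task cannot be overwritten by another.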
*/
+		#if( configUSE_POSIX_ERRNO == 1 )
+		{
+			pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
+		}
+		#endif
+
+		/* Select a new task to run using either the generic C or port
+		optimised asm code. */
+		taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
+		traceTASK_SWITCHED_IN();
+
+		/* After the new task is switched in, update the global errno. */
+		#if( configUSE_POSIX_ERRNO == 1 )
+		{
+			FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
+		}
+		#endif
+
+		#if ( configUSE_NEWLIB_REENTRANT == 1 )
+		{
+			/* Switch Newlib's _impure_ptr variable to point to the _reent
+			structure specific to this task. */
+			_impure_ptr = &( pxCurrentTCB->xNewLib_reent );
+		}
+		#endif /* configUSE_NEWLIB_REENTRANT */
+	}
+}
+/*-----------------------------------------------------------*/
+
+void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
+{
+	configASSERT( pxEventList );
+
+	/* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
+	SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
+
+	/* Place the event list item of the TCB in the appropriate event list.
+	This is placed in the list in priority order so the highest priority task
+	is the first to be woken by the event.  The queue that contains the event
+	list is locked, preventing simultaneous access from interrupts. */
+	vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
+
+	prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
+}
+/*-----------------------------------------------------------*/
+
+void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
+{
+	configASSERT( pxEventList );
+
+	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED.  It is used by
+	the event groups implementation. */
+	configASSERT( uxSchedulerSuspended != 0 );
+
+	/* Store the item value in the event list item.  It is safe to access the
+	event list item here as interrupts won't access the event list item of a
+	task that is not in the Blocked state. */
+	listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
+
+	/* Place the event list item of the TCB at the end of the appropriate event
+	list.  It is safe to access the event list here because it is part of an
+	event group implementation - and interrupts don't access event groups
+	directly (instead they access them indirectly by pending function calls to
+	the task level). */
+	vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
+
+	prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
+}
+/*-----------------------------------------------------------*/
+
+#if( configUSE_TIMERS == 1 )
+
+	void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
+	{
+		configASSERT( pxEventList );
+
+		/* This function should not be called by application code hence the
+		'Restricted' in its name.  It is not part of the public API.  It is
+		designed for use by kernel code, and has special calling requirements -
+		it should be called with the scheduler suspended. */
+
+		/* Place the event list item of the TCB in the appropriate event list.
+		In this case it is assumed that this is the only task that is going to
+		be waiting on this event list, so the faster vListInsertEnd() function
+		can be used in place of vListInsert.
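+		vListInsert() would walk the list to keep it ordered, which is
+		unnecessary overhead when only one task will ever wait here.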
*/ + vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) ); + + /* If the task should block indefinitely then set the block time to a + value that will be recognised as an indefinite delay inside the + prvAddCurrentTaskToDelayedList() function. */ + if( xWaitIndefinitely != pdFALSE ) + { + xTicksToWait = portMAX_DELAY; + } + + traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) ); + prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely ); + } + +#endif /* configUSE_TIMERS */ +/*-----------------------------------------------------------*/ + +BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) +{ +TCB_t *pxUnblockedTCB; +BaseType_t xReturn; + + /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be + called from a critical section within an ISR. */ + + /* The event list is sorted in priority order, so the first in the list can + be removed as it is known to be the highest priority. Remove the TCB from + the delayed list, and add it to the ready list. + + If an event is for a queue that is locked then this function will never + get called - the lock count on the queue will get modified instead. This + means exclusive access to the event list is guaranteed here. + + This function assumes that a check has already been made to ensure that + pxEventList is not empty. */ + pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + configASSERT( pxUnblockedTCB ); + ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) ); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxUnblockedTCB ); + + #if( configUSE_TICKLESS_IDLE != 0 ) + { + /* If a task is blocked on a kernel object then xNextTaskUnblockTime + might be set to the blocked task's time out time. If the task is + unblocked for a reason other than a timeout xNextTaskUnblockTime is + normally left unchanged, because it is automatically reset to a new + value when the tick count equals xNextTaskUnblockTime. However if + tickless idling is used it might be more important to enter sleep mode + at the earliest possible time - so reset xNextTaskUnblockTime here to + ensure it is updated at the earliest possible time. */ + prvResetNextTaskUnblockTime(); + } + #endif + } + else + { + /* The delayed and ready lists cannot be accessed, so hold this task + pending until the scheduler is resumed. */ + vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) ); + } + + if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* Return true if the task removed from the event list has a higher + priority than the calling task. This allows the calling task to know if + it should force a context switch now. */ + xReturn = pdTRUE; + + /* Mark that a yield is pending in case the user is not using the + "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ + xYieldPending = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue ) +{ +TCB_t *pxUnblockedTCB; + + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by + the event flags implementation. 
*/
+    configASSERT( uxSchedulerSuspended != pdFALSE );
+
+    /* Store the new item value in the event list. */
+    listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
+
+    /* Remove the event list item from the event flag.  Interrupts do not
+    access event flags. */
+    pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
+    configASSERT( pxUnblockedTCB );
+    ( void ) uxListRemove( pxEventListItem );
+
+    /* Remove the task from the delayed list and add it to the ready list.  The
+    scheduler is suspended so interrupts will not be accessing the ready
+    lists. */
+    ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
+    prvAddTaskToReadyList( pxUnblockedTCB );
+
+    if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
+    {
+        /* The unblocked task has a priority above that of the calling task, so
+        a context switch is required.  This function is called with the
+        scheduler suspended, so xYieldPending is set to make the context switch
+        occur as soon as the scheduler is resumed (unsuspended). */
+        xYieldPending = pdTRUE;
+    }
+}
+/*-----------------------------------------------------------*/
+
+void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
+{
+    configASSERT( pxTimeOut );
+    taskENTER_CRITICAL();
+    {
+        pxTimeOut->xOverflowCount = xNumOfOverflows;
+        pxTimeOut->xTimeOnEntering = xTickCount;
+    }
+    taskEXIT_CRITICAL();
+}
+/*-----------------------------------------------------------*/
+
+void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
+{
+    /* For internal use only as it does not use a critical section. */
+    pxTimeOut->xOverflowCount = xNumOfOverflows;
+    pxTimeOut->xTimeOnEntering = xTickCount;
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
+{
+BaseType_t xReturn;
+
+    configASSERT( pxTimeOut );
+    configASSERT( pxTicksToWait );
+
+    taskENTER_CRITICAL();
+    {
+        /* Minor optimisation.  The tick count cannot change in this block. */
+        const TickType_t xConstTickCount = xTickCount;
+        const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;
+
+        #if( INCLUDE_xTaskAbortDelay == 1 )
+            if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE )
+            {
+                /* The delay was aborted, which is not the same as a time out,
+                but has the same result. */
+                pxCurrentTCB->ucDelayAborted = pdFALSE;
+                xReturn = pdTRUE;
+            }
+            else
+        #endif
+
+        #if ( INCLUDE_vTaskSuspend == 1 )
+            if( *pxTicksToWait == portMAX_DELAY )
+            {
+                /* If INCLUDE_vTaskSuspend is set to 1 and the block time
+                specified is the maximum block time then the task should block
+                indefinitely, and therefore never time out. */
+                xReturn = pdFALSE;
+            }
+            else
+        #endif
+
+        if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
+        {
+            /* The tick count is greater than the time at which
+            vTaskSetTimeOutState() was called, but has also overflowed since
+            then.  It must have wrapped all the way around and gone past the
+            entry time again, so the full time-out period must have elapsed
+            since vTaskSetTimeOutState() was called.
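+
+            Editorial illustration (not upstream text): the time-out pair is
+            normally driven from application code in a loop of the following
+            shape, where prvReceiveBytes() is a hypothetical helper:
+
+            TimeOut_t xTimeOut;
+            TickType_t xTicksToWait = pdMS_TO_TICKS( 100 );
+
+            vTaskSetTimeOutState( &xTimeOut );
+
+            // Loop until data arrives or the 100ms window expires.  Each call
+            // to xTaskCheckForTimeOut() reduces xTicksToWait by the time that
+            // has already elapsed.
+            while( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
+            {
+                if( prvReceiveBytes() == pdPASS )
+                {
+                    break;
+                }
+            }
+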
*/ + xReturn = pdTRUE; + } + else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */ + { + /* Not a genuine timeout. Adjust parameters for time remaining. */ + *pxTicksToWait -= xElapsedTime; + vTaskInternalSetTimeOutState( pxTimeOut ); + xReturn = pdFALSE; + } + else + { + *pxTicksToWait = 0; + xReturn = pdTRUE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +void vTaskMissedYield( void ) +{ + xYieldPending = pdTRUE; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask ) + { + UBaseType_t uxReturn; + TCB_t const *pxTCB; + + if( xTask != NULL ) + { + pxTCB = xTask; + uxReturn = pxTCB->uxTaskNumber; + } + else + { + uxReturn = 0U; + } + + return uxReturn; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle ) + { + TCB_t * pxTCB; + + if( xTask != NULL ) + { + pxTCB = xTask; + pxTCB->uxTaskNumber = uxHandle; + } + } + +#endif /* configUSE_TRACE_FACILITY */ + +/* + * ----------------------------------------------------------- + * The Idle task. + * ---------------------------------------------------------- + * + * The portTASK_FUNCTION() macro is used to allow port/compiler specific + * language extensions. The equivalent prototype for this function is: + * + * void prvIdleTask( void *pvParameters ); + * + */ +static portTASK_FUNCTION( prvIdleTask, pvParameters ) +{ + /* Stop warnings. */ + ( void ) pvParameters; + + /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE + SCHEDULER IS STARTED. **/ + + /* In case a task that has a secure context deletes itself, in which case + the idle task is responsible for deleting the task's secure context, if + any. */ + portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE ); + + for( ;; ) + { + /* See if any tasks have deleted themselves - if so then the idle task + is responsible for freeing the deleted task's TCB and stack. */ + prvCheckTasksWaitingTermination(); + + #if ( configUSE_PREEMPTION == 0 ) + { + /* If we are not using preemption we keep forcing a task switch to + see if any other task has become available. If we are using + preemption we don't need to do this as any task becoming available + will automatically get the processor anyway. */ + taskYIELD(); + } + #endif /* configUSE_PREEMPTION */ + + #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) + { + /* When using preemption tasks of equal priority will be + timesliced. If a task that is sharing the idle priority is ready + to run then the idle task should yield before the end of the + timeslice. + + A critical region is not required here as we are just reading from + the list, and an occasional incorrect value will not matter. If + the ready list at the idle priority contains more than one task + then a task other than the idle task is ready to execute. 
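+
+            Editorial aside (not upstream text): the idle hook invoked a
+            little further down, when configUSE_IDLE_HOOK is 1, must never
+            block; a minimal compliant hook is simply:
+
+            void vApplicationIdleHook( void )
+            {
+                // Do background housekeeping and return promptly so the idle
+                // task can keep cleaning up deleted tasks.  Never call a
+                // blocking FreeRTOS API from here.
+            }
+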
*/ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 ) + { + taskYIELD(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */ + + #if ( configUSE_IDLE_HOOK == 1 ) + { + extern void vApplicationIdleHook( void ); + + /* Call the user defined function from within the idle task. This + allows the application designer to add background functionality + without the overhead of a separate task. + NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, + CALL A FUNCTION THAT MIGHT BLOCK. */ + vApplicationIdleHook(); + } + #endif /* configUSE_IDLE_HOOK */ + + /* This conditional compilation should use inequality to 0, not equality + to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when + user defined low power mode implementations require + configUSE_TICKLESS_IDLE to be set to a value other than 1. */ + #if ( configUSE_TICKLESS_IDLE != 0 ) + { + TickType_t xExpectedIdleTime; + + /* It is not desirable to suspend then resume the scheduler on + each iteration of the idle task. Therefore, a preliminary + test of the expected idle time is performed without the + scheduler suspended. The result here is not necessarily + valid. */ + xExpectedIdleTime = prvGetExpectedIdleTime(); + + if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP ) + { + vTaskSuspendAll(); + { + /* Now the scheduler is suspended, the expected idle + time can be sampled again, and this time its value can + be used. */ + configASSERT( xNextTaskUnblockTime >= xTickCount ); + xExpectedIdleTime = prvGetExpectedIdleTime(); + + /* Define the following macro to set xExpectedIdleTime to 0 + if the application does not want + portSUPPRESS_TICKS_AND_SLEEP() to be called. */ + configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime ); + + if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP ) + { + traceLOW_POWER_IDLE_BEGIN(); + portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ); + traceLOW_POWER_IDLE_END(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + ( void ) xTaskResumeAll(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TICKLESS_IDLE */ + } +} +/*-----------------------------------------------------------*/ + +#if( configUSE_TICKLESS_IDLE != 0 ) + + eSleepModeStatus eTaskConfirmSleepModeStatus( void ) + { + /* The idle task exists in addition to the application tasks. */ + const UBaseType_t uxNonApplicationTasks = 1; + eSleepModeStatus eReturn = eStandardSleep; + + if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 ) + { + /* A task was made ready while the scheduler was suspended. */ + eReturn = eAbortSleep; + } + else if( xYieldPending != pdFALSE ) + { + /* A yield was pended while the scheduler was suspended. */ + eReturn = eAbortSleep; + } + else + { + /* If all the tasks are in the suspended list (which might mean they + have an infinite block time rather than actually being suspended) + then it is safe to turn all clocks off and just wait for external + interrupts. 
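+
+            Editorial sketch (not upstream text): a port's
+            portSUPPRESS_TICKS_AND_SLEEP() implementation is expected to
+            re-check this status with interrupts masked before committing to
+            sleep, along these lines (the masking calls are hypothetical,
+            Cortex-M style):
+
+            __disable_irq();
+            if( eTaskConfirmSleepModeStatus() == eAbortSleep )
+            {
+                // Something became ready while preparing to sleep - abort.
+                __enable_irq();
+            }
+            else
+            {
+                // Stop the tick, enter the low power state, then compensate
+                // the tick count on exit.
+            }
+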
*/ + if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) ) + { + eReturn = eNoTasksWaitingTimeout; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + return eReturn; + } + +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue ) + { + TCB_t *pxTCB; + + if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) + { + pxTCB = prvGetTCBFromHandle( xTaskToSet ); + pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue; + } + } + +#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ) + { + void *pvReturn = NULL; + TCB_t *pxTCB; + + if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) + { + pxTCB = prvGetTCBFromHandle( xTaskToQuery ); + pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ]; + } + else + { + pvReturn = NULL; + } + + return pvReturn; + } + +#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */ +/*-----------------------------------------------------------*/ + +#if ( portUSING_MPU_WRAPPERS == 1 ) + + void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions ) + { + TCB_t *pxTCB; + + /* If null is passed in here then we are modifying the MPU settings of + the calling task. */ + pxTCB = prvGetTCBFromHandle( xTaskToModify ); + + vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 ); + } + +#endif /* portUSING_MPU_WRAPPERS */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseTaskLists( void ) +{ +UBaseType_t uxPriority; + + for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ ) + { + vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) ); + } + + vListInitialise( &xDelayedTaskList1 ); + vListInitialise( &xDelayedTaskList2 ); + vListInitialise( &xPendingReadyList ); + + #if ( INCLUDE_vTaskDelete == 1 ) + { + vListInitialise( &xTasksWaitingTermination ); + } + #endif /* INCLUDE_vTaskDelete */ + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + vListInitialise( &xSuspendedTaskList ); + } + #endif /* INCLUDE_vTaskSuspend */ + + /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList + using list2. */ + pxDelayedTaskList = &xDelayedTaskList1; + pxOverflowDelayedTaskList = &xDelayedTaskList2; +} +/*-----------------------------------------------------------*/ + +static void prvCheckTasksWaitingTermination( void ) +{ + + /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/ + + #if ( INCLUDE_vTaskDelete == 1 ) + { + TCB_t *pxTCB; + + /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL() + being called too often in the idle task. */ + while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) + { + taskENTER_CRITICAL(); + { + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. 
*/
+                ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
+                --uxCurrentNumberOfTasks;
+                --uxDeletedTasksWaitingCleanUp;
+            }
+            taskEXIT_CRITICAL();
+
+            prvDeleteTCB( pxTCB );
+        }
+    }
+    #endif /* INCLUDE_vTaskDelete */
+}
+/*-----------------------------------------------------------*/
+
+#if( configUSE_TRACE_FACILITY == 1 )
+
+    void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState )
+    {
+    TCB_t *pxTCB;
+
+        /* If xTask is NULL then get the state of the calling task. */
+        pxTCB = prvGetTCBFromHandle( xTask );
+
+        pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
+        pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName [ 0 ] );
+        pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
+        pxTaskStatus->pxStackBase = pxTCB->pxStack;
+        pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;
+
+        #if ( configUSE_MUTEXES == 1 )
+        {
+            pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
+        }
+        #else
+        {
+            pxTaskStatus->uxBasePriority = 0;
+        }
+        #endif
+
+        #if ( configGENERATE_RUN_TIME_STATS == 1 )
+        {
+            pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
+        }
+        #else
+        {
+            pxTaskStatus->ulRunTimeCounter = 0;
+        }
+        #endif
+
+        /* Obtaining the task state is a little fiddly, so is only done if the
+        value of eState passed into this function is eInvalid - otherwise the
+        state is just set to whatever is passed in. */
+        if( eState != eInvalid )
+        {
+            if( pxTCB == pxCurrentTCB )
+            {
+                pxTaskStatus->eCurrentState = eRunning;
+            }
+            else
+            {
+                pxTaskStatus->eCurrentState = eState;
+
+                #if ( INCLUDE_vTaskSuspend == 1 )
+                {
+                    /* If the task is in the suspended list then there is a
+                    chance it is actually just blocked indefinitely - so really
+                    it should be reported as being in the Blocked state. */
+                    if( eState == eSuspended )
+                    {
+                        vTaskSuspendAll();
+                        {
+                            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
+                            {
+                                pxTaskStatus->eCurrentState = eBlocked;
+                            }
+                        }
+                        ( void ) xTaskResumeAll();
+                    }
+                }
+                #endif /* INCLUDE_vTaskSuspend */
+            }
+        }
+        else
+        {
+            pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
+        }
+
+        /* Obtaining the stack space takes some time, so the xGetFreeStackSpace
+        parameter is provided to allow it to be skipped. */
+        if( xGetFreeStackSpace != pdFALSE )
+        {
+            #if ( portSTACK_GROWTH > 0 )
+            {
+                pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
+            }
+            #else
+            {
+                pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
+            }
+            #endif
+        }
+        else
+        {
+            pxTaskStatus->usStackHighWaterMark = 0;
+        }
+    }
+
+#endif /* configUSE_TRACE_FACILITY */
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_TRACE_FACILITY == 1 )
+
+    static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState )
+    {
+    configLIST_VOLATILE TCB_t *pxNextTCB, *pxFirstTCB;
+    UBaseType_t uxTask = 0;
+
+        if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
+        {
+            listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
+
+            /* Populate a TaskStatus_t structure within the
+            pxTaskStatusArray array for each task that is referenced from
+            pxList.  See the definition of TaskStatus_t in task.h for the
+            meaning of each TaskStatus_t structure member.
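+
+            Editorial sketch (not upstream text): application code normally
+            reaches this loop via uxTaskGetSystemState(); uxCount and
+            pxStatuses are hypothetical names:
+
+            UBaseType_t uxCount = uxTaskGetNumberOfTasks();
+            TaskStatus_t *pxStatuses = pvPortMalloc( uxCount * sizeof( TaskStatus_t ) );
+
+            if( pxStatuses != NULL )
+            {
+                // One TaskStatus_t is filled in per task; NULL means the
+                // total run time is not required.
+                uxCount = uxTaskGetSystemState( pxStatuses, uxCount, NULL );
+                vPortFree( pxStatuses );
+            }
+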
*/ + do + { + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState ); + uxTask++; + } while( pxNextTCB != pxFirstTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return uxTask; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) + + static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) + { + uint32_t ulCount = 0U; + + while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE ) + { + pucStackByte -= portSTACK_GROWTH; + ulCount++; + } + + ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */ + + return ( configSTACK_DEPTH_TYPE ) ulCount; + } + +#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + + /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the + same except for their return type. Using configSTACK_DEPTH_TYPE allows the + user to determine the return type. It gets around the problem of the value + overflowing on 8-bit types without breaking backward compatibility for + applications that expect an 8-bit return type. */ + configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + uint8_t *pucEndOfStack; + configSTACK_DEPTH_TYPE uxReturn; + + /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are + the same except for their return type. Using configSTACK_DEPTH_TYPE + allows the user to determine the return type. It gets around the + problem of the value overflowing on 8-bit types without breaking + backward compatibility for applications that expect an 8-bit return + type. */ + + pxTCB = prvGetTCBFromHandle( xTask ); + + #if portSTACK_GROWTH < 0 + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxStack; + } + #else + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack; + } + #endif + + uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack ); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + + UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + uint8_t *pucEndOfStack; + UBaseType_t uxReturn; + + pxTCB = prvGetTCBFromHandle( xTask ); + + #if portSTACK_GROWTH < 0 + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxStack; + } + #else + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack; + } + #endif + + uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack ); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskGetStackHighWaterMark */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelete == 1 ) + + static void prvDeleteTCB( TCB_t *pxTCB ) + { + /* This call is required specifically for the TriCore port. It must be + above the vPortFree() calls. 
The call is also used by ports/demos that + want to allocate and clean RAM statically. */ + portCLEAN_UP_TCB( pxTCB ); + + /* Free up the memory allocated by the scheduler for the task. It is up + to the task to free any memory allocated at the application level. */ + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + _reclaim_reent( &( pxTCB->xNewLib_reent ) ); + } + #endif /* configUSE_NEWLIB_REENTRANT */ + + #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) + { + /* The task can only have been allocated dynamically - free both + the stack and TCB. */ + vPortFree( pxTCB->pxStack ); + vPortFree( pxTCB ); + } + #elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ + { + /* The task could have been allocated statically or dynamically, so + check what was statically allocated before trying to free the + memory. */ + if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ) + { + /* Both the stack and TCB were allocated dynamically, so both + must be freed. */ + vPortFree( pxTCB->pxStack ); + vPortFree( pxTCB ); + } + else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY ) + { + /* Only the stack was statically allocated, so the TCB is the + only memory that must be freed. */ + vPortFree( pxTCB ); + } + else + { + /* Neither the stack nor the TCB were allocated dynamically, so + nothing needs to be freed. */ + configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB ); + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + } + +#endif /* INCLUDE_vTaskDelete */ +/*-----------------------------------------------------------*/ + +static void prvResetNextTaskUnblockTime( void ) +{ +TCB_t *pxTCB; + + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) + { + /* The new current delayed list is empty. Set xNextTaskUnblockTime to + the maximum possible value so it is extremely unlikely that the + if( xTickCount >= xNextTaskUnblockTime ) test will pass until + there is an item in the delayed list. */ + xNextTaskUnblockTime = portMAX_DELAY; + } + else + { + /* The new current delayed list is not empty, get the value of + the item at the head of the delayed list. This is the time at + which the task at the head of the delayed list should be removed + from the Blocked state. */ + ( pxTCB ) = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xStateListItem ) ); + } +} +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + + TaskHandle_t xTaskGetCurrentTaskHandle( void ) + { + TaskHandle_t xReturn; + + /* A critical section is not required as this is not called from + an interrupt and the current TCB will always be the same for any + individual execution thread. 
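+
+        Editorial example (not upstream text): the handle lets a task refer
+        to itself explicitly in APIs that also accept NULL to mean "this
+        task":
+
+        TaskHandle_t xSelf = xTaskGetCurrentTaskHandle();
+        UBaseType_t uxMyPriority = uxTaskPriorityGet( xSelf );  // same as uxTaskPriorityGet( NULL )
+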
*/ + xReturn = pxCurrentTCB; + + return xReturn; + } + +#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + + BaseType_t xTaskGetSchedulerState( void ) + { + BaseType_t xReturn; + + if( xSchedulerRunning == pdFALSE ) + { + xReturn = taskSCHEDULER_NOT_STARTED; + } + else + { + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + xReturn = taskSCHEDULER_RUNNING; + } + else + { + xReturn = taskSCHEDULER_SUSPENDED; + } + } + + return xReturn; + } + +#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder ) + { + TCB_t * const pxMutexHolderTCB = pxMutexHolder; + BaseType_t xReturn = pdFALSE; + + /* If the mutex was given back by an interrupt while the queue was + locked then the mutex holder might now be NULL. _RB_ Is this still + needed as interrupts can no longer use mutexes? */ + if( pxMutexHolder != NULL ) + { + /* If the holder of the mutex has a priority below the priority of + the task attempting to obtain the mutex then it will temporarily + inherit the priority of the task attempting to obtain the mutex. */ + if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority ) + { + /* Adjust the mutex holder state to account for its new + priority. Only reset the event list item value if the value is + not being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + { + listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* If the task being modified is in the ready state it will need + to be moved into a new list. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE ) + { + if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Inherit the priority before being moved into the new list. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; + prvAddTaskToReadyList( pxMutexHolderTCB ); + } + else + { + /* Just inherit the priority. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; + } + + traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority ); + + /* Inheritance occurred. */ + xReturn = pdTRUE; + } + else + { + if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority ) + { + /* The base priority of the mutex holder is lower than the + priority of the task attempting to take the mutex, but the + current priority of the mutex holder is not lower than the + priority of the task attempting to take the mutex. + Therefore the mutex holder must have already inherited a + priority, but inheritance would have occurred if that had + not been the case. 
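+
+                Editorial worked example (not upstream text): task L (base
+                priority 1) holds the mutex and has already inherited
+                priority 3 from waiting task M.  If task H (priority 2) now
+                attempts to take the mutex, the test above fails (3 is not
+                below 2), but uxBasePriority 1 is below 2, so inheritance is
+                already in effect and pdTRUE is returned here without
+                changing L's priority.
+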
*/ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) + { + TCB_t * const pxTCB = pxMutexHolder; + BaseType_t xReturn = pdFALSE; + + if( pxMutexHolder != NULL ) + { + /* A task can only have an inherited priority if it holds the mutex. + If the mutex is held by a task then it cannot be given from an + interrupt, and if a mutex is given by the holding task then it must + be the running state task. */ + configASSERT( pxTCB == pxCurrentTCB ); + configASSERT( pxTCB->uxMutexesHeld ); + ( pxTCB->uxMutexesHeld )--; + + /* Has the holder of the mutex inherited the priority of another + task? */ + if( pxTCB->uxPriority != pxTCB->uxBasePriority ) + { + /* Only disinherit if no other mutexes are held. */ + if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 ) + { + /* A task can only have an inherited priority if it holds + the mutex. If the mutex is held by a task then it cannot be + given from an interrupt, and if a mutex is given by the + holding task then it must be the running state task. Remove + the holding task from the ready list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Disinherit the priority before adding the task into the + new ready list. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority ); + pxTCB->uxPriority = pxTCB->uxBasePriority; + + /* Reset the event list item value. It cannot be in use for + any other purpose if this task is running, and it must be + running to give back the mutex. */ + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + prvAddTaskToReadyList( pxTCB ); + + /* Return true to indicate that a context switch is required. + This is only actually required in the corner case whereby + multiple mutexes were held and the mutexes were given back + in an order different to that in which they were taken. + If a context switch did not occur when the first mutex was + returned, even if a task was waiting on it, then a context + switch should occur when the last mutex is returned whether + a task is waiting on it or not. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask ) + { + TCB_t * const pxTCB = pxMutexHolder; + UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse; + const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1; + + if( pxMutexHolder != NULL ) + { + /* If pxMutexHolder is not NULL then the holder must hold at least + one mutex. */ + configASSERT( pxTCB->uxMutexesHeld ); + + /* Determine the priority to which the priority of the task that + holds the mutex should be set. 
This will be the greater of the
+            holding task's base priority and the priority of the highest
+            priority task that is waiting to obtain the mutex. */
+            if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
+            {
+                uxPriorityToUse = uxHighestPriorityWaitingTask;
+            }
+            else
+            {
+                uxPriorityToUse = pxTCB->uxBasePriority;
+            }
+
+            /* Does the priority need to change? */
+            if( pxTCB->uxPriority != uxPriorityToUse )
+            {
+                /* Only disinherit if no other mutexes are held.  This is a
+                simplification in the priority inheritance implementation.  If
+                the task that holds the mutex is also holding other mutexes then
+                the other mutexes may have caused the priority inheritance. */
+                if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
+                {
+                    /* If a task has timed out because it already holds the
+                    mutex it was trying to obtain then it cannot have inherited
+                    its own priority. */
+                    configASSERT( pxTCB != pxCurrentTCB );
+
+                    /* Disinherit the priority, remembering the previous
+                    priority to facilitate determining the subject task's
+                    state. */
+                    traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
+                    uxPriorityUsedOnEntry = pxTCB->uxPriority;
+                    pxTCB->uxPriority = uxPriorityToUse;
+
+                    /* Only reset the event list item value if the value is not
+                    being used for anything else. */
+                    if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
+                    {
+                        listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+                    }
+                    else
+                    {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+
+                    /* If the running task is not the task that holds the mutex
+                    then the task that holds the mutex could be in either the
+                    Ready, Blocked or Suspended states.  Only remove the task
+                    from its current state list if it is in the Ready state as
+                    the task's priority is going to change and there is one
+                    Ready list per priority. */
+                    if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
+                    {
+                        if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
+                        {
+                            taskRESET_READY_PRIORITY( pxTCB->uxPriority );
+                        }
+                        else
+                        {
+                            mtCOVERAGE_TEST_MARKER();
+                        }
+
+                        prvAddTaskToReadyList( pxTCB );
+                    }
+                    else
+                    {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+
+#endif /* configUSE_MUTEXES */
+/*-----------------------------------------------------------*/
+
+#if ( portCRITICAL_NESTING_IN_TCB == 1 )
+
+    void vTaskEnterCritical( void )
+    {
+        portDISABLE_INTERRUPTS();
+
+        if( xSchedulerRunning != pdFALSE )
+        {
+            ( pxCurrentTCB->uxCriticalNesting )++;
+
+            /* This is not the interrupt safe version of the enter critical
+            function so assert() if it is being called from an interrupt
+            context.  Only API functions that end in "FromISR" can be used in an
+            interrupt.  Only assert if the critical nesting count is 1 to
+            protect against recursive calls if the assert function also uses a
+            critical section.
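+
+            Editorial example (not upstream text): application code reaches
+            this function through the taskENTER_CRITICAL()/taskEXIT_CRITICAL()
+            pair, which may nest:
+
+            taskENTER_CRITICAL();
+            {
+                // Interrupts up to the port's mask level are disabled here.
+                // Keep the region short and never call a blocking API in it.
+            }
+            taskEXIT_CRITICAL();
+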
*/ + if( pxCurrentTCB->uxCriticalNesting == 1 ) + { + portASSERT_IF_IN_ISR(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* portCRITICAL_NESTING_IN_TCB */ +/*-----------------------------------------------------------*/ + +#if ( portCRITICAL_NESTING_IN_TCB == 1 ) + + void vTaskExitCritical( void ) + { + if( xSchedulerRunning != pdFALSE ) + { + if( pxCurrentTCB->uxCriticalNesting > 0U ) + { + ( pxCurrentTCB->uxCriticalNesting )--; + + if( pxCurrentTCB->uxCriticalNesting == 0U ) + { + portENABLE_INTERRUPTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* portCRITICAL_NESTING_IN_TCB */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) + + static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) + { + size_t x; + + /* Start by copying the entire string. */ + strcpy( pcBuffer, pcTaskName ); + + /* Pad the end of the string with spaces to ensure columns line up when + printed out. */ + for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ ) + { + pcBuffer[ x ] = ' '; + } + + /* Terminate. */ + pcBuffer[ x ] = ( char ) 0x00; + + /* Return the new end of string. */ + return &( pcBuffer[ x ] ); + } + +#endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + void vTaskList( char * pcWriteBuffer ) + { + TaskStatus_t *pxTaskStatusArray; + UBaseType_t uxArraySize, x; + char cStatus; + + /* + * PLEASE NOTE: + * + * This function is provided for convenience only, and is used by many + * of the demo applications. Do not consider it to be part of the + * scheduler. + * + * vTaskList() calls uxTaskGetSystemState(), then formats part of the + * uxTaskGetSystemState() output into a human readable table that + * displays task names, states and stack usage. + * + * vTaskList() has a dependency on the sprintf() C library function that + * might bloat the code size, use a lot of stack, and provide different + * results on different platforms. An alternative, tiny, third party, + * and limited functionality implementation of sprintf() is provided in + * many of the FreeRTOS/Demo sub-directories in a file called + * printf-stdarg.c (note printf-stdarg.c does not provide a full + * snprintf() implementation!). + * + * It is recommended that production systems call uxTaskGetSystemState() + * directly to get access to raw stats data, rather than indirectly + * through a call to vTaskList(). + */ + + + /* Make sure the write buffer does not contain a string. */ + *pcWriteBuffer = ( char ) 0x00; + + /* Take a snapshot of the number of tasks in case it changes while this + function is executing. */ + uxArraySize = uxCurrentNumberOfTasks; + + /* Allocate an array index for each task. NOTE! if + configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will + equate to NULL. */ + pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. 
*/ + + if( pxTaskStatusArray != NULL ) + { + /* Generate the (binary) data. */ + uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL ); + + /* Create a human readable table from the binary data. */ + for( x = 0; x < uxArraySize; x++ ) + { + switch( pxTaskStatusArray[ x ].eCurrentState ) + { + case eRunning: cStatus = tskRUNNING_CHAR; + break; + + case eReady: cStatus = tskREADY_CHAR; + break; + + case eBlocked: cStatus = tskBLOCKED_CHAR; + break; + + case eSuspended: cStatus = tskSUSPENDED_CHAR; + break; + + case eDeleted: cStatus = tskDELETED_CHAR; + break; + + case eInvalid: /* Fall through. */ + default: /* Should not get here, but it is included + to prevent static checking errors. */ + cStatus = ( char ) 0x00; + break; + } + + /* Write the task name to the string, padding with spaces so it + can be printed in tabular form more easily. */ + pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName ); + + /* Write the rest of the string. */ + sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */ + pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */ + } + + /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION + is 0 then vPortFree() will be #defined to nothing. */ + vPortFree( pxTaskStatusArray ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + void vTaskGetRunTimeStats( char *pcWriteBuffer ) + { + TaskStatus_t *pxTaskStatusArray; + UBaseType_t uxArraySize, x; + uint32_t ulTotalTime, ulStatsAsPercentage; + + #if( configUSE_TRACE_FACILITY != 1 ) + { + #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats(). + } + #endif + + /* + * PLEASE NOTE: + * + * This function is provided for convenience only, and is used by many + * of the demo applications. Do not consider it to be part of the + * scheduler. + * + * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part + * of the uxTaskGetSystemState() output into a human readable table that + * displays the amount of time each task has spent in the Running state + * in both absolute and percentage terms. + * + * vTaskGetRunTimeStats() has a dependency on the sprintf() C library + * function that might bloat the code size, use a lot of stack, and + * provide different results on different platforms. An alternative, + * tiny, third party, and limited functionality implementation of + * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in + * a file called printf-stdarg.c (note printf-stdarg.c does not provide + * a full snprintf() implementation!). 
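+         *
+         * Editorial sketch (not upstream text): a direct call that also
+         * retrieves the total run time, with xStatuses and ulTotal as
+         * hypothetical names:
+         *
+         *     TaskStatus_t xStatuses[ 10 ];
+         *     uint32_t ulTotal;
+         *     UBaseType_t uxFilled = uxTaskGetSystemState( xStatuses, 10, &ulTotal );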
+         *
+         * It is recommended that production systems call uxTaskGetSystemState()
+         * directly to get access to raw stats data, rather than indirectly
+         * through a call to vTaskGetRunTimeStats().
+         */
+
+        /* Make sure the write buffer does not contain a string. */
+        *pcWriteBuffer = ( char ) 0x00;
+
+        /* Take a snapshot of the number of tasks in case it changes while this
+        function is executing. */
+        uxArraySize = uxCurrentNumberOfTasks;
+
+        /* Allocate an array index for each task.  NOTE!  If
+        configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
+        equate to NULL. */
+        pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
+
+        if( pxTaskStatusArray != NULL )
+        {
+            /* Generate the (binary) data. */
+            uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
+
+            /* For percentage calculations. */
+            ulTotalTime /= 100UL;
+
+            /* Avoid divide by zero errors. */
+            if( ulTotalTime > 0UL )
+            {
+                /* Create a human readable table from the binary data. */
+                for( x = 0; x < uxArraySize; x++ )
+                {
+                    /* What percentage of the total run time has the task used?
+                    This will always be rounded down to the nearest integer.
+                    ulTotalTime has already been divided by 100. */
+                    ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;
+
+                    /* Write the task name to the string, padding with
+                    spaces so it can be printed in tabular form more
+                    easily. */
+                    pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
+
+                    if( ulStatsAsPercentage > 0UL )
+                    {
+                        #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
+                        {
+                            sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
+                        }
+                        #else
+                        {
+                            /* sizeof( int ) == sizeof( long ) so a smaller
+                            printf() library can be used. */
+                            sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
+                        }
+                        #endif
+                    }
+                    else
+                    {
+                        /* If the percentage is zero here then the task has
+                        consumed less than 1% of the total run time. */
+                        #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
+                        {
+                            sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
+                        }
+                        #else
+                        {
+                            /* sizeof( int ) == sizeof( long ) so a smaller
+                            printf() library can be used. */
+                            sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
+                        }
+                        #endif
+                    }
+
+                    pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
+                }
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+
+            /* Free the array again.  NOTE!  If configSUPPORT_DYNAMIC_ALLOCATION
+            is 0 then vPortFree() will be #defined to nothing. */
+            vPortFree( pxTaskStatusArray );
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+
+#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
+/*-----------------------------------------------------------*/
+
+TickType_t uxTaskResetEventItemValue( void )
+{
+TickType_t uxReturn;
+
+    uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) );
+
+    /* Reset the event list item to its normal value - so it can be used with
+    queues and semaphores. */
+    listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+
+    return uxReturn;
+}
+/*-----------------------------------------------------------*/
+
+#if ( configUSE_MUTEXES == 1 )
+
+    TaskHandle_t pvTaskIncrementMutexHeldCount( void )
+    {
+        /* If xSemaphoreCreateMutex() is called before any tasks have been created
+        then pxCurrentTCB will be NULL. */
+        if( pxCurrentTCB != NULL )
+        {
+            ( pxCurrentTCB->uxMutexesHeld )++;
+        }
+
+        return pxCurrentTCB;
+    }
+
+#endif /* configUSE_MUTEXES */
+/*-----------------------------------------------------------*/
+
+#if( configUSE_TASK_NOTIFICATIONS == 1 )
+
+    uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait )
+    {
+    uint32_t ulReturn;
+
+        taskENTER_CRITICAL();
+        {
+            /* Only block if the notification count is not already non-zero. */
+            if( pxCurrentTCB->ulNotifiedValue == 0UL )
+            {
+                /* Mark this task as waiting for a notification. */
+                pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;
+
+                if( xTicksToWait > ( TickType_t ) 0 )
+                {
+                    prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
+                    traceTASK_NOTIFY_TAKE_BLOCK();
+
+                    /* All ports are written to allow a yield in a critical
+                    section (some will yield immediately, others wait until the
+                    critical section exits) - but it is not something that
+                    application code should ever do. */
+                    portYIELD_WITHIN_API();
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        taskEXIT_CRITICAL();
+
+        taskENTER_CRITICAL();
+        {
+            traceTASK_NOTIFY_TAKE();
+            ulReturn = pxCurrentTCB->ulNotifiedValue;
+
+            if( ulReturn != 0UL )
+            {
+                if( xClearCountOnExit != pdFALSE )
+                {
+                    pxCurrentTCB->ulNotifiedValue = 0UL;
+                }
+                else
+                {
+                    pxCurrentTCB->ulNotifiedValue = ulReturn - ( uint32_t ) 1;
+                }
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+
+            pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
+        }
+        taskEXIT_CRITICAL();
+
+        return ulReturn;
+    }
+
+#endif /* configUSE_TASK_NOTIFICATIONS */
+/*-----------------------------------------------------------*/
+
+#if( configUSE_TASK_NOTIFICATIONS == 1 )
+
+    BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait )
+    {
+    BaseType_t xReturn;
+
+        taskENTER_CRITICAL();
+        {
+            /* Only block if a notification is not already pending. */
+            if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED )
+            {
+                /* Clear bits in the task's notification value as bits may get
+                set by the notifying task or interrupt.  This can be used to
+                clear the value to zero. */
+                pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnEntry;
+
+                /* Mark this task as waiting for a notification.
*/ + pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION; + + if( xTicksToWait > ( TickType_t ) 0 ) + { + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + traceTASK_NOTIFY_WAIT_BLOCK(); + + /* All ports are written to allow a yield in a critical + section (some will yield immediately, others wait until the + critical section exits) - but it is not something that + application code should ever do. */ + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + taskENTER_CRITICAL(); + { + traceTASK_NOTIFY_WAIT(); + + if( pulNotificationValue != NULL ) + { + /* Output the current notification value, which may or may not + have changed. */ + *pulNotificationValue = pxCurrentTCB->ulNotifiedValue; + } + + /* If ucNotifyValue is set then either the task never entered the + blocked state (because a notification was already pending) or the + task unblocked because of a notification. Otherwise the task + unblocked because of a timeout. */ + if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED ) + { + /* A notification was not received. */ + xReturn = pdFALSE; + } + else + { + /* A notification was already pending or a notification was + received while the task was waiting. */ + pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnExit; + xReturn = pdTRUE; + } + + pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue ) + { + TCB_t * pxTCB; + BaseType_t xReturn = pdPASS; + uint8_t ucOriginalNotifyState; + + configASSERT( xTaskToNotify ); + pxTCB = xTaskToNotify; + + taskENTER_CRITICAL(); + { + if( pulPreviousNotificationValue != NULL ) + { + *pulPreviousNotificationValue = pxTCB->ulNotifiedValue; + } + + ucOriginalNotifyState = pxTCB->ucNotifyState; + + pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED; + + switch( eAction ) + { + case eSetBits : + pxTCB->ulNotifiedValue |= ulValue; + break; + + case eIncrement : + ( pxTCB->ulNotifiedValue )++; + break; + + case eSetValueWithOverwrite : + pxTCB->ulNotifiedValue = ulValue; + break; + + case eSetValueWithoutOverwrite : + if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED ) + { + pxTCB->ulNotifiedValue = ulValue; + } + else + { + /* The value could not be written to the task. */ + xReturn = pdFAIL; + } + break; + + case eNoAction: + /* The task is being notified without its notify value being + updated. */ + break; + + default: + /* Should not get here if all enums are handled. + Artificially force an assert by testing a value the + compiler can't assume is const. */ + configASSERT( pxTCB->ulNotifiedValue == ~0UL ); + + break; + } + + traceTASK_NOTIFY(); + + /* If the task is in the blocked state specifically to wait for a + notification then unblock it now. */ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* The task should not have been on an event list. 
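+
+            Editorial note (not upstream text): this path is reached through
+            the xTaskNotify() family, e.g. an event-flag style send where
+            xRxTaskHandle is a hypothetical handle:
+
+            // Set bit 0 in the receiving task's notification value and
+            // unblock it if it is waiting in xTaskNotifyWait().
+            xTaskNotify( xRxTaskHandle, 0x01, eSetBits );
+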
*/
+                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
+
+                #if( configUSE_TICKLESS_IDLE != 0 )
+                {
+                    /* If a task is blocked waiting for a notification then
+                    xNextTaskUnblockTime might be set to the blocked task's time
+                    out time.  If the task is unblocked for a reason other than
+                    a timeout xNextTaskUnblockTime is normally left unchanged,
+                    because it will automatically get reset to a new value when
+                    the tick count equals xNextTaskUnblockTime.  However if
+                    tickless idling is used it might be more important to enter
+                    sleep mode at the earliest possible time - so reset
+                    xNextTaskUnblockTime here to ensure it is updated at the
+                    earliest possible time. */
+                    prvResetNextTaskUnblockTime();
+                }
+                #endif
+
+                if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
+                {
+                    /* The notified task has a priority above the currently
+                    executing task so a yield is required. */
+                    taskYIELD_IF_USING_PREEMPTION();
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        taskEXIT_CRITICAL();
+
+        return xReturn;
+    }
+
+#endif /* configUSE_TASK_NOTIFICATIONS */
+/*-----------------------------------------------------------*/
+
+#if( configUSE_TASK_NOTIFICATIONS == 1 )
+
+    BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken )
+    {
+    TCB_t * pxTCB;
+    uint8_t ucOriginalNotifyState;
+    BaseType_t xReturn = pdPASS;
+    UBaseType_t uxSavedInterruptStatus;
+
+        configASSERT( xTaskToNotify );
+
+        /* RTOS ports that support interrupt nesting have the concept of a
+        maximum system call (or maximum API call) interrupt priority.
+        Interrupts that are above the maximum system call priority are kept
+        permanently enabled, even when the RTOS kernel is in a critical section,
+        but cannot make any calls to FreeRTOS API functions.  If configASSERT()
+        is defined in FreeRTOSConfig.h then
+        portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+        failure if a FreeRTOS API function is called from an interrupt that has
+        been assigned a priority above the configured maximum system call
+        priority.  Only FreeRTOS functions that end in FromISR can be called
+        from interrupts that have been assigned a priority at or (logically)
+        below the maximum system call interrupt priority.  FreeRTOS maintains a
+        separate interrupt safe API to ensure interrupt entry is as fast and as
+        simple as possible.  More information (albeit Cortex-M specific) is
+        provided on the following link:
+        http://www.freertos.org/RTOS-Cortex-M3-M4.html */
+        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
+
+        pxTCB = xTaskToNotify;
+
+        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+        {
+            if( pulPreviousNotificationValue != NULL )
+            {
+                *pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
+            }
+
+            ucOriginalNotifyState = pxTCB->ucNotifyState;
+            pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
+
+            switch( eAction )
+            {
+                case eSetBits :
+                    pxTCB->ulNotifiedValue |= ulValue;
+                    break;
+
+                case eIncrement :
+                    ( pxTCB->ulNotifiedValue )++;
+                    break;
+
+                case eSetValueWithOverwrite :
+                    pxTCB->ulNotifiedValue = ulValue;
+                    break;
+
+                case eSetValueWithoutOverwrite :
+                    if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
+                    {
+                        pxTCB->ulNotifiedValue = ulValue;
+                    }
+                    else
+                    {
+                        /* The value could not be written to the task. */
+                        xReturn = pdFAIL;
+                    }
+                    break;
+
+                case eNoAction :
+                    /* The task is being notified without its notify value being
+                    updated.
*/ + break; + + default: + /* Should not get here if all enums are handled. + Artificially force an assert by testing a value the + compiler can't assume is const. */ + configASSERT( pxTCB->ulNotifiedValue == ~0UL ); + break; + } + + traceTASK_NOTIFY_FROM_ISR(); + + /* If the task is in the blocked state specifically to wait for a + notification then unblock it now. */ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + /* The task should not have been on an event list. */ + configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed and ready lists cannot be accessed, so hold + this task pending until the scheduler is resumed. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The notified task has a priority above the currently + executing task so a yield is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + + /* Mark that a yield is pending in case the user is not + using the "xHigherPriorityTaskWoken" parameter to an ISR + safe FreeRTOS function. */ + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken ) + { + TCB_t * pxTCB; + uint8_t ucOriginalNotifyState; + UBaseType_t uxSavedInterruptStatus; + + configASSERT( xTaskToNotify ); + + /* RTOS ports that support interrupt nesting have the concept of a + maximum system call (or maximum API call) interrupt priority. + Interrupts that are above the maximum system call priority are kept + permanently enabled, even when the RTOS kernel is in a critical section, + but cannot make any calls to FreeRTOS API functions. If configASSERT() + is defined in FreeRTOSConfig.h then + portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + failure if a FreeRTOS API function is called from an interrupt that has + been assigned a priority above the configured maximum system call + priority. Only FreeRTOS functions that end in FromISR can be called + from interrupts that have been assigned a priority at or (logically) + below the maximum system call interrupt priority. FreeRTOS maintains a + separate interrupt safe API to ensure interrupt entry is as fast and as + simple as possible. More information (albeit Cortex-M specific) is + provided on the following link: + http://www.freertos.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + pxTCB = xTaskToNotify; + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + ucOriginalNotifyState = pxTCB->ucNotifyState; + pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED; + + /* 'Giving' is equivalent to incrementing a count in a counting + semaphore. */ + ( pxTCB->ulNotifiedValue )++; + + traceTASK_NOTIFY_GIVE_FROM_ISR(); + + /* If the task is in the blocked state specifically to wait for a + notification then unblock it now. */ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + /* The task should not have been on an event list. 
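The FromISR give/take pair implemented here is the documented lightweight replacement for a binary or counting semaphore used for deferred interrupt processing. A minimal sketch; the ISR name and task handle variable are hypothetical:

#include "FreeRTOS.h"
#include "task.h"

static TaskHandle_t xHandlerTask = NULL; /* assumed to be stored at task creation */

void vDemoInterruptHandler( void ) /* hypothetical ISR */
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Increment the handler task's notification value, like giving a
       counting semaphore, noting whether that unblocked a higher
       priority task. */
    vTaskNotifyGiveFromISR( xHandlerTask, &xHigherPriorityTaskWoken );

    /* Request a context switch on interrupt exit if one is required. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}

static void vHandlerTask( void *pvParameters )
{
    ( void ) pvParameters;
    for( ;; )
    {
        /* pdTRUE clears the whole count on exit, so this behaves like a
           binary semaphore; block indefinitely until the ISR 'gives'. */
        if( ulTaskNotifyTake( pdTRUE, portMAX_DELAY ) != 0 )
        {
            /* Process the event(s) signalled from the interrupt. */
        }
    }
}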
*/ + configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed and ready lists cannot be accessed, so hold + this task pending until the scheduler is resumed. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The notified task has a priority above the currently + executing task so a yield is required. */ + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + + /* Mark that a yield is pending in case the user is not + using the "xHigherPriorityTaskWoken" parameter in an ISR + safe FreeRTOS function. */ + xYieldPending = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ + +/*-----------------------------------------------------------*/ + +#if( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask ) + { + TCB_t *pxTCB; + BaseType_t xReturn; + + /* If null is passed in here then it is the calling task that is having + its notification state cleared. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + taskENTER_CRITICAL(); + { + if( pxTCB->ucNotifyState == taskNOTIFICATION_RECEIVED ) + { + pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION; + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + TickType_t xTaskGetIdleRunTimeCounter( void ) + { + return xIdleTaskHandle->ulRunTimeCounter; + } +#endif +/*-----------------------------------------------------------*/ + +static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) +{ +TickType_t xTimeToWake; +const TickType_t xConstTickCount = xTickCount; + + #if( INCLUDE_xTaskAbortDelay == 1 ) + { + /* About to enter a delayed list, so ensure the ucDelayAborted flag is + reset to pdFALSE so it can be detected as having been set to pdTRUE + when the task leaves the Blocked state. */ + pxCurrentTCB->ucDelayAborted = pdFALSE; + } + #endif + + /* Remove the task from the ready list before adding it to the blocked list + as the same list item is used for both lists. */ + if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* The current task must be in a ready list, so there is no need to + check, and the port reset macro can be called directly. */ + portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) ) + { + /* Add the task to the suspended task list instead of a delayed task + list to ensure it is not woken by a timing event. It will block + indefinitely. 
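The wake-time test used by prvAddCurrentTaskToDelayedList() (continued below) relies on unsigned wrap-around: a wake time numerically smaller than the current tick count can only mean the addition overflowed. A worked example of that arithmetic, assuming configUSE_16_BIT_TICKS == 1 so TickType_t is a 16-bit unsigned type:

#include "FreeRTOS.h"

void vWakeTimeOverflowExample( void )
{
    TickType_t xConstTickCount = ( TickType_t ) 0xFFF0;
    TickType_t xTicksToWait    = ( TickType_t ) 0x0020;

    /* 0xFFF0 + 0x0020 wraps to 0x0010 in 16-bit unsigned arithmetic. */
    TickType_t xTimeToWake = ( TickType_t ) ( xConstTickCount + xTicksToWait );

    /* The kernel's test: the wake time is below the current count, so the
       task is placed on pxOverflowDelayedTaskList and is only woken after
       the tick count itself overflows and the delayed lists are switched. */
    configASSERT( xTimeToWake < xConstTickCount );
}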
*/ + vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* Calculate the time at which the task should be woken if the event + does not occur. This may overflow but this doesn't matter, the + kernel will manage it correctly. */ + xTimeToWake = xConstTickCount + xTicksToWait; + + /* The list item will be inserted in wake time order. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake ); + + if( xTimeToWake < xConstTickCount ) + { + /* Wake time has overflowed. Place this item in the overflow + list. */ + vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* The wake time has not overflowed, so the current block list + is used. */ + vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + + /* If the task entering the blocked state was placed at the + head of the list of blocked tasks then xNextTaskUnblockTime + needs to be updated too. */ + if( xTimeToWake < xNextTaskUnblockTime ) + { + xNextTaskUnblockTime = xTimeToWake; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + } + #else /* INCLUDE_vTaskSuspend */ + { + /* Calculate the time at which the task should be woken if the event + does not occur. This may overflow but this doesn't matter, the kernel + will manage it correctly. */ + xTimeToWake = xConstTickCount + xTicksToWait; + + /* The list item will be inserted in wake time order. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake ); + + if( xTimeToWake < xConstTickCount ) + { + /* Wake time has overflowed. Place this item in the overflow list. */ + vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* The wake time has not overflowed, so the current block list is used. */ + vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + + /* If the task entering the blocked state was placed at the head of the + list of blocked tasks then xNextTaskUnblockTime needs to be updated + too. */ + if( xTimeToWake < xNextTaskUnblockTime ) + { + xNextTaskUnblockTime = xTimeToWake; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */ + ( void ) xCanBlockIndefinitely; + } + #endif /* INCLUDE_vTaskSuspend */ +} + +/* Code below here allows additional code to be inserted into this source file, +especially where access to file scope functions and data is needed (for example +when performing module tests). */ + +#ifdef FREERTOS_MODULE_TEST + #include "tasks_test_access_functions.h" +#endif + + +#if( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 ) + + #include "freertos_tasks_c_additions.h" + + #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + static void freertos_tasks_c_additions_init( void ) + { + FREERTOS_TASKS_C_ADDITIONS_INIT(); + } + #endif + +#endif + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/timers.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/timers.c new file mode 100644 index 0000000..ea04327 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/FreeRTOS/Source/timers.c @@ -0,0 +1,1102 @@ +/* + * FreeRTOS Kernel V10.2.1 + * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +/* Standard includes. */ +#include <stdlib.h> + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining +all the API functions to use the MPU wrappers. That should only be done when +task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" + +#if ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 0 ) + #error configUSE_TIMERS must be set to 1 to make the xTimerPendFunctionCall() function available. +#endif + +/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified +because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined +for the header files above, but not in this file, in order to generate the +correct privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e9021 !e961 !e750. */ + + +/* This entire source file will be skipped if the application is not configured +to include software timer functionality. This #if is closed at the very bottom +of this file. If you want to include software timer functionality then ensure +configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. */ +#if ( configUSE_TIMERS == 1 ) + +/* Misc definitions. */ +#define tmrNO_DELAY ( TickType_t ) 0U + +/* The name assigned to the timer service task. This can be overridden by +defining configTIMER_SERVICE_TASK_NAME in FreeRTOSConfig.h. */ +#ifndef configTIMER_SERVICE_TASK_NAME + #define configTIMER_SERVICE_TASK_NAME "Tmr Svc" +#endif + +/* Bit definitions used in the ucStatus member of a timer structure. */ +#define tmrSTATUS_IS_ACTIVE ( ( uint8_t ) 0x01 ) +#define tmrSTATUS_IS_STATICALLY_ALLOCATED ( ( uint8_t ) 0x02 ) +#define tmrSTATUS_IS_AUTORELOAD ( ( uint8_t ) 0x04 ) + +/* The definition of the timers themselves. */ +typedef struct tmrTimerControl /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +{ + const char *pcTimerName; /*<< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + ListItem_t xTimerListItem; /*<< Standard linked list item as used by all kernel features for event management. 
*/ + TickType_t xTimerPeriodInTicks;/*<< How quickly and often the timer expires. */ + void *pvTimerID; /*<< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */ + TimerCallbackFunction_t pxCallbackFunction; /*<< The function that will be called when the timer expires. */ + #if( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxTimerNumber; /*<< An ID assigned by trace tools such as FreeRTOS+Trace */ + #endif + uint8_t ucStatus; /*<< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */ +} xTIMER; + +/* The old xTIMER name is maintained above then typedefed to the new Timer_t +name below to enable the use of older kernel aware debuggers. */ +typedef xTIMER Timer_t; + +/* The definition of messages that can be sent and received on the timer queue. +Two types of message can be queued - messages that manipulate a software timer, +and messages that request the execution of a non-timer related callback. The +two message types are defined in two separate structures, xTimerParametersType +and xCallbackParametersType respectively. */ +typedef struct tmrTimerParameters +{ + TickType_t xMessageValue; /*<< An optional value used by a subset of commands, for example, when changing the period of a timer. */ + Timer_t * pxTimer; /*<< The timer to which the command will be applied. */ +} TimerParameter_t; + + +typedef struct tmrCallbackParameters +{ + PendedFunction_t pxCallbackFunction; /* << The callback function to execute. */ + void *pvParameter1; /* << The value that will be used as the callback function's first parameter. */ + uint32_t ulParameter2; /* << The value that will be used as the callback function's second parameter. */ +} CallbackParameters_t; + +/* The structure that contains the two message types, along with an identifier +that is used to determine which message type is valid. */ +typedef struct tmrTimerQueueMessage +{ + BaseType_t xMessageID; /*<< The command being sent to the timer service task. */ + union + { + TimerParameter_t xTimerParameters; + + /* Don't include xCallbackParameters if it is not going to be used as + it makes the structure (and therefore the timer queue) larger. */ + #if ( INCLUDE_xTimerPendFunctionCall == 1 ) + CallbackParameters_t xCallbackParameters; + #endif /* INCLUDE_xTimerPendFunctionCall */ + } u; +} DaemonTaskMessage_t; + +/*lint -save -e956 A manual analysis and inspection has been used to determine +which static variables must be declared volatile. */ + +/* The list in which active timers are stored. Timers are referenced in expire +time order, with the nearest expiry time at the front of the list. Only the +timer service task is allowed to access these lists. +xActiveTimerList1 and xActiveTimerList2 could be at function scope but that +breaks some kernel aware debuggers, and debuggers that rely on removing the +static qualifier. */ +PRIVILEGED_DATA static List_t xActiveTimerList1 = { 0 }; +PRIVILEGED_DATA static List_t xActiveTimerList2 = { 0 }; +PRIVILEGED_DATA static List_t *pxCurrentTimerList = NULL; +PRIVILEGED_DATA static List_t *pxOverflowTimerList = NULL; + +/* A queue that is used to send commands to the timer service task. 
*/ +PRIVILEGED_DATA static QueueHandle_t xTimerQueue = NULL; +PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL; + +/*lint -restore */ + +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + /* If static allocation is supported then the application must provide the + following callback function - which enables the application to optionally + provide the memory that will be used by the timer task as the task's stack + and TCB. */ + extern void vApplicationGetTimerTaskMemory( StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize ); + +#endif + +/* + * Initialise the infrastructure used by the timer service task if it has not + * been initialised already. + */ +static void prvCheckForValidListAndQueue( void ) PRIVILEGED_FUNCTION; + +/* + * The timer service task (daemon). Timer functionality is controlled by this + * task. Other tasks communicate with the timer service task using the + * xTimerQueue queue. + */ +static portTASK_FUNCTION_PROTO( prvTimerTask, pvParameters ) PRIVILEGED_FUNCTION; + +/* + * Called by the timer service task to interpret and process a command it + * received on the timer queue. + */ +static void prvProcessReceivedCommands( void ) PRIVILEGED_FUNCTION; + +/* + * Insert the timer into either xActiveTimerList1, or xActiveTimerList2, + * depending on if the expire time causes a timer counter overflow. + */ +static BaseType_t prvInsertTimerInActiveList( Timer_t * const pxTimer, const TickType_t xNextExpiryTime, const TickType_t xTimeNow, const TickType_t xCommandTime ) PRIVILEGED_FUNCTION; + +/* + * An active timer has reached its expire time. Reload the timer if it is an + * auto reload timer, then call its callback. + */ +static void prvProcessExpiredTimer( const TickType_t xNextExpireTime, const TickType_t xTimeNow ) PRIVILEGED_FUNCTION; + +/* + * The tick count has overflowed. Switch the timer lists after ensuring the + * current timer list does not still reference some timers. + */ +static void prvSwitchTimerLists( void ) PRIVILEGED_FUNCTION; + +/* + * Obtain the current tick count, setting *pxTimerListsWereSwitched to pdTRUE + * if a tick count overflow occurred since prvSampleTimeNow() was last called. + */ +static TickType_t prvSampleTimeNow( BaseType_t * const pxTimerListsWereSwitched ) PRIVILEGED_FUNCTION; + +/* + * If the timer list contains any active timers then return the expire time of + * the timer that will expire first and set *pxListWasEmpty to false. If the + * timer list does not contain any timers then return 0 and set *pxListWasEmpty + * to pdTRUE. + */ +static TickType_t prvGetNextExpireTime( BaseType_t * const pxListWasEmpty ) PRIVILEGED_FUNCTION; + +/* + * If a timer has expired, process it. Otherwise, block the timer service task + * until either a timer does expire or a command is received. + */ +static void prvProcessTimerOrBlockTask( const TickType_t xNextExpireTime, BaseType_t xListWasEmpty ) PRIVILEGED_FUNCTION; + +/* + * Called after a Timer_t structure has been allocated either statically or + * dynamically to fill in the structure's members. + */ +static void prvInitialiseNewTimer( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + Timer_t *pxNewTimer ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +BaseType_t xTimerCreateTimerTask( void ) +{ +BaseType_t xReturn = pdFAIL; + + /* This function is called when the scheduler is started if + configUSE_TIMERS is set to 1. Check that the infrastructure used by the + timer service task has been created/initialised. If timers have already + been created then the initialisation will already have been performed. */ + prvCheckForValidListAndQueue(); + + if( xTimerQueue != NULL ) + { + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + StaticTask_t *pxTimerTaskTCBBuffer = NULL; + StackType_t *pxTimerTaskStackBuffer = NULL; + uint32_t ulTimerTaskStackSize; + + vApplicationGetTimerTaskMemory( &pxTimerTaskTCBBuffer, &pxTimerTaskStackBuffer, &ulTimerTaskStackSize ); + xTimerTaskHandle = xTaskCreateStatic( prvTimerTask, + configTIMER_SERVICE_TASK_NAME, + ulTimerTaskStackSize, + NULL, + ( ( UBaseType_t ) configTIMER_TASK_PRIORITY ) | portPRIVILEGE_BIT, + pxTimerTaskStackBuffer, + pxTimerTaskTCBBuffer ); + + if( xTimerTaskHandle != NULL ) + { + xReturn = pdPASS; + } + } + #else + { + xReturn = xTaskCreate( prvTimerTask, + configTIMER_SERVICE_TASK_NAME, + configTIMER_TASK_STACK_DEPTH, + NULL, + ( ( UBaseType_t ) configTIMER_TASK_PRIORITY ) | portPRIVILEGE_BIT, + &xTimerTaskHandle ); + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + configASSERT( xReturn ); + return xReturn; +} +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + TimerHandle_t xTimerCreate( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction ) + { + Timer_t *pxNewTimer; + + pxNewTimer = ( Timer_t * ) pvPortMalloc( sizeof( Timer_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of Timer_t is always a pointer to the timer's name. */ + + if( pxNewTimer != NULL ) + { + /* Status is thus far zero as the timer is not created statically + and has not been started. The autoreload bit may get set in + prvInitialiseNewTimer. */ + pxNewTimer->ucStatus = 0x00; + prvInitialiseNewTimer( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxNewTimer ); + } + + return pxNewTimer; + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + + TimerHandle_t xTimerCreateStatic( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + StaticTimer_t *pxTimerBuffer ) + { + Timer_t *pxNewTimer; + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticTimer_t equals the size of the real timer + structure. 
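A minimal usage sketch for the xTimerCreate()/xTimerStart() path implemented above; the timer name, period and callback are illustrative only:

#include "FreeRTOS.h"
#include "timers.h"

static void prvBlinkCallback( TimerHandle_t xTimer )
{
    ( void ) xTimer;
    /* Runs in the timer service (daemon) task context, so it must not block. */
}

void vStartBlinkTimer( void )
{
    /* Auto-reload timer that expires every 500 ms. */
    TimerHandle_t xTimer = xTimerCreate( "Blink",
                                         pdMS_TO_TICKS( 500 ),
                                         pdTRUE,            /* uxAutoReload */
                                         NULL,              /* pvTimerID */
                                         prvBlinkCallback );

    if( xTimer != NULL )
    {
        /* Queues a start command to the daemon task, waiting up to 10 ticks
           for space on the timer command queue. */
        ( void ) xTimerStart( xTimer, 10 );
    }
}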
*/ + volatile size_t xSize = sizeof( StaticTimer_t ); + configASSERT( xSize == sizeof( Timer_t ) ); + ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */ + } + #endif /* configASSERT_DEFINED */ + + /* A pointer to a StaticTimer_t structure MUST be provided, use it. */ + configASSERT( pxTimerBuffer ); + pxNewTimer = ( Timer_t * ) pxTimerBuffer; /*lint !e740 !e9087 StaticTimer_t is a pointer to a Timer_t, so guaranteed to be aligned and sized correctly (checked by an assert()), so this is safe. */ + + if( pxNewTimer != NULL ) + { + /* Timers can be created statically or dynamically so note this + timer was created statically in case it is later deleted. The + autoreload bit may get set in prvInitialiseNewTimer(). */ + pxNewTimer->ucStatus = tmrSTATUS_IS_STATICALLY_ALLOCATED; + + prvInitialiseNewTimer( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, pxCallbackFunction, pxNewTimer ); + } + + return pxNewTimer; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewTimer( const char * const pcTimerName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + Timer_t *pxNewTimer ) +{ + /* 0 is not a valid value for xTimerPeriodInTicks. */ + configASSERT( ( xTimerPeriodInTicks > 0 ) ); + + if( pxNewTimer != NULL ) + { + /* Ensure the infrastructure used by the timer service task has been + created/initialised. */ + prvCheckForValidListAndQueue(); + + /* Initialise the timer structure members using the function + parameters. */ + pxNewTimer->pcTimerName = pcTimerName; + pxNewTimer->xTimerPeriodInTicks = xTimerPeriodInTicks; + pxNewTimer->pvTimerID = pvTimerID; + pxNewTimer->pxCallbackFunction = pxCallbackFunction; + vListInitialiseItem( &( pxNewTimer->xTimerListItem ) ); + if( uxAutoReload != pdFALSE ) + { + pxNewTimer->ucStatus |= tmrSTATUS_IS_AUTORELOAD; + } + traceTIMER_CREATE( pxNewTimer ); + } +} +/*-----------------------------------------------------------*/ + +BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, const BaseType_t xCommandID, const TickType_t xOptionalValue, BaseType_t * const pxHigherPriorityTaskWoken, const TickType_t xTicksToWait ) +{ +BaseType_t xReturn = pdFAIL; +DaemonTaskMessage_t xMessage; + + configASSERT( xTimer ); + + /* Send a message to the timer service task to perform a particular action + on a particular timer definition. */ + if( xTimerQueue != NULL ) + { + /* Send a command to the timer service task to start the xTimer timer. 
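The static counterpart avoids the heap entirely. A sketch assuming configSUPPORT_STATIC_ALLOCATION == 1; the function and timer names are illustrative:

#include "FreeRTOS.h"
#include "timers.h"

/* Application-supplied storage used instead of pvPortMalloc(). */
static StaticTimer_t xTimerBuffer;

TimerHandle_t xStartOneShotTimer( TimerCallbackFunction_t pxCallback )
{
    TimerHandle_t xTimer = xTimerCreateStatic( "OneShot",
                                               pdMS_TO_TICKS( 100 ),
                                               pdFALSE,       /* one-shot */
                                               NULL,
                                               pxCallback,
                                               &xTimerBuffer );

    /* Cannot return NULL when a valid buffer is supplied. */
    configASSERT( xTimer != NULL );
    ( void ) xTimerStart( xTimer, 0 );
    return xTimer;
}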
*/ + xMessage.xMessageID = xCommandID; + xMessage.u.xTimerParameters.xMessageValue = xOptionalValue; + xMessage.u.xTimerParameters.pxTimer = xTimer; + + if( xCommandID < tmrFIRST_FROM_ISR_COMMAND ) + { + if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING ) + { + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, xTicksToWait ); + } + else + { + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, tmrNO_DELAY ); + } + } + else + { + xReturn = xQueueSendToBackFromISR( xTimerQueue, &xMessage, pxHigherPriorityTaskWoken ); + } + + traceTIMER_COMMAND_SEND( xTimer, xCommandID, xOptionalValue, xReturn ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; +} +/*-----------------------------------------------------------*/ + +TaskHandle_t xTimerGetTimerDaemonTaskHandle( void ) +{ + /* If xTimerGetTimerDaemonTaskHandle() is called before the scheduler has been + started, then xTimerTaskHandle will be NULL. */ + configASSERT( ( xTimerTaskHandle != NULL ) ); + return xTimerTaskHandle; +} +/*-----------------------------------------------------------*/ + +TickType_t xTimerGetPeriod( TimerHandle_t xTimer ) +{ +Timer_t *pxTimer = xTimer; + + configASSERT( xTimer ); + return pxTimer->xTimerPeriodInTicks; +} +/*-----------------------------------------------------------*/ + +void vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ) +{ +Timer_t * pxTimer = xTimer; + + configASSERT( xTimer ); + taskENTER_CRITICAL(); + { + if( uxAutoReload != pdFALSE ) + { + pxTimer->ucStatus |= tmrSTATUS_IS_AUTORELOAD; + } + else + { + pxTimer->ucStatus &= ~tmrSTATUS_IS_AUTORELOAD; + } + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) +{ +Timer_t * pxTimer = xTimer; +TickType_t xReturn; + + configASSERT( xTimer ); + xReturn = listGET_LIST_ITEM_VALUE( &( pxTimer->xTimerListItem ) ); + return xReturn; +} +/*-----------------------------------------------------------*/ + +const char * pcTimerGetName( TimerHandle_t xTimer ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +{ +Timer_t *pxTimer = xTimer; + + configASSERT( xTimer ); + return pxTimer->pcTimerName; +} +/*-----------------------------------------------------------*/ + +static void prvProcessExpiredTimer( const TickType_t xNextExpireTime, const TickType_t xTimeNow ) +{ +BaseType_t xResult; +Timer_t * const pxTimer = ( Timer_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxCurrentTimerList ); /*lint !e9087 !e9079 void * is used as this macro is used with tasks and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + /* Remove the timer from the list of active timers. A check has already + been performed to ensure the list is not empty. */ + ( void ) uxListRemove( &( pxTimer->xTimerListItem ) ); + traceTIMER_EXPIRED( pxTimer ); + + /* If the timer is an auto reload timer then calculate the next + expiry time and re-insert the timer in the list of active timers. */ + if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) != 0 ) + { + /* The timer is inserted into a list using a time relative to anything + other than the current time. It will therefore be inserted into the + correct list relative to the time this task thinks it is now. 
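For context on the function above: in this kernel version the public start/stop calls are thin macros in timers.h that feed xTimerGenericCommand(). Paraphrased from memory of the header, so treat as a sketch rather than an exact quotation:

#define xTimerStart( xTimer, xTicksToWait ) \
    xTimerGenericCommand( ( xTimer ), tmrCOMMAND_START, ( xTaskGetTickCount() ), NULL, ( xTicksToWait ) )

#define xTimerStop( xTimer, xTicksToWait ) \
    xTimerGenericCommand( ( xTimer ), tmrCOMMAND_STOP, 0U, NULL, ( xTicksToWait ) )

/* ISR variants route through the same function; command IDs at or above
   tmrFIRST_FROM_ISR_COMMAND select the xQueueSendToBackFromISR() path. */
#define xTimerStartFromISR( xTimer, pxHigherPriorityTaskWoken ) \
    xTimerGenericCommand( ( xTimer ), tmrCOMMAND_START_FROM_ISR, ( xTaskGetTickCountFromISR() ), ( pxHigherPriorityTaskWoken ), 0U )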
*/ + if( prvInsertTimerInActiveList( pxTimer, ( xNextExpireTime + pxTimer->xTimerPeriodInTicks ), xTimeNow, xNextExpireTime ) != pdFALSE ) + { + /* The timer expired before it was added to the active timer + list. Reload it now. */ + xResult = xTimerGenericCommand( pxTimer, tmrCOMMAND_START_DONT_TRACE, xNextExpireTime, NULL, tmrNO_DELAY ); + configASSERT( xResult ); + ( void ) xResult; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE; + mtCOVERAGE_TEST_MARKER(); + } + + /* Call the timer callback. */ + pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); +} +/*-----------------------------------------------------------*/ + +static portTASK_FUNCTION( prvTimerTask, pvParameters ) +{ +TickType_t xNextExpireTime; +BaseType_t xListWasEmpty; + + /* Just to avoid compiler warnings. */ + ( void ) pvParameters; + + #if( configUSE_DAEMON_TASK_STARTUP_HOOK == 1 ) + { + extern void vApplicationDaemonTaskStartupHook( void ); + + /* Allow the application writer to execute some code in the context of + this task at the point the task starts executing. This is useful if the + application includes initialisation code that would benefit from + executing after the scheduler has been started. */ + vApplicationDaemonTaskStartupHook(); + } + #endif /* configUSE_DAEMON_TASK_STARTUP_HOOK */ + + for( ;; ) + { + /* Query the timers list to see if it contains any timers, and if so, + obtain the time at which the next timer will expire. */ + xNextExpireTime = prvGetNextExpireTime( &xListWasEmpty ); + + /* If a timer has expired, process it. Otherwise, block this task + until either a timer does expire, or a command is received. */ + prvProcessTimerOrBlockTask( xNextExpireTime, xListWasEmpty ); + + /* Empty the command queue. */ + prvProcessReceivedCommands(); + } +} +/*-----------------------------------------------------------*/ + +static void prvProcessTimerOrBlockTask( const TickType_t xNextExpireTime, BaseType_t xListWasEmpty ) +{ +TickType_t xTimeNow; +BaseType_t xTimerListsWereSwitched; + + vTaskSuspendAll(); + { + /* Obtain the time now to make an assessment as to whether the timer + has expired or not. If obtaining the time causes the lists to switch + then don't process this timer as any timers that remained in the list + when the lists were switched will have been processed within the + prvSampleTimeNow() function. */ + xTimeNow = prvSampleTimeNow( &xTimerListsWereSwitched ); + if( xTimerListsWereSwitched == pdFALSE ) + { + /* The tick count has not overflowed, has the timer expired? */ + if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) ) + { + ( void ) xTaskResumeAll(); + prvProcessExpiredTimer( xNextExpireTime, xTimeNow ); + } + else + { + /* The tick count has not overflowed, and the next expire + time has not been reached yet. This task should therefore + block to wait for the next expire time or a command to be + received - whichever comes first. The following line cannot + be reached unless xNextExpireTime > xTimeNow, except in the + case when the current timer list is empty. */ + if( xListWasEmpty != pdFALSE ) + { + /* The current timer list is empty - is the overflow list + also empty? */ + xListWasEmpty = listLIST_IS_EMPTY( pxOverflowTimerList ); + } + + vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty ); + + if( xTaskResumeAll() == pdFALSE ) + { + /* Yield to wait for either a command to arrive, or the + block time to expire. 
If a command arrived between the + critical section being exited and this yield then the yield + will not cause the task to block. */ + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + else + { + ( void ) xTaskResumeAll(); + } + } +} +/*-----------------------------------------------------------*/ + +static TickType_t prvGetNextExpireTime( BaseType_t * const pxListWasEmpty ) +{ +TickType_t xNextExpireTime; + + /* Timers are listed in expiry time order, with the head of the list + referencing the timer that will expire first. Obtain the time at which + the timer with the nearest expiry time will expire. If there are no + active timers then just set the next expire time to 0. That will cause + this task to unblock when the tick count overflows, at which point the + timer lists will be switched and the next expiry time can be + re-assessed. */ + *pxListWasEmpty = listLIST_IS_EMPTY( pxCurrentTimerList ); + if( *pxListWasEmpty == pdFALSE ) + { + xNextExpireTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxCurrentTimerList ); + } + else + { + /* Ensure the task unblocks when the tick count rolls over. */ + xNextExpireTime = ( TickType_t ) 0U; + } + + return xNextExpireTime; +} +/*-----------------------------------------------------------*/ + +static TickType_t prvSampleTimeNow( BaseType_t * const pxTimerListsWereSwitched ) +{ +TickType_t xTimeNow; +PRIVILEGED_DATA static TickType_t xLastTime = ( TickType_t ) 0U; /*lint !e956 Variable is only accessible to one task. */ + + xTimeNow = xTaskGetTickCount(); + + if( xTimeNow < xLastTime ) + { + prvSwitchTimerLists(); + *pxTimerListsWereSwitched = pdTRUE; + } + else + { + *pxTimerListsWereSwitched = pdFALSE; + } + + xLastTime = xTimeNow; + + return xTimeNow; +} +/*-----------------------------------------------------------*/ + +static BaseType_t prvInsertTimerInActiveList( Timer_t * const pxTimer, const TickType_t xNextExpiryTime, const TickType_t xTimeNow, const TickType_t xCommandTime ) +{ +BaseType_t xProcessTimerNow = pdFALSE; + + listSET_LIST_ITEM_VALUE( &( pxTimer->xTimerListItem ), xNextExpiryTime ); + listSET_LIST_ITEM_OWNER( &( pxTimer->xTimerListItem ), pxTimer ); + + if( xNextExpiryTime <= xTimeNow ) + { + /* Has the expiry time elapsed between the time the command to + start/reset a timer was issued and the time the command was processed? */ + if( ( ( TickType_t ) ( xTimeNow - xCommandTime ) ) >= pxTimer->xTimerPeriodInTicks ) /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + { + /* The time between a command being issued and the command being + processed actually exceeds the timer's period. */ + xProcessTimerNow = pdTRUE; + } + else + { + vListInsert( pxOverflowTimerList, &( pxTimer->xTimerListItem ) ); + } + } + else + { + if( ( xTimeNow < xCommandTime ) && ( xNextExpiryTime >= xCommandTime ) ) + { + /* If, since the command was issued, the tick count has overflowed + but the expiry time has not, then the timer must have already passed + its expiry time and should be processed immediately. 
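A worked example of the first branch above, where the start command sat on the queue for longer than the timer's whole period. The numbers are illustrative:

#include "FreeRTOS.h"

void vExpiryArithmeticExample( void )
{
    const TickType_t xCommandTime = 100; /* tick at which xTimerStart() ran */
    const TickType_t xTimeNow     = 115; /* tick at which the daemon processed it */
    const TickType_t xPeriod      = 10;
    const TickType_t xNextExpiry  = xCommandTime + xPeriod; /* 110 */

    /* The expiry time has already passed... */
    configASSERT( xNextExpiry <= xTimeNow );

    /* ...and more than one full period elapsed while the command was queued,
       so prvInsertTimerInActiveList() returns pdTRUE and the callback runs
       immediately rather than the timer being inserted into a list. */
    configASSERT( ( TickType_t ) ( xTimeNow - xCommandTime ) >= xPeriod );
}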
*/ + xProcessTimerNow = pdTRUE; + } + else + { + vListInsert( pxCurrentTimerList, &( pxTimer->xTimerListItem ) ); + } + } + + return xProcessTimerNow; +} +/*-----------------------------------------------------------*/ + +static void prvProcessReceivedCommands( void ) +{ +DaemonTaskMessage_t xMessage; +Timer_t *pxTimer; +BaseType_t xTimerListsWereSwitched, xResult; +TickType_t xTimeNow; + + while( xQueueReceive( xTimerQueue, &xMessage, tmrNO_DELAY ) != pdFAIL ) /*lint !e603 xMessage does not have to be initialised as it is passed out, not in, and it is not used unless xQueueReceive() returns pdTRUE. */ + { + #if ( INCLUDE_xTimerPendFunctionCall == 1 ) + { + /* Negative commands are pended function calls rather than timer + commands. */ + if( xMessage.xMessageID < ( BaseType_t ) 0 ) + { + const CallbackParameters_t * const pxCallback = &( xMessage.u.xCallbackParameters ); + + /* The timer uses the xCallbackParameters member to request a + callback be executed. Check the callback is not NULL. */ + configASSERT( pxCallback ); + + /* Call the function. */ + pxCallback->pxCallbackFunction( pxCallback->pvParameter1, pxCallback->ulParameter2 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* INCLUDE_xTimerPendFunctionCall */ + + /* Commands that are positive are timer commands rather than pended + function calls. */ + if( xMessage.xMessageID >= ( BaseType_t ) 0 ) + { + /* The message uses the xTimerParameters member to work on a + software timer. */ + pxTimer = xMessage.u.xTimerParameters.pxTimer; + + if( listIS_CONTAINED_WITHIN( NULL, &( pxTimer->xTimerListItem ) ) == pdFALSE ) /*lint !e961. The cast is only redundant when NULL is passed into the macro. */ + { + /* The timer is in a list, remove it. */ + ( void ) uxListRemove( &( pxTimer->xTimerListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceTIMER_COMMAND_RECEIVED( pxTimer, xMessage.xMessageID, xMessage.u.xTimerParameters.xMessageValue ); + + /* In this case the xTimerListsWereSwitched parameter is not used, but + it must be present in the function call. prvSampleTimeNow() must be + called after the message is received from xTimerQueue so there is no + possibility of a higher priority task adding a message to the message + queue with a time that is ahead of the timer daemon task (because it + pre-empted the timer daemon task after the xTimeNow value was set). */ + xTimeNow = prvSampleTimeNow( &xTimerListsWereSwitched ); + + switch( xMessage.xMessageID ) + { + case tmrCOMMAND_START : + case tmrCOMMAND_START_FROM_ISR : + case tmrCOMMAND_RESET : + case tmrCOMMAND_RESET_FROM_ISR : + case tmrCOMMAND_START_DONT_TRACE : + /* Start or restart a timer. */ + pxTimer->ucStatus |= tmrSTATUS_IS_ACTIVE; + if( prvInsertTimerInActiveList( pxTimer, xMessage.u.xTimerParameters.xMessageValue + pxTimer->xTimerPeriodInTicks, xTimeNow, xMessage.u.xTimerParameters.xMessageValue ) != pdFALSE ) + { + /* The timer expired before it was added to the active + timer list. Process it now. 
*/ + pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); + traceTIMER_EXPIRED( pxTimer ); + + if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) != 0 ) + { + xResult = xTimerGenericCommand( pxTimer, tmrCOMMAND_START_DONT_TRACE, xMessage.u.xTimerParameters.xMessageValue + pxTimer->xTimerPeriodInTicks, NULL, tmrNO_DELAY ); + configASSERT( xResult ); + ( void ) xResult; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + break; + + case tmrCOMMAND_STOP : + case tmrCOMMAND_STOP_FROM_ISR : + /* The timer has already been removed from the active list. */ + pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE; + break; + + case tmrCOMMAND_CHANGE_PERIOD : + case tmrCOMMAND_CHANGE_PERIOD_FROM_ISR : + pxTimer->ucStatus |= tmrSTATUS_IS_ACTIVE; + pxTimer->xTimerPeriodInTicks = xMessage.u.xTimerParameters.xMessageValue; + configASSERT( ( pxTimer->xTimerPeriodInTicks > 0 ) ); + + /* The new period does not really have a reference, and can + be longer or shorter than the old one. The command time is + therefore set to the current time, and as the period cannot + be zero the next expiry time can only be in the future, + meaning (unlike for the xTimerStart() case above) there is + no fail case that needs to be handled here. */ + ( void ) prvInsertTimerInActiveList( pxTimer, ( xTimeNow + pxTimer->xTimerPeriodInTicks ), xTimeNow, xTimeNow ); + break; + + case tmrCOMMAND_DELETE : + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* The timer has already been removed from the active list, + just free up the memory if the memory was dynamically + allocated. */ + if( ( pxTimer->ucStatus & tmrSTATUS_IS_STATICALLY_ALLOCATED ) == ( uint8_t ) 0 ) + { + vPortFree( pxTimer ); + } + else + { + pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE; + } + } + #else + { + /* If dynamic allocation is not enabled, the memory + could not have been dynamically allocated. So there is + no need to free the memory - just mark the timer as + "not active". */ + pxTimer->ucStatus &= ~tmrSTATUS_IS_ACTIVE; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + break; + + default : + /* Don't expect to get here. */ + break; + } + } + } +} +/*-----------------------------------------------------------*/ + +static void prvSwitchTimerLists( void ) +{ +TickType_t xNextExpireTime, xReloadTime; +List_t *pxTemp; +Timer_t *pxTimer; +BaseType_t xResult; + + /* The tick count has overflowed. The timer lists must be switched. + If there are any timers still referenced from the current timer list + then they must have expired and should be processed before the lists + are switched. */ + while( listLIST_IS_EMPTY( pxCurrentTimerList ) == pdFALSE ) + { + xNextExpireTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxCurrentTimerList ); + + /* Remove the timer from the list. */ + pxTimer = ( Timer_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxCurrentTimerList ); /*lint !e9087 !e9079 void * is used as this macro is used with tasks and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + ( void ) uxListRemove( &( pxTimer->xTimerListItem ) ); + traceTIMER_EXPIRED( pxTimer ); + + /* Execute its callback, then send a command to restart the timer if + it is an auto-reload timer. It cannot be restarted here as the lists + have not yet been switched. 
*/ + pxTimer->pxCallbackFunction( ( TimerHandle_t ) pxTimer ); + + if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) != 0 ) + { + /* Calculate the reload value, and if the reload value results in + the timer going into the same timer list then it has already expired + and the timer should be re-inserted into the current list so it is + processed again within this loop. Otherwise a command should be sent + to restart the timer to ensure it is only inserted into a list after + the lists have been swapped. */ + xReloadTime = ( xNextExpireTime + pxTimer->xTimerPeriodInTicks ); + if( xReloadTime > xNextExpireTime ) + { + listSET_LIST_ITEM_VALUE( &( pxTimer->xTimerListItem ), xReloadTime ); + listSET_LIST_ITEM_OWNER( &( pxTimer->xTimerListItem ), pxTimer ); + vListInsert( pxCurrentTimerList, &( pxTimer->xTimerListItem ) ); + } + else + { + xResult = xTimerGenericCommand( pxTimer, tmrCOMMAND_START_DONT_TRACE, xNextExpireTime, NULL, tmrNO_DELAY ); + configASSERT( xResult ); + ( void ) xResult; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + pxTemp = pxCurrentTimerList; + pxCurrentTimerList = pxOverflowTimerList; + pxOverflowTimerList = pxTemp; +} +/*-----------------------------------------------------------*/ + +static void prvCheckForValidListAndQueue( void ) +{ + /* Check that the list from which active timers are referenced, and the + queue used to communicate with the timer service, have been + initialised. */ + taskENTER_CRITICAL(); + { + if( xTimerQueue == NULL ) + { + vListInitialise( &xActiveTimerList1 ); + vListInitialise( &xActiveTimerList2 ); + pxCurrentTimerList = &xActiveTimerList1; + pxOverflowTimerList = &xActiveTimerList2; + + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + /* The timer queue is allocated statically in case + configSUPPORT_DYNAMIC_ALLOCATION is 0. */ + static StaticQueue_t xStaticTimerQueue; /*lint !e956 Ok to declare in this manner to prevent additional conditional compilation guards in other locations. */ + static uint8_t ucStaticTimerQueueStorage[ ( size_t ) configTIMER_QUEUE_LENGTH * sizeof( DaemonTaskMessage_t ) ]; /*lint !e956 Ok to declare in this manner to prevent additional conditional compilation guards in other locations. */ + + xTimerQueue = xQueueCreateStatic( ( UBaseType_t ) configTIMER_QUEUE_LENGTH, ( UBaseType_t ) sizeof( DaemonTaskMessage_t ), &( ucStaticTimerQueueStorage[ 0 ] ), &xStaticTimerQueue ); + } + #else + { + xTimerQueue = xQueueCreate( ( UBaseType_t ) configTIMER_QUEUE_LENGTH, sizeof( DaemonTaskMessage_t ) ); + } + #endif + + #if ( configQUEUE_REGISTRY_SIZE > 0 ) + { + if( xTimerQueue != NULL ) + { + vQueueAddToRegistry( xTimerQueue, "TmrQ" ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configQUEUE_REGISTRY_SIZE */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer ) +{ +BaseType_t xReturn; +Timer_t *pxTimer = xTimer; + + configASSERT( xTimer ); + + /* Is the timer in the list of active timers? */ + taskENTER_CRITICAL(); + { + if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} /*lint !e818 Can't be pointer to const due to the typedef. 
*/ +/*-----------------------------------------------------------*/ + +void *pvTimerGetTimerID( const TimerHandle_t xTimer ) +{ +Timer_t * const pxTimer = xTimer; +void *pvReturn; + + configASSERT( xTimer ); + + taskENTER_CRITICAL(); + { + pvReturn = pxTimer->pvTimerID; + } + taskEXIT_CRITICAL(); + + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vTimerSetTimerID( TimerHandle_t xTimer, void *pvNewID ) +{ +Timer_t * const pxTimer = xTimer; + + configASSERT( xTimer ); + + taskENTER_CRITICAL(); + { + pxTimer->pvTimerID = pvNewID; + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +#if( INCLUDE_xTimerPendFunctionCall == 1 ) + + BaseType_t xTimerPendFunctionCallFromISR( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, BaseType_t *pxHigherPriorityTaskWoken ) + { + DaemonTaskMessage_t xMessage; + BaseType_t xReturn; + + /* Complete the message with the function parameters and post it to the + daemon task. */ + xMessage.xMessageID = tmrCOMMAND_EXECUTE_CALLBACK_FROM_ISR; + xMessage.u.xCallbackParameters.pxCallbackFunction = xFunctionToPend; + xMessage.u.xCallbackParameters.pvParameter1 = pvParameter1; + xMessage.u.xCallbackParameters.ulParameter2 = ulParameter2; + + xReturn = xQueueSendFromISR( xTimerQueue, &xMessage, pxHigherPriorityTaskWoken ); + + tracePEND_FUNC_CALL_FROM_ISR( xFunctionToPend, pvParameter1, ulParameter2, xReturn ); + + return xReturn; + } + +#endif /* INCLUDE_xTimerPendFunctionCall */ +/*-----------------------------------------------------------*/ + +#if( INCLUDE_xTimerPendFunctionCall == 1 ) + + BaseType_t xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, void *pvParameter1, uint32_t ulParameter2, TickType_t xTicksToWait ) + { + DaemonTaskMessage_t xMessage; + BaseType_t xReturn; + + /* This function can only be called after a timer has been created or + after the scheduler has been started because, until then, the timer + queue does not exist. */ + configASSERT( xTimerQueue ); + + /* Complete the message with the function parameters and post it to the + daemon task. */ + xMessage.xMessageID = tmrCOMMAND_EXECUTE_CALLBACK; + xMessage.u.xCallbackParameters.pxCallbackFunction = xFunctionToPend; + xMessage.u.xCallbackParameters.pvParameter1 = pvParameter1; + xMessage.u.xCallbackParameters.ulParameter2 = ulParameter2; + + xReturn = xQueueSendToBack( xTimerQueue, &xMessage, xTicksToWait ); + + tracePEND_FUNC_CALL( xFunctionToPend, pvParameter1, ulParameter2, xReturn ); + + return xReturn; + } + +#endif /* INCLUDE_xTimerPendFunctionCall */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxTimerGetTimerNumber( TimerHandle_t xTimer ) + { + return ( ( Timer_t * ) xTimer )->uxTimerNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vTimerSetTimerNumber( TimerHandle_t xTimer, UBaseType_t uxTimerNumber ) + { + ( ( Timer_t * ) xTimer )->uxTimerNumber = uxTimerNumber; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +/* This entire source file will be skipped if the application is not configured +to include software timer functionality. If you want to include software timer +functionality then ensure configUSE_TIMERS is set to 1 in FreeRTOSConfig.h. 
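The pend-function mechanism implemented above is commonly used for "centralised deferred interrupt processing", i.e. borrowing the timer daemon task instead of creating a dedicated handler task. A sketch with hypothetical function names:

#include "FreeRTOS.h"
#include "timers.h"

static void prvDeferredWork( void *pvParameter1, uint32_t ulParameter2 )
{
    /* Executes in the timer daemon task context, not in the ISR. */
    ( void ) pvParameter1;
    ( void ) ulParameter2;
}

void vDemoISR( void ) /* hypothetical interrupt handler */
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Post prvDeferredWork() to the timer command queue for the daemon
       task to call once it runs. */
    ( void ) xTimerPendFunctionCallFromISR( prvDeferredWork, NULL, 0, &xHigherPriorityTaskWoken );

    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}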
*/ +#endif /* configUSE_TIMERS == 1 */ + + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/api_lib.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/api_lib.c new file mode 100644 index 0000000..9a30ec0 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/api_lib.c @@ -0,0 +1,1367 @@ +/** + * @file + * Sequential API External module + * + * @defgroup netconn Netconn API + * @ingroup sequential_api + * Thread-safe, to be called from non-TCPIP threads only. + * TX/RX handling based on @ref netbuf (containing @ref pbuf) + * to avoid copying data around. + * + * @defgroup netconn_common Common functions + * @ingroup netconn + * For use with TCP and UDP + * + * @defgroup netconn_tcp TCP only + * @ingroup netconn + * TCP only functions + * + * @defgroup netconn_udp UDP only + * @ingroup netconn + * UDP only functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels <adam@sics.se> + */ + +/* This is the part of the API that is linked with + the application */ + +#include "lwip/opt.h" + +#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/api.h" +#include "lwip/memp.h" + +#include "lwip/ip.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/priv/api_msg.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/priv/tcpip_priv.h" + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +#include <string.h> + +#define API_MSG_VAR_REF(name) API_VAR_REF(name) +#define API_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct api_msg, name) +#define API_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name, ERR_MEM) +#define API_MSG_VAR_ALLOC_RETURN_NULL(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name, NULL) +#define API_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_API_MSG, name) + +#if TCP_LISTEN_BACKLOG +/* need to allocate API message for accept so empty message pool does not result in event loss + * see bug #47512: MPU_COMPATIBLE may fail on empty pool */ +#define API_MSG_VAR_ALLOC_ACCEPT(msg) API_MSG_VAR_ALLOC(msg) +#define API_MSG_VAR_FREE_ACCEPT(msg) API_MSG_VAR_FREE(msg) +#else /* TCP_LISTEN_BACKLOG */ +#define API_MSG_VAR_ALLOC_ACCEPT(msg) +#define API_MSG_VAR_FREE_ACCEPT(msg) +#endif /* TCP_LISTEN_BACKLOG */ + +#if LWIP_NETCONN_FULLDUPLEX +#define NETCONN_RECVMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->recvmbox) && (((conn)->flags & NETCONN_FLAG_MBOXINVALID) == 0)) +#define NETCONN_ACCEPTMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->acceptmbox) && (((conn)->flags & (NETCONN_FLAG_MBOXCLOSED|NETCONN_FLAG_MBOXINVALID)) == 0)) +#define NETCONN_MBOX_WAITING_INC(conn) SYS_ARCH_INC(conn->mbox_threads_waiting, 1) +#define NETCONN_MBOX_WAITING_DEC(conn) SYS_ARCH_DEC(conn->mbox_threads_waiting, 1) +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define NETCONN_RECVMBOX_WAITABLE(conn) sys_mbox_valid(&(conn)->recvmbox) +#define NETCONN_ACCEPTMBOX_WAITABLE(conn) (sys_mbox_valid(&(conn)->acceptmbox) && (((conn)->flags & NETCONN_FLAG_MBOXCLOSED) == 0)) +#define NETCONN_MBOX_WAITING_INC(conn) +#define NETCONN_MBOX_WAITING_DEC(conn) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +static err_t netconn_close_shutdown(struct netconn *conn, u8_t how); + +/** + * Call the lower part of a netconn_* function + * This function is then running in the thread context + * of tcpip_thread and has exclusive access to lwIP core code. + * + * @param fn function to call + * @param apimsg a struct containing the function to call and its parameters + * @return ERR_OK if the function was called, another err_t if not + */ +static err_t +netconn_apimsg(tcpip_callback_fn fn, struct api_msg *apimsg) +{ + err_t err; + +#ifdef LWIP_DEBUG + /* catch functions that don't set err */ + apimsg->err = ERR_VAL; +#endif /* LWIP_DEBUG */ + +#if LWIP_NETCONN_SEM_PER_THREAD + apimsg->op_completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + err = tcpip_send_msg_wait_sem(fn, apimsg, LWIP_API_MSG_SEM(apimsg)); + if (err == ERR_OK) { + return apimsg->err; + } + return err; +} + +/** + * Create a new netconn (of a specific type) that has a callback function. + * The corresponding pcb is also created. 
+ * + * @param t the type of 'connection' to create (@see enum netconn_type) + * @param proto the IP protocol for RAW IP pcbs + * @param callback a function to call on status changes (RX available, TX'ed) + * @return a newly allocated struct netconn or + * NULL on memory error + */ +struct netconn * +netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto, netconn_callback callback) +{ + struct netconn *conn; + API_MSG_VAR_DECLARE(msg); + API_MSG_VAR_ALLOC_RETURN_NULL(msg); + + conn = netconn_alloc(t, callback); + if (conn != NULL) { + err_t err; + + API_MSG_VAR_REF(msg).msg.n.proto = proto; + API_MSG_VAR_REF(msg).conn = conn; + err = netconn_apimsg(lwip_netconn_do_newconn, &API_MSG_VAR_REF(msg)); + if (err != ERR_OK) { + LWIP_ASSERT("freeing conn without freeing pcb", conn->pcb.tcp == NULL); + LWIP_ASSERT("conn has no recvmbox", sys_mbox_valid(&conn->recvmbox)); +#if LWIP_TCP + LWIP_ASSERT("conn->acceptmbox shouldn't exist", !sys_mbox_valid(&conn->acceptmbox)); +#endif /* LWIP_TCP */ +#if !LWIP_NETCONN_SEM_PER_THREAD + LWIP_ASSERT("conn has no op_completed", sys_sem_valid(&conn->op_completed)); + sys_sem_free(&conn->op_completed); +#endif /* !LWIP_NETCONN_SEM_PER_THREAD */ + sys_mbox_free(&conn->recvmbox); + memp_free(MEMP_NETCONN, conn); + API_MSG_VAR_FREE(msg); + return NULL; + } + } + API_MSG_VAR_FREE(msg); + return conn; +} + +/** + * @ingroup netconn_common + * Close a netconn 'connection' and free all its resources but not the netconn itself. + * UDP and RAW connections are completely closed, TCP pcbs might still be in a waitstate + * after this returns. + * + * @param conn the netconn to delete + * @return ERR_OK if the connection was deleted + */ +err_t +netconn_prepare_delete(struct netconn *conn) +{ + err_t err; + API_MSG_VAR_DECLARE(msg); + + /* No ASSERT here because possible to get a (conn == NULL) if we got an accept error */ + if (conn == NULL) { + return ERR_OK; + } + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + /* get the time we started, which is later compared to + sys_now() + conn->send_timeout */ + API_MSG_VAR_REF(msg).msg.sd.time_started = sys_now(); +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ +#if LWIP_TCP + API_MSG_VAR_REF(msg).msg.sd.polls_left = + ((LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT + TCP_SLOW_INTERVAL - 1) / TCP_SLOW_INTERVAL) + 1; +#endif /* LWIP_TCP */ +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + err = netconn_apimsg(lwip_netconn_do_delconn, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + if (err != ERR_OK) { + return err; + } + return ERR_OK; +} + +/** + * @ingroup netconn_common + * Close a netconn 'connection' and free its resources. + * UDP and RAW connections are completely closed, TCP pcbs might still be in a waitstate + * after this returns. + * + * @param conn the netconn to delete + * @return ERR_OK if the connection was deleted + */ +err_t +netconn_delete(struct netconn *conn) +{ + err_t err; + + /* No ASSERT here because possible to get a (conn == NULL) if we got an accept error */ + if (conn == NULL) { + return ERR_OK; + } + +#if LWIP_NETCONN_FULLDUPLEX + if (conn->flags & NETCONN_FLAG_MBOXINVALID) { + /* Already called netconn_prepare_delete() before */ + err = ERR_OK; + } else +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + err = netconn_prepare_delete(conn); + } + if (err == ERR_OK) { + netconn_free(conn); + } + return err; +} + +/** + * Get the local or remote IP address and port of a netconn. + * For RAW netconns, this returns the protocol instead of a port! 
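A minimal client-side sketch of the create/connect/delete lifecycle provided by the functions above; the server address, port and payload are illustrative:

#include "lwip/api.h"

err_t demo_tcp_client(const ip_addr_t *server_ip, u16_t server_port)
{
  struct netconn *conn;
  err_t err;

  /* netconn_new() is a convenience macro over
     netconn_new_with_proto_and_callback(). */
  conn = netconn_new(NETCONN_TCP);
  if (conn == NULL) {
    return ERR_MEM;
  }

  err = netconn_connect(conn, server_ip, server_port);
  if (err == ERR_OK) {
    const char msg[] = "hello";
    /* NETCONN_COPY: lwIP copies the data, so 'msg' may live on the stack. */
    err = netconn_write(conn, msg, sizeof(msg) - 1, NETCONN_COPY);
  }

  /* Closes the pcb (TCP may linger in a wait state) and frees the netconn. */
  (void)netconn_delete(conn);
  return err;
}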
+ * + * @param conn the netconn to query + * @param addr a pointer to which to save the IP address + * @param port a pointer to which to save the port (or protocol for RAW) + * @param local 1 to get the local IP address, 0 to get the remote one + * @return ERR_CONN for invalid connections + * ERR_OK if the information was retrieved + */ +err_t +netconn_getaddr(struct netconn *conn, ip_addr_t *addr, u16_t *port, u8_t local) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_getaddr: invalid conn", (conn != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_getaddr: invalid addr", (addr != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_getaddr: invalid port", (port != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.ad.local = local; +#if LWIP_MPU_COMPATIBLE + err = netconn_apimsg(lwip_netconn_do_getaddr, &API_MSG_VAR_REF(msg)); + *addr = msg->msg.ad.ipaddr; + *port = msg->msg.ad.port; +#else /* LWIP_MPU_COMPATIBLE */ + msg.msg.ad.ipaddr = addr; + msg.msg.ad.port = port; + err = netconn_apimsg(lwip_netconn_do_getaddr, &msg); +#endif /* LWIP_MPU_COMPATIBLE */ + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Bind a netconn to a specific local IP address and port. + * Binding one netconn twice might not always be checked correctly! + * + * @param conn the netconn to bind + * @param addr the local IP address to bind the netconn to + * (use IP4_ADDR_ANY/IP6_ADDR_ANY to bind to all addresses) + * @param port the local port to bind the netconn to (not used for RAW) + * @return ERR_OK if bound, any other err_t on failure + */ +err_t +netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_bind: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (addr == NULL) { + addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV4 && LWIP_IPV6 + /* "Socket API like" dual-stack support: If IP to bind to is IP6_ADDR_ANY, + * and NETCONN_FLAG_IPV6_V6ONLY is 0, use IP_ANY_TYPE to bind + */ + if ((netconn_get_ipv6only(conn) == 0) && + ip_addr_cmp(addr, IP6_ADDR_ANY)) { + addr = IP_ANY_TYPE; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.ipaddr = API_MSG_VAR_REF(addr); + API_MSG_VAR_REF(msg).msg.bc.port = port; + err = netconn_apimsg(lwip_netconn_do_bind, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Bind a netconn to a specific interface and port. + * Binding one netconn twice might not always be checked correctly! + * + * @param conn the netconn to bind + * @param if_idx the local interface index to bind the netconn to + * @return ERR_OK if bound, any other err_t on failure + */ +err_t +netconn_bind_if(struct netconn *conn, u8_t if_idx) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_bind_if: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.bc.if_idx = if_idx; + err = netconn_apimsg(lwip_netconn_do_bind_if, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_common + * Connect a netconn to a specific remote IP address and port. 
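+ *
+ * Example usage (illustrative sketch added in this changeset, not from the
+ * upstream lwIP sources; 192.0.2.1:80 is a placeholder TEST-NET address):
+ *
+ *   ip_addr_t remote;
+ *   IP_ADDR4(&remote, 192, 0, 2, 1);
+ *   if (netconn_connect(conn, &remote, 80) != ERR_OK) {
+ *     // connect failed (e.g. timeout or connection refused for TCP)
+ *   }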
+ *
+ * @param conn the netconn to connect
+ * @param addr the remote IP address to connect to
+ * @param port the remote port to connect to (not used for RAW)
+ * @return ERR_OK if connected, return value of tcp_/udp_/raw_connect otherwise
+ */
+err_t
+netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port)
+{
+  API_MSG_VAR_DECLARE(msg);
+  err_t err;
+
+  LWIP_ERROR("netconn_connect: invalid conn", (conn != NULL), return ERR_ARG;);
+
+#if LWIP_IPV4
+  /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */
+  if (addr == NULL) {
+    addr = IP4_ADDR_ANY;
+  }
+#endif /* LWIP_IPV4 */
+
+  API_MSG_VAR_ALLOC(msg);
+  API_MSG_VAR_REF(msg).conn = conn;
+  API_MSG_VAR_REF(msg).msg.bc.ipaddr = API_MSG_VAR_REF(addr);
+  API_MSG_VAR_REF(msg).msg.bc.port = port;
+  err = netconn_apimsg(lwip_netconn_do_connect, &API_MSG_VAR_REF(msg));
+  API_MSG_VAR_FREE(msg);
+
+  return err;
+}
+
+/**
+ * @ingroup netconn_udp
+ * Disconnect a netconn from its current peer (only valid for UDP netconns).
+ *
+ * @param conn the netconn to disconnect
+ * @return See @ref err_t
+ */
+err_t
+netconn_disconnect(struct netconn *conn)
+{
+  API_MSG_VAR_DECLARE(msg);
+  err_t err;
+
+  LWIP_ERROR("netconn_disconnect: invalid conn", (conn != NULL), return ERR_ARG;);
+
+  API_MSG_VAR_ALLOC(msg);
+  API_MSG_VAR_REF(msg).conn = conn;
+  err = netconn_apimsg(lwip_netconn_do_disconnect, &API_MSG_VAR_REF(msg));
+  API_MSG_VAR_FREE(msg);
+
+  return err;
+}
+
+/**
+ * @ingroup netconn_tcp
+ * Set a TCP netconn into listen mode
+ *
+ * @param conn the tcp netconn to set to listen mode
+ * @param backlog the listen backlog, only used if TCP_LISTEN_BACKLOG==1
+ * @return ERR_OK if the netconn was set to listen (UDP and RAW netconns
+ *         don't return any error (yet?))
+ */
+err_t
+netconn_listen_with_backlog(struct netconn *conn, u8_t backlog)
+{
+#if LWIP_TCP
+  API_MSG_VAR_DECLARE(msg);
+  err_t err;
+
+  /* This does no harm. If TCP_LISTEN_BACKLOG is off, backlog is unused. */
+  LWIP_UNUSED_ARG(backlog);
+
+  LWIP_ERROR("netconn_listen: invalid conn", (conn != NULL), return ERR_ARG;);
+
+  API_MSG_VAR_ALLOC(msg);
+  API_MSG_VAR_REF(msg).conn = conn;
+#if TCP_LISTEN_BACKLOG
+  API_MSG_VAR_REF(msg).msg.lb.backlog = backlog;
+#endif /* TCP_LISTEN_BACKLOG */
+  err = netconn_apimsg(lwip_netconn_do_listen, &API_MSG_VAR_REF(msg));
+  API_MSG_VAR_FREE(msg);
+
+  return err;
+#else /* LWIP_TCP */
+  LWIP_UNUSED_ARG(conn);
+  LWIP_UNUSED_ARG(backlog);
+  return ERR_ARG;
+#endif /* LWIP_TCP */
+}
+
+/**
+ * @ingroup netconn_tcp
+ * Accept a new connection on a TCP listening netconn.
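+ *
+ * Example usage (illustrative sketch added in this changeset, not from the
+ * upstream lwIP sources; error handling elided):
+ *
+ *   struct netconn *listener = netconn_new(NETCONN_TCP);
+ *   struct netconn *client;
+ *   netconn_bind(listener, IP_ADDR_ANY, 7);
+ *   netconn_listen(listener);
+ *   while (netconn_accept(listener, &client) == ERR_OK) {
+ *     // serve 'client', then close and delete it
+ *     netconn_close(client);
+ *     netconn_delete(client);
+ *   }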
+ *
+ * @param conn the TCP listen netconn
+ * @param new_conn pointer where the new connection is stored
+ * @return ERR_OK if a new connection has been received or an error
+ *         code otherwise
+ */
+err_t
+netconn_accept(struct netconn *conn, struct netconn **new_conn)
+{
+#if LWIP_TCP
+  err_t err;
+  void *accept_ptr;
+  struct netconn *newconn;
+#if TCP_LISTEN_BACKLOG
+  API_MSG_VAR_DECLARE(msg);
+#endif /* TCP_LISTEN_BACKLOG */
+
+  LWIP_ERROR("netconn_accept: invalid pointer", (new_conn != NULL), return ERR_ARG;);
+  *new_conn = NULL;
+  LWIP_ERROR("netconn_accept: invalid conn", (conn != NULL), return ERR_ARG;);
+
+  /* NOTE: Although the opengroup spec says a pending error shall be returned to
+     send/recv/getsockopt(SO_ERROR) only, we return it for listening
+     connections also, to handle embedded-system errors */
+  err = netconn_err(conn);
+  if (err != ERR_OK) {
+    /* return pending error */
+    return err;
+  }
+  if (!NETCONN_ACCEPTMBOX_WAITABLE(conn)) {
+    /* don't accept if closed: this might block the application task
+       waiting on acceptmbox forever! */
+    return ERR_CLSD;
+  }
+
+  API_MSG_VAR_ALLOC_ACCEPT(msg);
+
+  NETCONN_MBOX_WAITING_INC(conn);
+  if (netconn_is_nonblocking(conn)) {
+    if (sys_arch_mbox_tryfetch(&conn->acceptmbox, &accept_ptr) == SYS_ARCH_TIMEOUT) {
+      API_MSG_VAR_FREE_ACCEPT(msg);
+      NETCONN_MBOX_WAITING_DEC(conn);
+      return ERR_WOULDBLOCK;
+    }
+  } else {
+#if LWIP_SO_RCVTIMEO
+    if (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, conn->recv_timeout) == SYS_ARCH_TIMEOUT) {
+      API_MSG_VAR_FREE_ACCEPT(msg);
+      NETCONN_MBOX_WAITING_DEC(conn);
+      return ERR_TIMEOUT;
+    }
+#else
+    sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0);
+#endif /* LWIP_SO_RCVTIMEO*/
+  }
+  NETCONN_MBOX_WAITING_DEC(conn);
+#if LWIP_NETCONN_FULLDUPLEX
+  if (conn->flags & NETCONN_FLAG_MBOXINVALID) {
+    if (lwip_netconn_is_deallocated_msg(accept_ptr)) {
+      /* the netconn has been closed from another thread */
+      API_MSG_VAR_FREE_ACCEPT(msg);
+      return ERR_CONN;
+    }
+  }
+#endif
+
+  /* Register event with callback */
+  API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0);
+
+  if (lwip_netconn_is_err_msg(accept_ptr, &err)) {
+    /* a connection has been aborted: e.g. out of pcbs or out of netconns during accept */
+    API_MSG_VAR_FREE_ACCEPT(msg);
+    return err;
+  }
+  if (accept_ptr == NULL) {
+    /* connection has been aborted */
+    API_MSG_VAR_FREE_ACCEPT(msg);
+    return ERR_CLSD;
+  }
+  newconn = (struct netconn *)accept_ptr;
+#if TCP_LISTEN_BACKLOG
+  /* Let the stack know that we have accepted the connection. */
+  API_MSG_VAR_REF(msg).conn = newconn;
+  /* don't care for the return value of lwip_netconn_do_accepted */
+  netconn_apimsg(lwip_netconn_do_accepted, &API_MSG_VAR_REF(msg));
+  API_MSG_VAR_FREE(msg);
+#endif /* TCP_LISTEN_BACKLOG */
+
+  *new_conn = newconn;
+  /* don't set conn->last_err: it's only ERR_OK, anyway */
+  return ERR_OK;
+#else /* LWIP_TCP */
+  LWIP_UNUSED_ARG(conn);
+  LWIP_UNUSED_ARG(new_conn);
+  return ERR_ARG;
+#endif /* LWIP_TCP */
+}
+
+/**
+ * @ingroup netconn_common
+ * Receive data: actual implementation that doesn't care whether pbuf or netbuf
+ * is received (this is internal, it's just here for describing common errors)
+ *
+ * @param conn the netconn from which to receive data
+ * @param new_buf pointer where a new pbuf/netbuf is stored when received data
+ * @param apiflags flags that control function behaviour.
For now only:
+ * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data
+ * @return ERR_OK if data has been received, an error code otherwise (timeout,
+ *                memory error or another error)
+ *         ERR_CONN if not connected
+ *         ERR_CLSD if TCP connection has been closed
+ *         ERR_WOULDBLOCK if the netconn is nonblocking but would block to wait for data
+ *         ERR_TIMEOUT if the netconn has a receive timeout and no data was received
+ */
+static err_t
+netconn_recv_data(struct netconn *conn, void **new_buf, u8_t apiflags)
+{
+  void *buf = NULL;
+  u16_t len;
+
+  LWIP_ERROR("netconn_recv: invalid pointer", (new_buf != NULL), return ERR_ARG;);
+  *new_buf = NULL;
+  LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL), return ERR_ARG;);
+
+  if (!NETCONN_RECVMBOX_WAITABLE(conn)) {
+    err_t err = netconn_err(conn);
+    if (err != ERR_OK) {
+      /* return pending error */
+      return err;
+    }
+    return ERR_CONN;
+  }
+
+  NETCONN_MBOX_WAITING_INC(conn);
+  if (netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK) ||
+      (conn->flags & NETCONN_FLAG_MBOXCLOSED) || (conn->pending_err != ERR_OK)) {
+    if (sys_arch_mbox_tryfetch(&conn->recvmbox, &buf) == SYS_ARCH_TIMEOUT) {
+      err_t err;
+      NETCONN_MBOX_WAITING_DEC(conn);
+      err = netconn_err(conn);
+      if (err != ERR_OK) {
+        /* return pending error */
+        return err;
+      }
+      if (conn->flags & NETCONN_FLAG_MBOXCLOSED) {
+        return ERR_CONN;
+      }
+      return ERR_WOULDBLOCK;
+    }
+  } else {
+#if LWIP_SO_RCVTIMEO
+    if (sys_arch_mbox_fetch(&conn->recvmbox, &buf, conn->recv_timeout) == SYS_ARCH_TIMEOUT) {
+      NETCONN_MBOX_WAITING_DEC(conn);
+      return ERR_TIMEOUT;
+    }
+#else
+    sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0);
+#endif /* LWIP_SO_RCVTIMEO*/
+  }
+  NETCONN_MBOX_WAITING_DEC(conn);
+#if LWIP_NETCONN_FULLDUPLEX
+  if (conn->flags & NETCONN_FLAG_MBOXINVALID) {
+    if (lwip_netconn_is_deallocated_msg(buf)) {
+      /* the netconn has been closed from another thread; note: there is no
+         API message to free in this function (the stray accept-message free
+         that used to be here referenced an undeclared variable) */
+      return ERR_CONN;
+    }
+  }
+#endif
+
+#if LWIP_TCP
+#if (LWIP_UDP || LWIP_RAW)
+  if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP)
+#endif /* (LWIP_UDP || LWIP_RAW) */
+  {
+    err_t err;
+    /* Check if this is an error message or a pbuf */
+    if (lwip_netconn_is_err_msg(buf, &err)) {
+      /* new_buf has been zeroed above already */
+      if (err == ERR_CLSD) {
+        /* connection closed translates to ERR_OK with *new_buf == NULL */
+        return ERR_OK;
+      }
+      return err;
+    }
+    len = ((struct pbuf *)buf)->tot_len;
+  }
+#endif /* LWIP_TCP */
+#if LWIP_TCP && (LWIP_UDP || LWIP_RAW)
+  else
+#endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */
+#if (LWIP_UDP || LWIP_RAW)
+  {
+    LWIP_ASSERT("buf != NULL", buf != NULL);
+    len = netbuf_len((struct netbuf *)buf);
+  }
+#endif /* (LWIP_UDP || LWIP_RAW) */
+
+#if LWIP_SO_RCVBUF
+  SYS_ARCH_DEC(conn->recv_avail, len);
+#endif /* LWIP_SO_RCVBUF */
+  /* Register event with callback */
+  API_EVENT(conn, NETCONN_EVT_RCVMINUS, len);
+
+  LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_recv_data: received %p, len=%"U16_F"\n", buf, len));
+
+  *new_buf = buf;
+  /* don't set conn->last_err: it's only ERR_OK, anyway */
+  return ERR_OK;
+}
+
+#if LWIP_TCP
+static err_t
+netconn_tcp_recvd_msg(struct netconn *conn, size_t len, struct api_msg *msg)
+{
+  LWIP_ERROR("netconn_tcp_recvd_msg: invalid conn", (conn != NULL) &&
+             NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;);
+
+  msg->conn = conn;
+  msg->msg.r.len = len;
+
+  return netconn_apimsg(lwip_netconn_do_recv, msg);
+}
+
+err_t
+netconn_tcp_recvd(struct netconn *conn, size_t len)
+{
+  err_t err;
+  API_MSG_VAR_DECLARE(msg);
+  LWIP_ERROR("netconn_tcp_recvd: invalid conn", (conn != NULL) &&
+             NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;);
+
+  API_MSG_VAR_ALLOC(msg);
+  err = netconn_tcp_recvd_msg(conn, len, &API_VAR_REF(msg));
+  API_MSG_VAR_FREE(msg);
+  return err;
+}
+
+static err_t
+netconn_recv_data_tcp(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags)
+{
+  err_t err;
+  struct pbuf *buf;
+  API_MSG_VAR_DECLARE(msg);
+#if LWIP_MPU_COMPATIBLE
+  msg = NULL;
+#endif
+
+  if (!NETCONN_RECVMBOX_WAITABLE(conn)) {
+    /* This only happens when calling this function more than once *after* receiving FIN */
+    return ERR_CONN;
+  }
+  if (netconn_is_flag_set(conn, NETCONN_FIN_RX_PENDING)) {
+    netconn_clear_flags(conn, NETCONN_FIN_RX_PENDING);
+    goto handle_fin;
+  }
+
+  if (!(apiflags & NETCONN_NOAUTORCVD)) {
+    /* need to allocate API message here so empty message pool does not result in event loss
+     * see bug #47512: MPU_COMPATIBLE may fail on empty pool */
+    API_MSG_VAR_ALLOC(msg);
+  }
+
+  err = netconn_recv_data(conn, (void **)new_buf, apiflags);
+  if (err != ERR_OK) {
+    if (!(apiflags & NETCONN_NOAUTORCVD)) {
+      API_MSG_VAR_FREE(msg);
+    }
+    return err;
+  }
+  buf = *new_buf;
+  if (!(apiflags & NETCONN_NOAUTORCVD)) {
+    /* Let the stack know that we have taken the data. */
+    u16_t len = buf ? buf->tot_len : 1;
+    /* don't care for the return value of lwip_netconn_do_recv */
+    /* @todo: this should really be fixed, e.g. by retrying in poll on error */
+    netconn_tcp_recvd_msg(conn, len, &API_VAR_REF(msg));
+    API_MSG_VAR_FREE(msg);
+  }
+
+  /* If we are closed, we indicate that we no longer wish to use the socket */
+  if (buf == NULL) {
+    if (apiflags & NETCONN_NOFIN) {
+      /* received a FIN but the caller cannot handle it right now:
+         re-enqueue it and return "no data" */
+      netconn_set_flags(conn, NETCONN_FIN_RX_PENDING);
+      return ERR_WOULDBLOCK;
+    } else {
+handle_fin:
+      API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0);
+      if (conn->pcb.ip == NULL) {
+        /* race condition: RST during recv */
+        err = netconn_err(conn);
+        if (err != ERR_OK) {
+          return err;
+        }
+        return ERR_RST;
+      }
+      /* RX side is closed, so deallocate the recvmbox */
+      netconn_close_shutdown(conn, NETCONN_SHUT_RD);
+      /* Don't store ERR_CLSD as conn->err since we are only half-closed */
+      return ERR_CLSD;
+    }
+  }
+  return err;
+}
+
+/**
+ * @ingroup netconn_tcp
+ * Receive data (in form of a pbuf) from a TCP netconn
+ *
+ * @param conn the netconn from which to receive data
+ * @param new_buf pointer where a new pbuf is stored when received data
+ * @return ERR_OK if data has been received, an error code otherwise (timeout,
+ *                memory error or another error, @see netconn_recv_data)
+ *         ERR_ARG if conn is not a TCP netconn
+ */
+err_t
+netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf)
+{
+  LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) &&
+             NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;);
+
+  return netconn_recv_data_tcp(conn, new_buf, 0);
+}
+
+/**
+ * @ingroup netconn_tcp
+ * Receive data (in form of a pbuf) from a TCP netconn
+ *
+ * @param conn the netconn from which to receive data
+ * @param new_buf pointer where a new pbuf is stored when received data
+ * @param apiflags flags that control function behaviour.
For now only: + * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error, @see netconn_recv_data) + * ERR_ARG if conn is not a TCP netconn + */ +err_t +netconn_recv_tcp_pbuf_flags(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags) +{ + LWIP_ERROR("netconn_recv_tcp_pbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) == NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data_tcp(conn, new_buf, apiflags); +} +#endif /* LWIP_TCP */ + +/** + * Receive data (in form of a netbuf) from a UDP or RAW netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_ARG if conn is not a UDP/RAW netconn + */ +err_t +netconn_recv_udp_raw_netbuf(struct netconn *conn, struct netbuf **new_buf) +{ + LWIP_ERROR("netconn_recv_udp_raw_netbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) != NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data(conn, (void **)new_buf, 0); +} + +/** + * Receive data (in form of a netbuf) from a UDP or RAW netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @param apiflags flags that control function behaviour. For now only: + * - NETCONN_DONTBLOCK: only read data that is available now, don't wait for more data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + * ERR_ARG if conn is not a UDP/RAW netconn + */ +err_t +netconn_recv_udp_raw_netbuf_flags(struct netconn *conn, struct netbuf **new_buf, u8_t apiflags) +{ + LWIP_ERROR("netconn_recv_udp_raw_netbuf: invalid conn", (conn != NULL) && + NETCONNTYPE_GROUP(netconn_type(conn)) != NETCONN_TCP, return ERR_ARG;); + + return netconn_recv_data(conn, (void **)new_buf, apiflags); +} + +/** + * @ingroup netconn_common + * Receive data (in form of a netbuf containing a packet buffer) from a netconn + * + * @param conn the netconn from which to receive data + * @param new_buf pointer where a new netbuf is stored when received data + * @return ERR_OK if data has been received, an error code otherwise (timeout, + * memory error or another error) + */ +err_t +netconn_recv(struct netconn *conn, struct netbuf **new_buf) +{ +#if LWIP_TCP + struct netbuf *buf = NULL; + err_t err; +#endif /* LWIP_TCP */ + + LWIP_ERROR("netconn_recv: invalid pointer", (new_buf != NULL), return ERR_ARG;); + *new_buf = NULL; + LWIP_ERROR("netconn_recv: invalid conn", (conn != NULL), return ERR_ARG;); + +#if LWIP_TCP +#if (LWIP_UDP || LWIP_RAW) + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) +#endif /* (LWIP_UDP || LWIP_RAW) */ + { + struct pbuf *p = NULL; + /* This is not a listening netconn, since recvmbox is set */ + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + return ERR_MEM; + } + + err = netconn_recv_data_tcp(conn, &p, 0); + if (err != ERR_OK) { + memp_free(MEMP_NETBUF, buf); + return err; + } + LWIP_ASSERT("p != NULL", p != NULL); + + buf->p = p; + buf->ptr = p; + buf->port = 0; + ip_addr_set_zero(&buf->addr); + *new_buf = buf; + /* don't set conn->last_err: it's only ERR_OK, anyway */ + return ERR_OK; + } +#endif /* LWIP_TCP */ +#if LWIP_TCP && (LWIP_UDP || 
LWIP_RAW) + else +#endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */ + { +#if (LWIP_UDP || LWIP_RAW) + return netconn_recv_data(conn, (void **)new_buf, 0); +#endif /* (LWIP_UDP || LWIP_RAW) */ + } +} + +/** + * @ingroup netconn_udp + * Send data (in form of a netbuf) to a specific remote IP address and port. + * Only to be used for UDP and RAW netconns (not TCP). + * + * @param conn the netconn over which to send data + * @param buf a netbuf containing the data to send + * @param addr the remote IP address to which to send the data + * @param port the remote port to which to send the data + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_sendto(struct netconn *conn, struct netbuf *buf, const ip_addr_t *addr, u16_t port) +{ + if (buf != NULL) { + ip_addr_set(&buf->addr, addr); + buf->port = port; + return netconn_send(conn, buf); + } + return ERR_VAL; +} + +/** + * @ingroup netconn_udp + * Send data over a UDP or RAW netconn (that is already connected). + * + * @param conn the UDP or RAW netconn over which to send data + * @param buf a netbuf containing the data to send + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_send(struct netconn *conn, struct netbuf *buf) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_send: invalid conn", (conn != NULL), return ERR_ARG;); + + LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_send: sending %"U16_F" bytes\n", buf->p->tot_len)); + + API_MSG_VAR_ALLOC(msg); + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.b = buf; + err = netconn_apimsg(lwip_netconn_do_send, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} + +/** + * @ingroup netconn_tcp + * Send data over a TCP netconn. + * + * @param conn the TCP netconn over which to send data + * @param dataptr pointer to the application buffer that contains the data to send + * @param size size of the application data to send + * @param apiflags combination of following flags : + * - NETCONN_COPY: data will be copied into memory belonging to the stack + * - NETCONN_MORE: for TCP connection, PSH flag will be set on last segment sent + * - NETCONN_DONTBLOCK: only write the data if all data can be written at once + * @param bytes_written pointer to a location that receives the number of written bytes + * @return ERR_OK if data was sent, any other err_t on error + */ +err_t +netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size, + u8_t apiflags, size_t *bytes_written) +{ + struct netvector vector; + vector.ptr = dataptr; + vector.len = size; + return netconn_write_vectors_partly(conn, &vector, 1, apiflags, bytes_written); +} + +/** + * Send vectorized data atomically over a TCP netconn. 
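+ *
+ * Example usage (illustrative sketch added in this changeset, not from the
+ * upstream lwIP sources; 'header'/'payload' are placeholder buffers and
+ * error handling is elided):
+ *
+ *   struct netvector vecs[2];
+ *   size_t written;
+ *   err_t err;
+ *   vecs[0].ptr = header;  vecs[0].len = header_len;
+ *   vecs[1].ptr = payload; vecs[1].len = payload_len;
+ *   // both vectors are handed to the stack in one atomic write
+ *   err = netconn_write_vectors_partly(conn, vecs, 2, NETCONN_COPY, &written);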
+ *
+ * @param conn the TCP netconn over which to send data
+ * @param vectors array of vectors containing data to send
+ * @param vectorcnt number of vectors in the array
+ * @param apiflags combination of following flags :
+ * - NETCONN_COPY: data will be copied into memory belonging to the stack
+ * - NETCONN_MORE: for TCP connection, PSH flag will be set on last segment sent
+ * - NETCONN_DONTBLOCK: only write the data if all data can be written at once
+ * @param bytes_written pointer to a location that receives the number of written bytes
+ * @return ERR_OK if data was sent, any other err_t on error
+ */
+err_t
+netconn_write_vectors_partly(struct netconn *conn, struct netvector *vectors, u16_t vectorcnt,
+                             u8_t apiflags, size_t *bytes_written)
+{
+  API_MSG_VAR_DECLARE(msg);
+  err_t err;
+  u8_t dontblock;
+  size_t size;
+  int i;
+
+  LWIP_ERROR("netconn_write: invalid conn", (conn != NULL), return ERR_ARG;);
+  LWIP_ERROR("netconn_write: invalid conn->type", (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP), return ERR_VAL;);
+  dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK);
+#if LWIP_SO_SNDTIMEO
+  if (conn->send_timeout != 0) {
+    dontblock = 1;
+  }
+#endif /* LWIP_SO_SNDTIMEO */
+  if (dontblock && !bytes_written) {
+    /* This implies netconn_write() cannot be used for non-blocking send, since
+       it has no way to return the number of bytes written. */
+    return ERR_VAL;
+  }
+
+  /* sum up the total size */
+  size = 0;
+  for (i = 0; i < vectorcnt; i++) {
+    size += vectors[i].len;
+    if (size < vectors[i].len) {
+      /* overflow */
+      return ERR_VAL;
+    }
+  }
+  if (size == 0) {
+    return ERR_OK;
+  } else if (size > SSIZE_MAX) {
+    ssize_t limited;
+    /* this is required by the socket layer (cannot send full size_t range) */
+    if (!bytes_written) {
+      return ERR_VAL;
+    }
+    /* limit the amount of data to send */
+    limited = SSIZE_MAX;
+    size = (size_t)limited;
+  }
+
+  API_MSG_VAR_ALLOC(msg);
+  /* a non-blocking write sends as much as possible */
+  API_MSG_VAR_REF(msg).conn = conn;
+  API_MSG_VAR_REF(msg).msg.w.vector = vectors;
+  API_MSG_VAR_REF(msg).msg.w.vector_cnt = vectorcnt;
+  API_MSG_VAR_REF(msg).msg.w.vector_off = 0;
+  API_MSG_VAR_REF(msg).msg.w.apiflags = apiflags;
+  API_MSG_VAR_REF(msg).msg.w.len = size;
+  API_MSG_VAR_REF(msg).msg.w.offset = 0;
+#if LWIP_SO_SNDTIMEO
+  if (conn->send_timeout != 0) {
+    /* get the time we started, which is later compared to
+       sys_now() + conn->send_timeout */
+    API_MSG_VAR_REF(msg).msg.w.time_started = sys_now();
+  } else {
+    API_MSG_VAR_REF(msg).msg.w.time_started = 0;
+  }
+#endif /* LWIP_SO_SNDTIMEO */
+
+  /* For locking the core: this _can_ be delayed on low memory/low send buffer,
+     but if it is, this is done inside api_msg.c:do_write(), so we can use the
+     non-blocking version here. */
+  err = netconn_apimsg(lwip_netconn_do_write, &API_MSG_VAR_REF(msg));
+  if (err == ERR_OK) {
+    if (bytes_written != NULL) {
+      *bytes_written = API_MSG_VAR_REF(msg).msg.w.offset;
+    }
+    /* for blocking, check all requested bytes were written, NOTE: send_timeout is
+       treated as dontblock (see dontblock assignment above) */
+    if (!dontblock) {
+      LWIP_ASSERT("do_write failed to write all bytes", API_MSG_VAR_REF(msg).msg.w.offset == size);
+    }
+  }
+  API_MSG_VAR_FREE(msg);
+
+  return err;
+}
+
+/**
+ * @ingroup netconn_tcp
+ * Close or shutdown a TCP netconn (doesn't delete it).
+ *
+ * @param conn the TCP netconn to close or shutdown
+ * @param how fully close or only shutdown one side?
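+ *            (NETCONN_SHUT_RD, NETCONN_SHUT_WR or NETCONN_SHUT_RDWR; see
+ *            netconn_shutdown() below for how these are combined from the
+ *            shut_rx/shut_tx arguments)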
+ * @return ERR_OK if the netconn was closed, any other err_t on error
+ */
+static err_t
+netconn_close_shutdown(struct netconn *conn, u8_t how)
+{
+  API_MSG_VAR_DECLARE(msg);
+  err_t err;
+  LWIP_UNUSED_ARG(how);
+
+  LWIP_ERROR("netconn_close: invalid conn", (conn != NULL), return ERR_ARG;);
+
+  API_MSG_VAR_ALLOC(msg);
+  API_MSG_VAR_REF(msg).conn = conn;
+#if LWIP_TCP
+  /* shutting down both ends is the same as closing */
+  API_MSG_VAR_REF(msg).msg.sd.shut = how;
+#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER
+  /* get the time we started, which is later compared to
+     sys_now() + conn->send_timeout */
+  API_MSG_VAR_REF(msg).msg.sd.time_started = sys_now();
+#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */
+  API_MSG_VAR_REF(msg).msg.sd.polls_left =
+    ((LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT + TCP_SLOW_INTERVAL - 1) / TCP_SLOW_INTERVAL) + 1;
+#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */
+#endif /* LWIP_TCP */
+  err = netconn_apimsg(lwip_netconn_do_close, &API_MSG_VAR_REF(msg));
+  API_MSG_VAR_FREE(msg);
+
+  return err;
+}
+
+/**
+ * @ingroup netconn_tcp
+ * Close a TCP netconn (doesn't delete it).
+ *
+ * @param conn the TCP netconn to close
+ * @return ERR_OK if the netconn was closed, any other err_t on error
+ */
+err_t
+netconn_close(struct netconn *conn)
+{
+  /* shutting down both ends is the same as closing */
+  return netconn_close_shutdown(conn, NETCONN_SHUT_RDWR);
+}
+
+/**
+ * @ingroup netconn_common
+ * Get and reset pending error on a netconn
+ *
+ * @param conn the netconn to get the error from
+ * @return any pending error, or ERR_OK if no error was pending
+ */
+err_t
+netconn_err(struct netconn *conn)
+{
+  err_t err;
+  SYS_ARCH_DECL_PROTECT(lev);
+  if (conn == NULL) {
+    return ERR_OK;
+  }
+  SYS_ARCH_PROTECT(lev);
+  err = conn->pending_err;
+  conn->pending_err = ERR_OK;
+  SYS_ARCH_UNPROTECT(lev);
+  return err;
+}
+
+/**
+ * @ingroup netconn_tcp
+ * Shut down one or both sides of a TCP netconn (doesn't delete it).
+ *
+ * @param conn the TCP netconn to shut down
+ * @param shut_rx shut down the RX side (no more read possible after this)
+ * @param shut_tx shut down the TX side (no more write possible after this)
+ * @return ERR_OK if the netconn was closed, any other err_t on error
+ */
+err_t
+netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx)
+{
+  return netconn_close_shutdown(conn, (u8_t)((shut_rx ? NETCONN_SHUT_RD : 0) | (shut_tx ? NETCONN_SHUT_WR : 0)));
+}
+
+#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD)
+/**
+ * @ingroup netconn_udp
+ * Join multicast groups for UDP netconns.
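+ *
+ * Example usage (illustrative sketch added in this changeset, not from the
+ * upstream lwIP sources; joins 224.0.1.1 via the default interface address):
+ *
+ *   ip_addr_t group;
+ *   err_t err;
+ *   IP_ADDR4(&group, 224, 0, 1, 1);
+ *   err = netconn_join_leave_group(conn, &group, IP_ADDR_ANY, NETCONN_JOIN);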
+ * + * @param conn the UDP netconn for which to change multicast addresses + * @param multiaddr IP address of the multicast group to join or leave + * @param netif_addr the IP address of the network interface on which to send + * the igmp message + * @param join_or_leave flag whether to send a join- or leave-message + * @return ERR_OK if the action was taken, any err_t on error + */ +err_t +netconn_join_leave_group(struct netconn *conn, + const ip_addr_t *multiaddr, + const ip_addr_t *netif_addr, + enum netconn_igmp join_or_leave) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_join_leave_group: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (multiaddr == NULL) { + multiaddr = IP4_ADDR_ANY; + } + if (netif_addr == NULL) { + netif_addr = IP4_ADDR_ANY; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.jl.multiaddr = API_MSG_VAR_REF(multiaddr); + API_MSG_VAR_REF(msg).msg.jl.netif_addr = API_MSG_VAR_REF(netif_addr); + API_MSG_VAR_REF(msg).msg.jl.join_or_leave = join_or_leave; + err = netconn_apimsg(lwip_netconn_do_join_leave_group, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} +/** + * @ingroup netconn_udp + * Join multicast groups for UDP netconns. + * + * @param conn the UDP netconn for which to change multicast addresses + * @param multiaddr IP address of the multicast group to join or leave + * @param if_idx the index of the netif + * @param join_or_leave flag whether to send a join- or leave-message + * @return ERR_OK if the action was taken, any err_t on error + */ +err_t +netconn_join_leave_group_netif(struct netconn *conn, + const ip_addr_t *multiaddr, + u8_t if_idx, + enum netconn_igmp join_or_leave) +{ + API_MSG_VAR_DECLARE(msg); + err_t err; + + LWIP_ERROR("netconn_join_leave_group: invalid conn", (conn != NULL), return ERR_ARG;); + + API_MSG_VAR_ALLOC(msg); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IP_ADDR_ANY alias) to subsequent functions */ + if (multiaddr == NULL) { + multiaddr = IP4_ADDR_ANY; + } + if (if_idx == NETIF_NO_INDEX) { + return ERR_IF; + } +#endif /* LWIP_IPV4 */ + + API_MSG_VAR_REF(msg).conn = conn; + API_MSG_VAR_REF(msg).msg.jl.multiaddr = API_MSG_VAR_REF(multiaddr); + API_MSG_VAR_REF(msg).msg.jl.if_idx = if_idx; + API_MSG_VAR_REF(msg).msg.jl.join_or_leave = join_or_leave; + err = netconn_apimsg(lwip_netconn_do_join_leave_group_netif, &API_MSG_VAR_REF(msg)); + API_MSG_VAR_FREE(msg); + + return err; +} +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/** + * @ingroup netconn_common + * Execute a DNS query, only one IP address is returned + * + * @param name a string representation of the DNS host name to query + * @param addr a preallocated ip_addr_t where to store the resolved IP address + * @param dns_addrtype IP address type (IPv4 / IPv6) + * @return ERR_OK: resolving succeeded + * ERR_MEM: memory error, try again later + * ERR_ARG: dns client not initialized or invalid hostname + * ERR_VAL: dns server response was invalid + */ +#if LWIP_IPV4 && LWIP_IPV6 +err_t +netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype) +#else +err_t +netconn_gethostbyname(const char *name, ip_addr_t *addr) +#endif +{ + API_VAR_DECLARE(struct dns_api_msg, msg); +#if !LWIP_MPU_COMPATIBLE + sys_sem_t sem; +#endif /* LWIP_MPU_COMPATIBLE */ + err_t err; + err_t cberr; + + LWIP_ERROR("netconn_gethostbyname: 
invalid name", (name != NULL), return ERR_ARG;); + LWIP_ERROR("netconn_gethostbyname: invalid addr", (addr != NULL), return ERR_ARG;); +#if LWIP_MPU_COMPATIBLE + if (strlen(name) >= DNS_MAX_NAME_LENGTH) { + return ERR_ARG; + } +#endif + +#ifdef LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE +#if LWIP_IPV4 && LWIP_IPV6 + if (LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, dns_addrtype, &err)) { +#else + if (LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE(name, addr, NETCONN_DNS_DEFAULT, &err)) { +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return err; + } +#endif /* LWIP_HOOK_NETCONN_EXTERNAL_RESOLVE */ + + API_VAR_ALLOC(struct dns_api_msg, MEMP_DNS_API_MSG, msg, ERR_MEM); +#if LWIP_MPU_COMPATIBLE + strncpy(API_VAR_REF(msg).name, name, DNS_MAX_NAME_LENGTH - 1); + API_VAR_REF(msg).name[DNS_MAX_NAME_LENGTH - 1] = 0; +#else /* LWIP_MPU_COMPATIBLE */ + msg.err = &err; + msg.sem = &sem; + API_VAR_REF(msg).addr = API_VAR_REF(addr); + API_VAR_REF(msg).name = name; +#endif /* LWIP_MPU_COMPATIBLE */ +#if LWIP_IPV4 && LWIP_IPV6 + API_VAR_REF(msg).dns_addrtype = dns_addrtype; +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_NETCONN_SEM_PER_THREAD + API_VAR_REF(msg).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD*/ + err = sys_sem_new(API_EXPR_REF(API_VAR_REF(msg).sem), 0); + if (err != ERR_OK) { + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return err; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + cberr = tcpip_send_msg_wait_sem(lwip_netconn_do_gethostbyname, &API_VAR_REF(msg), API_EXPR_REF(API_VAR_REF(msg).sem)); +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(API_EXPR_REF(API_VAR_REF(msg).sem)); +#endif /* !LWIP_NETCONN_SEM_PER_THREAD */ + if (cberr != ERR_OK) { + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return cberr; + } + +#if LWIP_MPU_COMPATIBLE + *addr = msg->addr; + err = msg->err; +#endif /* LWIP_MPU_COMPATIBLE */ + + API_VAR_FREE(MEMP_DNS_API_MSG, msg); + return err; +} +#endif /* LWIP_DNS*/ + +#if LWIP_NETCONN_SEM_PER_THREAD +void +netconn_thread_init(void) +{ + sys_sem_t *sem = LWIP_NETCONN_THREAD_SEM_GET(); + if ((sem == NULL) || !sys_sem_valid(sem)) { + /* call alloc only once */ + LWIP_NETCONN_THREAD_SEM_ALLOC(); + LWIP_ASSERT("LWIP_NETCONN_THREAD_SEM_ALLOC() failed", sys_sem_valid(LWIP_NETCONN_THREAD_SEM_GET())); + } +} + +void +netconn_thread_cleanup(void) +{ + sys_sem_t *sem = LWIP_NETCONN_THREAD_SEM_GET(); + if ((sem != NULL) && sys_sem_valid(sem)) { + /* call free only once */ + LWIP_NETCONN_THREAD_SEM_FREE(); + } +} +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +#endif /* LWIP_NETCONN */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/api_msg.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/api_msg.c new file mode 100644 index 0000000..8952baa --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/api_msg.c @@ -0,0 +1,2173 @@ +/** + * @file + * Sequential API Internal module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/priv/api_msg.h"
+
+#include "lwip/ip.h"
+#include "lwip/ip_addr.h"
+#include "lwip/udp.h"
+#include "lwip/tcp.h"
+#include "lwip/raw.h"
+
+#include "lwip/memp.h"
+#include "lwip/igmp.h"
+#include "lwip/dns.h"
+#include "lwip/mld6.h"
+#include "lwip/priv/tcpip_priv.h"
+
+#include <string.h>
+
+/* netconns are polled once per second (e.g. continue write on memory error) */
+#define NETCONN_TCP_POLL_INTERVAL 2
+
+#define SET_NONBLOCKING_CONNECT(conn, val) do { if (val) { \
+  netconn_set_flags(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT); \
+} else { \
+  netconn_clear_flags(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT); }} while(0)
+#define IN_NONBLOCKING_CONNECT(conn) netconn_is_flag_set(conn, NETCONN_FLAG_IN_NONBLOCKING_CONNECT)
+
+#if LWIP_NETCONN_FULLDUPLEX
+#define NETCONN_MBOX_VALID(conn, mbox) (sys_mbox_valid(mbox) && ((conn->flags & NETCONN_FLAG_MBOXINVALID) == 0))
+#else
+#define NETCONN_MBOX_VALID(conn, mbox) sys_mbox_valid(mbox)
+#endif
+
+/* forward declarations */
+#if LWIP_TCP
+#if LWIP_TCPIP_CORE_LOCKING
+#define WRITE_DELAYED , 1
+#define WRITE_DELAYED_PARAM , u8_t delayed
+#else /* LWIP_TCPIP_CORE_LOCKING */
+#define WRITE_DELAYED
+#define WRITE_DELAYED_PARAM
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+static err_t lwip_netconn_do_writemore(struct netconn *conn WRITE_DELAYED_PARAM);
+static err_t lwip_netconn_do_close_internal(struct netconn *conn WRITE_DELAYED_PARAM);
+#endif
+
+static void netconn_drain(struct netconn *conn);
+
+#if LWIP_TCPIP_CORE_LOCKING
+#define TCPIP_APIMSG_ACK(m)
+#else /* LWIP_TCPIP_CORE_LOCKING */
+#define TCPIP_APIMSG_ACK(m) do { sys_sem_signal(LWIP_API_MSG_SEM(m)); } while(0)
+#endif /* LWIP_TCPIP_CORE_LOCKING */
+
+#if LWIP_NETCONN_FULLDUPLEX
+const u8_t netconn_deleted = 0;
+
+int
+lwip_netconn_is_deallocated_msg(void *msg)
+{
+  if (msg == &netconn_deleted) {
+    return 1;
+  }
+  return 0;
+}
+#endif /* LWIP_NETCONN_FULLDUPLEX */
+
+#if LWIP_TCP
+const u8_t netconn_aborted = 0;
+const u8_t netconn_reset = 0;
+const u8_t netconn_closed = 0;
+
+/** Translate an error to a unique void* passed via an mbox */
+static void *
+lwip_netconn_err_to_msg(err_t err)
+{
+  switch (err) {
+    case ERR_ABRT:
+      return LWIP_CONST_CAST(void *, &netconn_aborted);
+    case ERR_RST:
+      return
LWIP_CONST_CAST(void *, &netconn_reset); + case ERR_CLSD: + return LWIP_CONST_CAST(void *, &netconn_closed); + default: + LWIP_ASSERT("unhandled error", err == ERR_OK); + return NULL; + } +} + +int +lwip_netconn_is_err_msg(void *msg, err_t *err) +{ + LWIP_ASSERT("err != NULL", err != NULL); + + if (msg == &netconn_aborted) { + *err = ERR_ABRT; + return 1; + } else if (msg == &netconn_reset) { + *err = ERR_RST; + return 1; + } else if (msg == &netconn_closed) { + *err = ERR_CLSD; + return 1; + } + return 0; +} +#endif /* LWIP_TCP */ + + +#if LWIP_RAW +/** + * Receive callback function for RAW netconns. + * Doesn't 'eat' the packet, only copies it and sends it to + * conn->recvmbox + * + * @see raw.h (struct raw_pcb.recv) for parameters and return value + */ +static u8_t +recv_raw(void *arg, struct raw_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr) +{ + struct pbuf *q; + struct netbuf *buf; + struct netconn *conn; + + LWIP_UNUSED_ARG(addr); + conn = (struct netconn *)arg; + + if ((conn != NULL) && NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { +#if LWIP_SO_RCVBUF + int recv_avail; + SYS_ARCH_GET(conn->recv_avail, recv_avail); + if ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize) { + return 0; + } +#endif /* LWIP_SO_RCVBUF */ + /* copy the whole packet into new pbufs */ + q = pbuf_clone(PBUF_RAW, PBUF_RAM, p); + if (q != NULL) { + u16_t len; + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + pbuf_free(q); + return 0; + } + + buf->p = q; + buf->ptr = q; + ip_addr_copy(buf->addr, *ip_current_src_addr()); + buf->port = pcb->protocol; + + len = q->tot_len; + if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) { + netbuf_delete(buf); + return 0; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } + } + } + + return 0; /* do not eat the packet */ +} +#endif /* LWIP_RAW*/ + +#if LWIP_UDP +/** + * Receive callback function for UDP netconns. + * Posts the packet to conn->recvmbox or deletes it on memory error. + * + * @see udp.h (struct udp_pcb.recv) for parameters + */ +static void +recv_udp(void *arg, struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *addr, u16_t port) +{ + struct netbuf *buf; + struct netconn *conn; + u16_t len; +#if LWIP_SO_RCVBUF + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ + + LWIP_UNUSED_ARG(pcb); /* only used for asserts... 
*/ + LWIP_ASSERT("recv_udp must have a pcb argument", pcb != NULL); + LWIP_ASSERT("recv_udp must have an argument", arg != NULL); + conn = (struct netconn *)arg; + + if (conn == NULL) { + pbuf_free(p); + return; + } + + LWIP_ASSERT("recv_udp: recv for wrong pcb!", conn->pcb.udp == pcb); + +#if LWIP_SO_RCVBUF + SYS_ARCH_GET(conn->recv_avail, recv_avail); + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox) || + ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize)) { +#else /* LWIP_SO_RCVBUF */ + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { +#endif /* LWIP_SO_RCVBUF */ + pbuf_free(p); + return; + } + + buf = (struct netbuf *)memp_malloc(MEMP_NETBUF); + if (buf == NULL) { + pbuf_free(p); + return; + } else { + buf->p = p; + buf->ptr = p; + ip_addr_set(&buf->addr, addr); + buf->port = port; +#if LWIP_NETBUF_RECVINFO + if (conn->flags & NETCONN_FLAG_PKTINFO) { + /* get the UDP header - always in the first pbuf, ensured by udp_input */ + const struct udp_hdr *udphdr = (const struct udp_hdr *)ip_next_header_ptr(); + buf->flags = NETBUF_FLAG_DESTADDR; + ip_addr_set(&buf->toaddr, ip_current_dest_addr()); + buf->toport_chksum = udphdr->dest; + } +#endif /* LWIP_NETBUF_RECVINFO */ + } + + len = p->tot_len; + if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) { + netbuf_delete(buf); + return; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } +} +#endif /* LWIP_UDP */ + +#if LWIP_TCP +/** + * Receive callback function for TCP netconns. + * Posts the packet to conn->recvmbox, but doesn't delete it on errors. + * + * @see tcp.h (struct tcp_pcb.recv) for parameters and return value + */ +static err_t +recv_tcp(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) +{ + struct netconn *conn; + u16_t len; + void *msg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("recv_tcp must have a pcb argument", pcb != NULL); + LWIP_ASSERT("recv_tcp must have an argument", arg != NULL); + LWIP_ASSERT("err != ERR_OK unhandled", err == ERR_OK); + LWIP_UNUSED_ARG(err); /* for LWIP_NOASSERT */ + conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + LWIP_ASSERT("recv_tcp: recv for wrong pcb!", conn->pcb.tcp == pcb); + + if (!NETCONN_MBOX_VALID(conn, &conn->recvmbox)) { + /* recvmbox already deleted */ + if (p != NULL) { + tcp_recved(pcb, p->tot_len); + pbuf_free(p); + } + return ERR_OK; + } + /* Unlike for UDP or RAW pcbs, don't check for available space + using recv_avail since that could break the connection + (data is already ACKed) */ + + if (p != NULL) { + msg = p; + len = p->tot_len; + } else { + msg = LWIP_CONST_CAST(void *, &netconn_closed); + len = 0; + } + + if (sys_mbox_trypost(&conn->recvmbox, msg) != ERR_OK) { + /* don't deallocate p: it is presented to us later again from tcp_fasttmr! */ + return ERR_MEM; + } else { +#if LWIP_SO_RCVBUF + SYS_ARCH_INC(conn->recv_avail, len); +#endif /* LWIP_SO_RCVBUF */ + /* Register event with callback */ + API_EVENT(conn, NETCONN_EVT_RCVPLUS, len); + } + + return ERR_OK; +} + +/** + * Poll callback function for TCP netconns. + * Wakes up an application thread that waits for a connection to close + * or data to be sent. The application thread then takes the + * appropriate action to go on. + * + * Signals the conn->sem. + * netconn_close waits for conn->sem if closing failed. 
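+ *
+ * Note (illustrative note, not from the upstream lwIP sources): the interval
+ * handed to tcp_poll() is counted in TCP coarse-grained timer shots of
+ * roughly 500 ms each, so NETCONN_TCP_POLL_INTERVAL == 2 corresponds to the
+ * "once per second" polling mentioned at its definition above.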
+ * + * @see tcp.h (struct tcp_pcb.poll) for parameters and return value + */ +static err_t +poll_tcp(void *arg, struct tcp_pcb *pcb) +{ + struct netconn *conn = (struct netconn *)arg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + if (conn->state == NETCONN_WRITE) { + lwip_netconn_do_writemore(conn WRITE_DELAYED); + } else if (conn->state == NETCONN_CLOSE) { +#if !LWIP_SO_SNDTIMEO && !LWIP_SO_LINGER + if (conn->current_msg && conn->current_msg->msg.sd.polls_left) { + conn->current_msg->msg.sd.polls_left--; + } +#endif /* !LWIP_SO_SNDTIMEO && !LWIP_SO_LINGER */ + lwip_netconn_do_close_internal(conn WRITE_DELAYED); + } + /* @todo: implement connect timeout here? */ + + /* Did a nonblocking write fail before? Then check available write-space. */ + if (conn->flags & NETCONN_FLAG_CHECK_WRITESPACE) { + /* If the queued byte- or pbuf-count drops below the configured low-water limit, + let select mark this pcb as writable again. */ + if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) && + (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) { + netconn_clear_flags(conn, NETCONN_FLAG_CHECK_WRITESPACE); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + } + } + + return ERR_OK; +} + +/** + * Sent callback function for TCP netconns. + * Signals the conn->sem and calls API_EVENT. + * netconn_write waits for conn->sem if send buffer is low. + * + * @see tcp.h (struct tcp_pcb.sent) for parameters and return value + */ +static err_t +sent_tcp(void *arg, struct tcp_pcb *pcb, u16_t len) +{ + struct netconn *conn = (struct netconn *)arg; + + LWIP_UNUSED_ARG(pcb); + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + if (conn) { + if (conn->state == NETCONN_WRITE) { + lwip_netconn_do_writemore(conn WRITE_DELAYED); + } else if (conn->state == NETCONN_CLOSE) { + lwip_netconn_do_close_internal(conn WRITE_DELAYED); + } + + /* If the queued byte- or pbuf-count drops below the configured low-water limit, + let select mark this pcb as writable again. */ + if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) && + (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) { + netconn_clear_flags(conn, NETCONN_FLAG_CHECK_WRITESPACE); + API_EVENT(conn, NETCONN_EVT_SENDPLUS, len); + } + } + + return ERR_OK; +} + +/** + * Error callback function for TCP netconns. + * Signals conn->sem, posts to all conn mboxes and calls API_EVENT. + * The application thread has then to decide what to do. + * + * @see tcp.h (struct tcp_pcb.err) for parameters + */ +static void +err_tcp(void *arg, err_t err) +{ + struct netconn *conn; + enum netconn_state old_state; + void *mbox_msg; + SYS_ARCH_DECL_PROTECT(lev); + + conn = (struct netconn *)arg; + LWIP_ASSERT("conn != NULL", (conn != NULL)); + + SYS_ARCH_PROTECT(lev); + + /* when err is called, the pcb is deallocated, so delete the reference */ + conn->pcb.tcp = NULL; + /* store pending error */ + conn->pending_err = err; + /* prevent application threads from blocking on 'recvmbox'/'acceptmbox' */ + conn->flags |= NETCONN_FLAG_MBOXCLOSED; + + /* reset conn->state now before waking up other threads */ + old_state = conn->state; + conn->state = NETCONN_NONE; + + SYS_ARCH_UNPROTECT(lev); + + /* Notify the user layer about a connection error. Used to signal select. */ + API_EVENT(conn, NETCONN_EVT_ERROR, 0); + /* Try to release selects pending on 'read' or 'write', too. + They will get an error if they actually try to read or write. 
*/
+  API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0);
+  API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0);
+
+  mbox_msg = lwip_netconn_err_to_msg(err);
+  /* pass error message to recvmbox to wake up pending recv */
+  if (NETCONN_MBOX_VALID(conn, &conn->recvmbox)) {
+    /* use trypost to prevent deadlock */
+    sys_mbox_trypost(&conn->recvmbox, mbox_msg);
+  }
+  /* pass error message to acceptmbox to wake up pending accept */
+  if (NETCONN_MBOX_VALID(conn, &conn->acceptmbox)) {
+    /* use trypost to prevent deadlock */
+    sys_mbox_trypost(&conn->acceptmbox, mbox_msg);
+  }
+
+  if ((old_state == NETCONN_WRITE) || (old_state == NETCONN_CLOSE) ||
+      (old_state == NETCONN_CONNECT)) {
+    /* calling lwip_netconn_do_writemore/lwip_netconn_do_close_internal is not necessary
+       since the pcb has already been deleted! */
+    int was_nonblocking_connect = IN_NONBLOCKING_CONNECT(conn);
+    SET_NONBLOCKING_CONNECT(conn, 0);
+
+    if (!was_nonblocking_connect) {
+      sys_sem_t *op_completed_sem;
+      /* set error return code */
+      LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL);
+      if (old_state == NETCONN_CLOSE) {
+        /* let close succeed: the connection is closed after all... */
+        conn->current_msg->err = ERR_OK;
+      } else {
+        /* Write and connect fail */
+        conn->current_msg->err = err;
+      }
+      op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg);
+      LWIP_ASSERT("invalid op_completed_sem", sys_sem_valid(op_completed_sem));
+      conn->current_msg = NULL;
+      /* wake up the waiting task */
+      sys_sem_signal(op_completed_sem);
+    } else {
+      /* @todo: test what happens for error on nonblocking connect */
+    }
+  } else {
+    LWIP_ASSERT("conn->current_msg == NULL", conn->current_msg == NULL);
+  }
+}
+
+/**
+ * Setup a tcp_pcb with the correct callback function pointers
+ * and their arguments.
+ *
+ * @param conn the TCP netconn to setup
+ */
+static void
+setup_tcp(struct netconn *conn)
+{
+  struct tcp_pcb *pcb;
+
+  pcb = conn->pcb.tcp;
+  tcp_arg(pcb, conn);
+  tcp_recv(pcb, recv_tcp);
+  tcp_sent(pcb, sent_tcp);
+  tcp_poll(pcb, poll_tcp, NETCONN_TCP_POLL_INTERVAL);
+  tcp_err(pcb, err_tcp);
+}
+
+/**
+ * Accept callback function for TCP netconns.
+ * Allocates a new netconn and posts that to conn->acceptmbox.
+ *
+ * @see tcp.h (struct tcp_pcb_listen.accept) for parameters and return value
+ */
+static err_t
+accept_function(void *arg, struct tcp_pcb *newpcb, err_t err)
+{
+  struct netconn *newconn;
+  struct netconn *conn = (struct netconn *)arg;
+
+  if (conn == NULL) {
+    return ERR_VAL;
+  }
+  if (!NETCONN_MBOX_VALID(conn, &conn->acceptmbox)) {
+    LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: acceptmbox already deleted\n"));
+    return ERR_VAL;
+  }
+
+  if (newpcb == NULL) {
+    /* out-of-pcbs during connect: pass on this error to the application */
+    if (sys_mbox_trypost(&conn->acceptmbox, lwip_netconn_err_to_msg(ERR_ABRT)) == ERR_OK) {
+      /* Register event with callback */
+      API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0);
+    }
+    return ERR_VAL;
+  }
+  LWIP_ASSERT("expect newpcb == NULL or err == ERR_OK", err == ERR_OK);
+  LWIP_UNUSED_ARG(err); /* for LWIP_NOASSERT */
+
+  LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: newpcb->state: %s\n", tcp_debug_state_str(newpcb->state)));
+
+  /* We have to set the callback here even though
+   * the new socket is unknown. newconn->socket is marked as -1.
*/
+  newconn = netconn_alloc(conn->type, conn->callback);
+  if (newconn == NULL) {
+    /* out of netconns: pass on this error to the application */
+    if (sys_mbox_trypost(&conn->acceptmbox, lwip_netconn_err_to_msg(ERR_ABRT)) == ERR_OK) {
+      /* Register event with callback */
+      API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0);
+    }
+    return ERR_MEM;
+  }
+  newconn->pcb.tcp = newpcb;
+  setup_tcp(newconn);
+
+  /* handle backlog counter */
+  tcp_backlog_delayed(newpcb);
+
+  if (sys_mbox_trypost(&conn->acceptmbox, newconn) != ERR_OK) {
+    /* When returning != ERR_OK, the pcb is aborted in tcp_process(),
+       so do nothing here! */
+    /* remove all references to this netconn from the pcb */
+    struct tcp_pcb *pcb = newconn->pcb.tcp;
+    tcp_arg(pcb, NULL);
+    tcp_recv(pcb, NULL);
+    tcp_sent(pcb, NULL);
+    tcp_poll(pcb, NULL, 0);
+    tcp_err(pcb, NULL);
+    /* remove the reference to the pcb from this netconn */
+    newconn->pcb.tcp = NULL;
+    /* no need to drain since we know the recvmbox is empty. */
+    sys_mbox_free(&newconn->recvmbox);
+    sys_mbox_set_invalid(&newconn->recvmbox);
+    netconn_free(newconn);
+    return ERR_MEM;
+  } else {
+    /* Register event with callback */
+    API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0);
+  }
+
+  return ERR_OK;
+}
+#endif /* LWIP_TCP */
+
+/**
+ * Create a new pcb of a specific type.
+ * Called from lwip_netconn_do_newconn().
+ *
+ * @param msg the api_msg describing the connection type
+ */
+static void
+pcb_new(struct api_msg *msg)
+{
+  enum lwip_ip_addr_type iptype = IPADDR_TYPE_V4;
+
+  LWIP_ASSERT("pcb_new: pcb already allocated", msg->conn->pcb.tcp == NULL);
+
+#if LWIP_IPV6 && LWIP_IPV4
+  /* IPv6: Dual-stack by default, unless netconn_set_ipv6only() is called */
+  if (NETCONNTYPE_ISIPV6(netconn_type(msg->conn))) {
+    iptype = IPADDR_TYPE_ANY;
+  }
+#endif
+
+  /* Allocate a PCB for this connection */
+  switch (NETCONNTYPE_GROUP(msg->conn->type)) {
+#if LWIP_RAW
+    case NETCONN_RAW:
+      msg->conn->pcb.raw = raw_new_ip_type(iptype, msg->msg.n.proto);
+      if (msg->conn->pcb.raw != NULL) {
+#if LWIP_IPV6
+        /* ICMPv6 packets should always have checksum calculated by the stack as per RFC 3542 chapter 3.1 */
+        if (NETCONNTYPE_ISIPV6(msg->conn->type) && msg->conn->pcb.raw->protocol == IP6_NEXTH_ICMP6) {
+          msg->conn->pcb.raw->chksum_reqd = 1;
+          msg->conn->pcb.raw->chksum_offset = 2;
+        }
+#endif /* LWIP_IPV6 */
+        raw_recv(msg->conn->pcb.raw, recv_raw, msg->conn);
+      }
+      break;
+#endif /* LWIP_RAW */
+#if LWIP_UDP
+    case NETCONN_UDP:
+      msg->conn->pcb.udp = udp_new_ip_type(iptype);
+      if (msg->conn->pcb.udp != NULL) {
+#if LWIP_UDPLITE
+        if (NETCONNTYPE_ISUDPLITE(msg->conn->type)) {
+          udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_UDPLITE);
+        }
+#endif /* LWIP_UDPLITE */
+        if (NETCONNTYPE_ISUDPNOCHKSUM(msg->conn->type)) {
+          udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
+        }
+        udp_recv(msg->conn->pcb.udp, recv_udp, msg->conn);
+      }
+      break;
+#endif /* LWIP_UDP */
+#if LWIP_TCP
+    case NETCONN_TCP:
+      msg->conn->pcb.tcp = tcp_new_ip_type(iptype);
+      if (msg->conn->pcb.tcp != NULL) {
+        setup_tcp(msg->conn);
+      }
+      break;
+#endif /* LWIP_TCP */
+    default:
+      /* Unsupported netconn type, e.g. protocol disabled */
+      msg->err = ERR_VAL;
+      return;
+  }
+  if (msg->conn->pcb.ip == NULL) {
+    msg->err = ERR_MEM;
+  }
+}
+
+/**
+ * Create a new pcb of a specific type inside a netconn.
+ * Called from netconn_new_with_proto_and_callback.
+ *
+ * @param m the api_msg describing the connection type
+ */
+void
+lwip_netconn_do_newconn(void *m)
+{
+  struct api_msg *msg = (struct api_msg *)m;
+
+  msg->err = ERR_OK;
+  if (msg->conn->pcb.tcp == NULL) {
+    pcb_new(msg);
+  }
+  /* Else? This "new" connection already has a PCB allocated. */
+  /* Is this an error condition? Should it be deleted? */
+  /* For now, we are happy and simply return. */
+
+  TCPIP_APIMSG_ACK(msg);
+}
+
+/**
+ * Create a new netconn (of a specific type) that has a callback function.
+ * The corresponding pcb is NOT created!
+ *
+ * @param t the type of 'connection' to create (@see enum netconn_type)
+ * @param callback a function to call on status changes (RX available, TX'ed)
+ * @return a newly allocated struct netconn or
+ *         NULL on memory error
+ */
+struct netconn *
+netconn_alloc(enum netconn_type t, netconn_callback callback)
+{
+  struct netconn *conn;
+  int size;
+  u8_t init_flags = 0;
+
+  conn = (struct netconn *)memp_malloc(MEMP_NETCONN);
+  if (conn == NULL) {
+    return NULL;
+  }
+
+  conn->pending_err = ERR_OK;
+  conn->type = t;
+  conn->pcb.tcp = NULL;
+
+  /* If all sizes are the same, every compiler should optimize this switch to nothing */
+  switch (NETCONNTYPE_GROUP(t)) {
+#if LWIP_RAW
+    case NETCONN_RAW:
+      size = DEFAULT_RAW_RECVMBOX_SIZE;
+      break;
+#endif /* LWIP_RAW */
+#if LWIP_UDP
+    case NETCONN_UDP:
+      size = DEFAULT_UDP_RECVMBOX_SIZE;
+#if LWIP_NETBUF_RECVINFO
+      init_flags |= NETCONN_FLAG_PKTINFO;
+#endif /* LWIP_NETBUF_RECVINFO */
+      break;
+#endif /* LWIP_UDP */
+#if LWIP_TCP
+    case NETCONN_TCP:
+      size = DEFAULT_TCP_RECVMBOX_SIZE;
+      break;
+#endif /* LWIP_TCP */
+    default:
+      LWIP_ASSERT("netconn_alloc: undefined netconn_type", 0);
+      goto free_and_return;
+  }
+
+  if (sys_mbox_new(&conn->recvmbox, size) != ERR_OK) {
+    goto free_and_return;
+  }
+#if !LWIP_NETCONN_SEM_PER_THREAD
+  if (sys_sem_new(&conn->op_completed, 0) != ERR_OK) {
+    sys_mbox_free(&conn->recvmbox);
+    goto free_and_return;
+  }
+#endif
+
+#if LWIP_TCP
+  sys_mbox_set_invalid(&conn->acceptmbox);
+#endif
+  conn->state = NETCONN_NONE;
+#if LWIP_SOCKET
+  /* initialize socket to -1 since 0 is a valid socket */
+  conn->socket = -1;
+#endif /* LWIP_SOCKET */
+  conn->callback = callback;
+#if LWIP_TCP
+  conn->current_msg = NULL;
+#endif /* LWIP_TCP */
+#if LWIP_SO_SNDTIMEO
+  conn->send_timeout = 0;
+#endif /* LWIP_SO_SNDTIMEO */
+#if LWIP_SO_RCVTIMEO
+  conn->recv_timeout = 0;
+#endif /* LWIP_SO_RCVTIMEO */
+#if LWIP_SO_RCVBUF
+  conn->recv_bufsize = RECV_BUFSIZE_DEFAULT;
+  conn->recv_avail = 0;
+#endif /* LWIP_SO_RCVBUF */
+#if LWIP_SO_LINGER
+  conn->linger = -1;
+#endif /* LWIP_SO_LINGER */
+  conn->flags = init_flags;
+  return conn;
+free_and_return:
+  memp_free(MEMP_NETCONN, conn);
+  return NULL;
+}
+
+/**
+ * Delete a netconn and all its resources.
+ * The pcb is NOT freed (since we might not be in the right thread context to do this).
+ * + * @param conn the netconn to free + */ +void +netconn_free(struct netconn *conn) +{ + LWIP_ASSERT("PCB must be deallocated outside this function", conn->pcb.tcp == NULL); + +#if LWIP_NETCONN_FULLDUPLEX + /* in fullduplex, netconn is drained here */ + netconn_drain(conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + LWIP_ASSERT("recvmbox must be deallocated before calling this function", + !sys_mbox_valid(&conn->recvmbox)); +#if LWIP_TCP + LWIP_ASSERT("acceptmbox must be deallocated before calling this function", + !sys_mbox_valid(&conn->acceptmbox)); +#endif /* LWIP_TCP */ + +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(&conn->op_completed); + sys_sem_set_invalid(&conn->op_completed); +#endif + + memp_free(MEMP_NETCONN, conn); +} + +/** + * Delete rcvmbox and acceptmbox of a netconn and free the left-over data in + * these mboxes + * + * @param conn the netconn to free + * @bytes_drained bytes drained from recvmbox + * @accepts_drained pending connections drained from acceptmbox + */ +static void +netconn_drain(struct netconn *conn) +{ + void *mem; + + /* This runs when mbox and netconn are marked as closed, + so we don't need to lock against rx packets */ +#if LWIP_NETCONN_FULLDUPLEX + LWIP_ASSERT("netconn marked closed", conn->flags & NETCONN_FLAG_MBOXINVALID); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + /* Delete and drain the recvmbox. */ + if (sys_mbox_valid(&conn->recvmbox)) { + while (sys_mbox_tryfetch(&conn->recvmbox, &mem) != SYS_MBOX_EMPTY) { +#if LWIP_NETCONN_FULLDUPLEX + if (!lwip_netconn_is_deallocated_msg(mem)) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { +#if LWIP_TCP + if (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) { + err_t err; + if (!lwip_netconn_is_err_msg(mem, &err)) { + pbuf_free((struct pbuf *)mem); + } + } else +#endif /* LWIP_TCP */ + { + netbuf_delete((struct netbuf *)mem); + } + } + } + sys_mbox_free(&conn->recvmbox); + sys_mbox_set_invalid(&conn->recvmbox); + } + + /* Delete and drain the acceptmbox. */ +#if LWIP_TCP + if (sys_mbox_valid(&conn->acceptmbox)) { + while (sys_mbox_tryfetch(&conn->acceptmbox, &mem) != SYS_MBOX_EMPTY) { +#if LWIP_NETCONN_FULLDUPLEX + if (!lwip_netconn_is_deallocated_msg(mem)) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + err_t err; + if (!lwip_netconn_is_err_msg(mem, &err)) { + struct netconn *newconn = (struct netconn *)mem; + /* Only tcp pcbs have an acceptmbox, so no need to check conn->type */ + /* pcb might be set to NULL already by err_tcp() */ + /* drain recvmbox */ + netconn_drain(newconn); + if (newconn->pcb.tcp != NULL) { + tcp_abort(newconn->pcb.tcp); + newconn->pcb.tcp = NULL; + } + netconn_free(newconn); + } + } + } + sys_mbox_free(&conn->acceptmbox); + sys_mbox_set_invalid(&conn->acceptmbox); + } +#endif /* LWIP_TCP */ +} + +#if LWIP_NETCONN_FULLDUPLEX +static void +netconn_mark_mbox_invalid(struct netconn *conn) +{ + int i, num_waiting; + void *msg = LWIP_CONST_CAST(void *, &netconn_deleted); + + /* Prevent new calls/threads from reading from the mbox */ + conn->flags |= NETCONN_FLAG_MBOXINVALID; + + SYS_ARCH_LOCKED(num_waiting = conn->mbox_threads_waiting); + for (i = 0; i < num_waiting; i++) { + if (sys_mbox_valid_val(conn->recvmbox)) { + sys_mbox_trypost(&conn->recvmbox, msg); + } else { + sys_mbox_trypost(&conn->acceptmbox, msg); + } + } +} +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +#if LWIP_TCP +/** + * Internal helper function to close a TCP netconn: since this sometimes + * doesn't work at the first attempt, this function is called from multiple + * places. 
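+ *
+ * The linger handling below implements the BSD SO_LINGER semantics. As an
+ * illustrative sketch (socket layer, assuming LWIP_SO_LINGER==1):
+ *
+ *   struct linger li;
+ *   li.l_onoff  = 1;  // linger on close()
+ *   li.l_linger = 5;  // wait up to 5 seconds for unsent/unacked data
+ *   lwip_setsockopt(fd, SOL_SOCKET, SO_LINGER, &li, sizeof(li));
+ *
+ * With l_linger == 0, close() aborts the connection with an RST (tcp_abort)
+ * instead of waiting for the remaining data to be acknowledged.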
+ * + * @param conn the TCP netconn to close + */ +static err_t +lwip_netconn_do_close_internal(struct netconn *conn WRITE_DELAYED_PARAM) +{ + err_t err; + u8_t shut, shut_rx, shut_tx, shut_close; + u8_t close_finished = 0; + struct tcp_pcb *tpcb; +#if LWIP_SO_LINGER + u8_t linger_wait_required = 0; +#endif /* LWIP_SO_LINGER */ + + LWIP_ASSERT("invalid conn", (conn != NULL)); + LWIP_ASSERT("this is for tcp netconns only", (NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP)); + LWIP_ASSERT("conn must be in state NETCONN_CLOSE", (conn->state == NETCONN_CLOSE)); + LWIP_ASSERT("pcb already closed", (conn->pcb.tcp != NULL)); + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + + tpcb = conn->pcb.tcp; + shut = conn->current_msg->msg.sd.shut; + shut_rx = shut & NETCONN_SHUT_RD; + shut_tx = shut & NETCONN_SHUT_WR; + /* shutting down both ends is the same as closing + (also if RD or WR side was shut down before already) */ + if (shut == NETCONN_SHUT_RDWR) { + shut_close = 1; + } else if (shut_rx && + ((tpcb->state == FIN_WAIT_1) || + (tpcb->state == FIN_WAIT_2) || + (tpcb->state == CLOSING))) { + shut_close = 1; + } else if (shut_tx && ((tpcb->flags & TF_RXCLOSED) != 0)) { + shut_close = 1; + } else { + shut_close = 0; + } + + /* Set back some callback pointers */ + if (shut_close) { + tcp_arg(tpcb, NULL); + } + if (tpcb->state == LISTEN) { + tcp_accept(tpcb, NULL); + } else { + /* some callbacks have to be reset if tcp_close is not successful */ + if (shut_rx) { + tcp_recv(tpcb, NULL); + tcp_accept(tpcb, NULL); + } + if (shut_tx) { + tcp_sent(tpcb, NULL); + } + if (shut_close) { + tcp_poll(tpcb, NULL, 0); + tcp_err(tpcb, NULL); + } + } + /* Try to close the connection */ + if (shut_close) { +#if LWIP_SO_LINGER + /* check linger possibilites before calling tcp_close */ + err = ERR_OK; + /* linger enabled/required at all? (i.e. is there untransmitted data left?) */ + if ((conn->linger >= 0) && (conn->pcb.tcp->unsent || conn->pcb.tcp->unacked)) { + if ((conn->linger == 0)) { + /* data left but linger prevents waiting */ + tcp_abort(tpcb); + tpcb = NULL; + } else if (conn->linger > 0) { + /* data left and linger says we should wait */ + if (netconn_is_nonblocking(conn)) { + /* data left on a nonblocking netconn -> cannot linger */ + err = ERR_WOULDBLOCK; + } else if ((s32_t)(sys_now() - conn->current_msg->msg.sd.time_started) >= + (conn->linger * 1000)) { + /* data left but linger timeout has expired (this happens on further + calls to this function through poll_tcp */ + tcp_abort(tpcb); + tpcb = NULL; + } else { + /* data left -> need to wait for ACK after successful close */ + linger_wait_required = 1; + } + } + } + if ((err == ERR_OK) && (tpcb != NULL)) +#endif /* LWIP_SO_LINGER */ + { + err = tcp_close(tpcb); + } + } else { + err = tcp_shutdown(tpcb, shut_rx, shut_tx); + } + if (err == ERR_OK) { + close_finished = 1; +#if LWIP_SO_LINGER + if (linger_wait_required) { + /* wait for ACK of all unsent/unacked data by just getting called again */ + close_finished = 0; + err = ERR_INPROGRESS; + } +#endif /* LWIP_SO_LINGER */ + } else { + if (err == ERR_MEM) { + /* Closing failed because of memory shortage, try again later. Even for + nonblocking netconns, we have to wait since no standard socket application + is prepared for close failing because of resource shortage. 
+ Check the timeout: this is kind of an lwip addition to the standard sockets: + we wait for some time when failing to allocate a segment for the FIN */ +#if LWIP_SO_SNDTIMEO || LWIP_SO_LINGER + s32_t close_timeout = LWIP_TCP_CLOSE_TIMEOUT_MS_DEFAULT; +#if LWIP_SO_SNDTIMEO + if (conn->send_timeout > 0) { + close_timeout = conn->send_timeout; + } +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_LINGER + if (conn->linger >= 0) { + /* use linger timeout (seconds) */ + close_timeout = conn->linger * 1000U; + } +#endif + if ((s32_t)(sys_now() - conn->current_msg->msg.sd.time_started) >= close_timeout) { +#else /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + if (conn->current_msg->msg.sd.polls_left == 0) { +#endif /* LWIP_SO_SNDTIMEO || LWIP_SO_LINGER */ + close_finished = 1; + if (shut_close) { + /* in this case, we want to RST the connection */ + tcp_abort(tpcb); + err = ERR_OK; + } + } + } else { + /* Closing failed for a non-memory error: give up */ + close_finished = 1; + } + } + if (close_finished) { + /* Closing done (succeeded, non-memory error, nonblocking error or timeout) */ + sys_sem_t *op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + conn->current_msg->err = err; + conn->current_msg = NULL; + conn->state = NETCONN_NONE; + if (err == ERR_OK) { + if (shut_close) { + /* Set back some callback pointers as conn is going away */ + conn->pcb.tcp = NULL; + /* Trigger select() in socket layer. Make sure everybody notices activity + on the connection, error first! */ + API_EVENT(conn, NETCONN_EVT_ERROR, 0); + } + if (shut_rx) { + API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0); + } + if (shut_tx) { + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + } + } +#if LWIP_TCPIP_CORE_LOCKING + if (delayed) +#endif + { + /* wake up the application task */ + sys_sem_signal(op_completed_sem); + } + return ERR_OK; + } + if (!close_finished) { + /* Closing failed and we want to wait: restore some of the callbacks */ + /* Closing of listen pcb will never fail! */ + LWIP_ASSERT("Closing a listen pcb may not fail!", (tpcb->state != LISTEN)); + if (shut_tx) { + tcp_sent(tpcb, sent_tcp); + } + /* when waiting for close, set up poll interval to 500ms */ + tcp_poll(tpcb, poll_tcp, 1); + tcp_err(tpcb, err_tcp); + tcp_arg(tpcb, conn); + /* don't restore recv callback: we don't want to receive any more data */ + } + /* If closing didn't succeed, we get called again either + from poll_tcp or from sent_tcp */ + LWIP_ASSERT("err != ERR_OK", err != ERR_OK); + return err; +} +#endif /* LWIP_TCP */ + +/** + * Delete the pcb inside a netconn. + * Called from netconn_delete. 
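+ *
+ * Example usage (illustrative sketch; a blocking TCP netconn):
+ *
+ *   netconn_close(conn);   // optional graceful close first
+ *   netconn_delete(conn);  // posts this message; for TCP it runs the
+ *                          // NETCONN_SHUT_RDWR close sequence below
+ *   conn = NULL;           // the netconn itself is freed afterwards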
+ * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_delconn(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + enum netconn_state state = msg->conn->state; + LWIP_ASSERT("netconn state error", /* this only happens for TCP netconns */ + (state == NETCONN_NONE) || (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP)); +#if LWIP_NETCONN_FULLDUPLEX + /* In full duplex mode, blocking write/connect is aborted with ERR_CLSD */ + if (state != NETCONN_NONE) { + if ((state == NETCONN_WRITE) || + ((state == NETCONN_CONNECT) && !IN_NONBLOCKING_CONNECT(msg->conn))) { + /* close requested, abort running write/connect */ + sys_sem_t *op_completed_sem; + LWIP_ASSERT("msg->conn->current_msg != NULL", msg->conn->current_msg != NULL); + op_completed_sem = LWIP_API_MSG_SEM(msg->conn->current_msg); + msg->conn->current_msg->err = ERR_CLSD; + msg->conn->current_msg = NULL; + msg->conn->state = NETCONN_NONE; + sys_sem_signal(op_completed_sem); + } + } +#else /* LWIP_NETCONN_FULLDUPLEX */ + if (((state != NETCONN_NONE) && + (state != NETCONN_LISTEN) && + (state != NETCONN_CONNECT)) || + ((state == NETCONN_CONNECT) && !IN_NONBLOCKING_CONNECT(msg->conn))) { + /* This means either a blocking write or blocking connect is running + (nonblocking write returns and sets state to NONE) */ + msg->err = ERR_INPROGRESS; + } else +#endif /* LWIP_NETCONN_FULLDUPLEX */ + { + LWIP_ASSERT("blocking connect in progress", + (state != NETCONN_CONNECT) || IN_NONBLOCKING_CONNECT(msg->conn)); + msg->err = ERR_OK; +#if LWIP_NETCONN_FULLDUPLEX + /* Mark mboxes invalid */ + netconn_mark_mbox_invalid(msg->conn); +#else /* LWIP_NETCONN_FULLDUPLEX */ + netconn_drain(msg->conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + if (msg->conn->pcb.tcp != NULL) { + + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + raw_remove(msg->conn->pcb.raw); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + msg->conn->pcb.udp->recv_arg = NULL; + udp_remove(msg->conn->pcb.udp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + msg->conn->state = NETCONN_CLOSE; + msg->msg.sd.shut = NETCONN_SHUT_RDWR; + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_close_internal(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CLOSE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_close_internal(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* API_EVENT is called inside lwip_netconn_do_close_internal, before releasing + the application thread, so we can return at this point! */ + return; +#endif /* LWIP_TCP */ + default: + break; + } + msg->conn->pcb.tcp = NULL; + } + /* tcp netconns don't come here! */ + + /* @todo: this lets select make the socket readable and writable, + which is wrong! errfd instead? */ + API_EVENT(msg->conn, NETCONN_EVT_RCVPLUS, 0); + API_EVENT(msg->conn, NETCONN_EVT_SENDPLUS, 0); + } + if (sys_sem_valid(LWIP_API_MSG_SEM(msg))) { + TCPIP_APIMSG_ACK(msg); + } +} + +/** + * Bind a pcb contained in a netconn + * Called from netconn_bind. 
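+ *
+ * Example usage (illustrative sketch; bind a UDP netconn to any local
+ * address, port 7):
+ *
+ *   err_t err = netconn_bind(conn, IP_ADDR_ANY, 7);
+ *   if (err != ERR_OK) {
+ *     // e.g. ERR_USE if the port is already in use
+ *   }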
+ * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to bind to + */ +void +lwip_netconn_do_bind(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp != NULL) { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + err = raw_bind(msg->conn->pcb.raw, API_EXPR_REF(msg->msg.bc.ipaddr)); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + err = udp_bind(msg->conn->pcb.udp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + err = tcp_bind(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_TCP */ + default: + err = ERR_VAL; + break; + } + } else { + err = ERR_VAL; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} +/** + * Bind a pcb contained in a netconn to an interface + * Called from netconn_bind_if. + * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to bind to + */ +void +lwip_netconn_do_bind_if(void *m) +{ + struct netif *netif; + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + netif = netif_get_by_index(msg->msg.bc.if_idx); + + if ((netif != NULL) && (msg->conn->pcb.tcp != NULL)) { + err = ERR_OK; + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + raw_bind_netif(msg->conn->pcb.raw, netif); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + udp_bind_netif(msg->conn->pcb.udp, netif); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + tcp_bind_netif(msg->conn->pcb.tcp, netif); + break; +#endif /* LWIP_TCP */ + default: + err = ERR_VAL; + break; + } + } else { + err = ERR_VAL; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * TCP callback function if a connection (opened by tcp_connect/lwip_netconn_do_connect) has + * been established (or reset by the remote host). + * + * @see tcp.h (struct tcp_pcb.connected) for parameters and return values + */ +static err_t +lwip_netconn_do_connected(void *arg, struct tcp_pcb *pcb, err_t err) +{ + struct netconn *conn; + int was_blocking; + sys_sem_t *op_completed_sem = NULL; + + LWIP_UNUSED_ARG(pcb); + + conn = (struct netconn *)arg; + + if (conn == NULL) { + return ERR_VAL; + } + + LWIP_ASSERT("conn->state == NETCONN_CONNECT", conn->state == NETCONN_CONNECT); + LWIP_ASSERT("(conn->current_msg != NULL) || conn->in_non_blocking_connect", + (conn->current_msg != NULL) || IN_NONBLOCKING_CONNECT(conn)); + + if (conn->current_msg != NULL) { + conn->current_msg->err = err; + op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + } + if ((NETCONNTYPE_GROUP(conn->type) == NETCONN_TCP) && (err == ERR_OK)) { + setup_tcp(conn); + } + was_blocking = !IN_NONBLOCKING_CONNECT(conn); + SET_NONBLOCKING_CONNECT(conn, 0); + LWIP_ASSERT("blocking connect state error", + (was_blocking && op_completed_sem != NULL) || + (!was_blocking && op_completed_sem == NULL)); + conn->current_msg = NULL; + conn->state = NETCONN_NONE; + API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); + + if (was_blocking) { + sys_sem_signal(op_completed_sem); + } + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Connect a pcb contained inside a netconn + * Called from netconn_connect. 
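+ *
+ * Example usage (illustrative sketch; non-blocking TCP connect, the peer
+ * address 192.168.0.10:80 is chosen arbitrarily):
+ *
+ *   ip_addr_t ip;
+ *   IP_ADDR4(&ip, 192, 168, 0, 10);
+ *   netconn_set_nonblocking(conn, 1);
+ *   err_t err = netconn_connect(conn, &ip, 80);
+ *   // err == ERR_INPROGRESS here; completion is reported via the netconn
+ *   // callback (NETCONN_EVT_SENDPLUS) from lwip_netconn_do_connected()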
+ * + * @param m the api_msg pointing to the connection and containing + * the IP address and port to connect to + */ +void +lwip_netconn_do_connect(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp == NULL) { + /* This may happen when calling netconn_connect() a second time */ + err = ERR_CLSD; + } else { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + err = raw_connect(msg->conn->pcb.raw, API_EXPR_REF(msg->msg.bc.ipaddr)); + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + err = udp_connect(msg->conn->pcb.udp, API_EXPR_REF(msg->msg.bc.ipaddr), msg->msg.bc.port); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + /* Prevent connect while doing any other action. */ + if (msg->conn->state == NETCONN_CONNECT) { + err = ERR_ALREADY; + } else if (msg->conn->state != NETCONN_NONE) { + err = ERR_ISCONN; + } else { + setup_tcp(msg->conn); + err = tcp_connect(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr), + msg->msg.bc.port, lwip_netconn_do_connected); + if (err == ERR_OK) { + u8_t non_blocking = netconn_is_nonblocking(msg->conn); + msg->conn->state = NETCONN_CONNECT; + SET_NONBLOCKING_CONNECT(msg->conn, non_blocking); + if (non_blocking) { + err = ERR_INPROGRESS; + } else { + msg->conn->current_msg = msg; + /* sys_sem_signal() is called from lwip_netconn_do_connected (or err_tcp()), + when the connection is established! */ +#if LWIP_TCPIP_CORE_LOCKING + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CONNECT); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state != NETCONN_CONNECT); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + return; + } + } + } + break; +#endif /* LWIP_TCP */ + default: + LWIP_ERROR("Invalid netconn type", 0, do { + err = ERR_VAL; + } while (0)); + break; + } + } + msg->err = err; + /* For all other protocols, netconn_connect() calls netconn_apimsg(), + so use TCPIP_APIMSG_ACK() here. */ + TCPIP_APIMSG_ACK(msg); +} + +/** + * Disconnect a pcb contained inside a netconn + * Only used for UDP netconns. + * Called from netconn_disconnect. + * + * @param m the api_msg pointing to the connection to disconnect + */ +void +lwip_netconn_do_disconnect(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + +#if LWIP_UDP + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { + udp_disconnect(msg->conn->pcb.udp); + msg->err = ERR_OK; + } else +#endif /* LWIP_UDP */ + { + msg->err = ERR_VAL; + } + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * Set a TCP pcb contained in a netconn into listen mode + * Called from netconn_listen. 
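+ *
+ * Example usage (illustrative sketch; minimal accept loop):
+ *
+ *   struct netconn *newconn;
+ *   netconn_bind(conn, IP_ADDR_ANY, 80);
+ *   netconn_listen(conn);  // swaps the pcb for a listening pcb below
+ *   while (netconn_accept(conn, &newconn) == ERR_OK) {
+ *     // newconn was posted to acceptmbox by accept_function()
+ *     netconn_delete(newconn);
+ *   }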
+ * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_listen(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + err_t err; + + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + if (msg->conn->state == NETCONN_NONE) { + struct tcp_pcb *lpcb; + if (msg->conn->pcb.tcp->state != CLOSED) { + /* connection is not closed, cannot listen */ + err = ERR_VAL; + } else { + u8_t backlog; +#if TCP_LISTEN_BACKLOG + backlog = msg->msg.lb.backlog; +#else /* TCP_LISTEN_BACKLOG */ + backlog = TCP_DEFAULT_LISTEN_BACKLOG; +#endif /* TCP_LISTEN_BACKLOG */ +#if LWIP_IPV4 && LWIP_IPV6 + /* "Socket API like" dual-stack support: If IP to listen to is IP6_ADDR_ANY, + * and NETCONN_FLAG_IPV6_V6ONLY is NOT set, use IP_ANY_TYPE to listen + */ + if (ip_addr_cmp(&msg->conn->pcb.ip->local_ip, IP6_ADDR_ANY) && + (netconn_get_ipv6only(msg->conn) == 0)) { + /* change PCB type to IPADDR_TYPE_ANY */ + IP_SET_TYPE_VAL(msg->conn->pcb.tcp->local_ip, IPADDR_TYPE_ANY); + IP_SET_TYPE_VAL(msg->conn->pcb.tcp->remote_ip, IPADDR_TYPE_ANY); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + lpcb = tcp_listen_with_backlog_and_err(msg->conn->pcb.tcp, backlog, &err); + + if (lpcb == NULL) { + /* in this case, the old pcb is still allocated */ + } else { + /* delete the recvmbox and allocate the acceptmbox */ + if (sys_mbox_valid(&msg->conn->recvmbox)) { + /** @todo: should we drain the recvmbox here? */ + sys_mbox_free(&msg->conn->recvmbox); + sys_mbox_set_invalid(&msg->conn->recvmbox); + } + err = ERR_OK; + if (!sys_mbox_valid(&msg->conn->acceptmbox)) { + err = sys_mbox_new(&msg->conn->acceptmbox, DEFAULT_ACCEPTMBOX_SIZE); + } + if (err == ERR_OK) { + msg->conn->state = NETCONN_LISTEN; + msg->conn->pcb.tcp = lpcb; + tcp_arg(msg->conn->pcb.tcp, msg->conn); + tcp_accept(msg->conn->pcb.tcp, accept_function); + } else { + /* since the old pcb is already deallocated, free lpcb now */ + tcp_close(lpcb); + msg->conn->pcb.tcp = NULL; + } + } + } + } else if (msg->conn->state == NETCONN_LISTEN) { + /* already listening, allow updating of the backlog */ + err = ERR_OK; + tcp_backlog_set(msg->conn->pcb.tcp, msg->msg.lb.backlog); + } else { + err = ERR_CONN; + } + } else { + err = ERR_ARG; + } + } else { + err = ERR_CONN; + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} +#endif /* LWIP_TCP */ + +/** + * Send some data on a RAW or UDP pcb contained in a netconn + * Called from netconn_send + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_send(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + err_t err = netconn_err(msg->conn); + if (err == ERR_OK) { + if (msg->conn->pcb.tcp != NULL) { + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + if (ip_addr_isany(&msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = raw_send(msg->conn->pcb.raw, msg->msg.b->p); + } else { + err = raw_sendto(msg->conn->pcb.raw, msg->msg.b->p, &msg->msg.b->addr); + } + break; +#endif +#if LWIP_UDP + case NETCONN_UDP: +#if LWIP_CHECKSUM_ON_COPY + if (ip_addr_isany(&msg->msg.b->addr) || IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = udp_send_chksum(msg->conn->pcb.udp, msg->msg.b->p, + msg->msg.b->flags & NETBUF_FLAG_CHKSUM, msg->msg.b->toport_chksum); + } else { + err = udp_sendto_chksum(msg->conn->pcb.udp, msg->msg.b->p, + &msg->msg.b->addr, msg->msg.b->port, + msg->msg.b->flags & NETBUF_FLAG_CHKSUM, msg->msg.b->toport_chksum); + } +#else /* LWIP_CHECKSUM_ON_COPY */ + if (ip_addr_isany_val(msg->msg.b->addr) || 
IP_IS_ANY_TYPE_VAL(msg->msg.b->addr)) { + err = udp_send(msg->conn->pcb.udp, msg->msg.b->p); + } else { + err = udp_sendto(msg->conn->pcb.udp, msg->msg.b->p, &msg->msg.b->addr, msg->msg.b->port); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + break; +#endif /* LWIP_UDP */ + default: + err = ERR_CONN; + break; + } + } else { + err = ERR_CONN; + } + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_TCP +/** + * Indicate data has been received from a TCP pcb contained in a netconn + * Called from netconn_recv + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_recv(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + size_t remaining = msg->msg.r.len; + do { + u16_t recved = (u16_t)((remaining > 0xffff) ? 0xffff : remaining); + tcp_recved(msg->conn->pcb.tcp, recved); + remaining -= recved; + } while (remaining != 0); + } + } + TCPIP_APIMSG_ACK(msg); +} + +#if TCP_LISTEN_BACKLOG +/** Indicate that a TCP pcb has been accepted + * Called from netconn_accept + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_accepted(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_OK; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { + tcp_backlog_accepted(msg->conn->pcb.tcp); + } + } + TCPIP_APIMSG_ACK(msg); +} +#endif /* TCP_LISTEN_BACKLOG */ + +/** + * See if more data needs to be written from a previous call to netconn_write. + * Called initially from lwip_netconn_do_write. If the first call can't send all data + * (because of low memory or empty send-buffer), this function is called again + * from sent_tcp() or poll_tcp() to send more data. If all data is sent, the + * blocking application thread (waiting in netconn_write) is released. 
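+ *
+ * Example usage (illustrative sketch; a blocking write of a flat buffer):
+ *
+ *   const char data[] = "hello";
+ *   err_t err = netconn_write(conn, data, sizeof(data) - 1, NETCONN_COPY);
+ *   // netconn_write() blocks until this function has consumed all of
+ *   // 'data', possibly across several sent_tcp()/poll_tcp() invocations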
+ * + * @param conn netconn (that is currently in state NETCONN_WRITE) to process + * @return ERR_OK + * ERR_MEM if LWIP_TCPIP_CORE_LOCKING=1 and sending hasn't yet finished + */ +static err_t +lwip_netconn_do_writemore(struct netconn *conn WRITE_DELAYED_PARAM) +{ + err_t err; + const void *dataptr; + u16_t len, available; + u8_t write_finished = 0; + size_t diff; + u8_t dontblock; + u8_t apiflags; + u8_t write_more; + + LWIP_ASSERT("conn != NULL", conn != NULL); + LWIP_ASSERT("conn->state == NETCONN_WRITE", (conn->state == NETCONN_WRITE)); + LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL); + LWIP_ASSERT("conn->pcb.tcp != NULL", conn->pcb.tcp != NULL); + LWIP_ASSERT("conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len", + conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len); + LWIP_ASSERT("conn->current_msg->msg.w.vector_cnt > 0", conn->current_msg->msg.w.vector_cnt > 0); + + apiflags = conn->current_msg->msg.w.apiflags; + dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK); + +#if LWIP_SO_SNDTIMEO + if ((conn->send_timeout != 0) && + ((s32_t)(sys_now() - conn->current_msg->msg.w.time_started) >= conn->send_timeout)) { + write_finished = 1; + if (conn->current_msg->msg.w.offset == 0) { + /* nothing has been written */ + err = ERR_WOULDBLOCK; + } else { + /* partial write */ + err = ERR_OK; + } + } else +#endif /* LWIP_SO_SNDTIMEO */ + { + do { + dataptr = (const u8_t *)conn->current_msg->msg.w.vector->ptr + conn->current_msg->msg.w.vector_off; + diff = conn->current_msg->msg.w.vector->len - conn->current_msg->msg.w.vector_off; + if (diff > 0xffffUL) { /* max_u16_t */ + len = 0xffff; + apiflags |= TCP_WRITE_FLAG_MORE; + } else { + len = (u16_t)diff; + } + available = tcp_sndbuf(conn->pcb.tcp); + if (available < len) { + /* don't try to write more than sendbuf */ + len = available; + if (dontblock) { + if (!len) { + /* set error according to partial write or not */ + err = (conn->current_msg->msg.w.offset == 0) ? ERR_WOULDBLOCK : ERR_OK; + goto err_mem; + } + } else { + apiflags |= TCP_WRITE_FLAG_MORE; + } + } + LWIP_ASSERT("lwip_netconn_do_writemore: invalid length!", + ((conn->current_msg->msg.w.vector_off + len) <= conn->current_msg->msg.w.vector->len)); + /* we should loop around for more sending in the following cases: + 1) We couldn't finish the current vector because of 16-bit size limitations. 
+ tcp_write() and tcp_sndbuf() both are limited to 16-bit sizes + 2) We are sending the remainder of the current vector and have more */ + if ((len == 0xffff && diff > 0xffffUL) || + (len == (u16_t)diff && conn->current_msg->msg.w.vector_cnt > 1)) { + write_more = 1; + apiflags |= TCP_WRITE_FLAG_MORE; + } else { + write_more = 0; + } + err = tcp_write(conn->pcb.tcp, dataptr, len, apiflags); + if (err == ERR_OK) { + conn->current_msg->msg.w.offset += len; + conn->current_msg->msg.w.vector_off += len; + /* check if current vector is finished */ + if (conn->current_msg->msg.w.vector_off == conn->current_msg->msg.w.vector->len) { + conn->current_msg->msg.w.vector_cnt--; + /* if we have additional vectors, move on to them */ + if (conn->current_msg->msg.w.vector_cnt > 0) { + conn->current_msg->msg.w.vector++; + conn->current_msg->msg.w.vector_off = 0; + } + } + } + } while (write_more && err == ERR_OK); + /* if OK or memory error, check available space */ + if ((err == ERR_OK) || (err == ERR_MEM)) { +err_mem: + if (dontblock && (conn->current_msg->msg.w.offset < conn->current_msg->msg.w.len)) { + /* non-blocking write did not write everything: mark the pcb non-writable + and let poll_tcp check writable space to mark the pcb writable again */ + API_EVENT(conn, NETCONN_EVT_SENDMINUS, 0); + conn->flags |= NETCONN_FLAG_CHECK_WRITESPACE; + } else if ((tcp_sndbuf(conn->pcb.tcp) <= TCP_SNDLOWAT) || + (tcp_sndqueuelen(conn->pcb.tcp) >= TCP_SNDQUEUELOWAT)) { + /* The queued byte- or pbuf-count exceeds the configured low-water limit, + let select mark this pcb as non-writable. */ + API_EVENT(conn, NETCONN_EVT_SENDMINUS, 0); + } + } + + if (err == ERR_OK) { + err_t out_err; + if ((conn->current_msg->msg.w.offset == conn->current_msg->msg.w.len) || dontblock) { + /* return sent length (caller reads length from msg.w.offset) */ + write_finished = 1; + } + out_err = tcp_output(conn->pcb.tcp); + if (out_err == ERR_RTE) { + /* If tcp_output fails because no route is found, + don't try writing any more but return the error + to the application thread. */ + err = out_err; + write_finished = 1; + } + } else if (err == ERR_MEM) { + /* If ERR_MEM, we wait for sent_tcp or poll_tcp to be called. + For blocking sockets, we do NOT return to the application + thread, since ERR_MEM is only a temporary error! Non-blocking + will remain non-writable until sent_tcp/poll_tcp is called */ + + /* tcp_write returned ERR_MEM, try tcp_output anyway */ + err_t out_err = tcp_output(conn->pcb.tcp); + if (out_err == ERR_RTE) { + /* If tcp_output fails because no route is found, + don't try writing any more but return the error + to the application thread. */ + err = out_err; + write_finished = 1; + } else if (dontblock) { + /* non-blocking write is done on ERR_MEM, set error according + to partial write or not */ + err = (conn->current_msg->msg.w.offset == 0) ? ERR_WOULDBLOCK : ERR_OK; + write_finished = 1; + } + } else { + /* On errors != ERR_MEM, we don't try writing any more but return + the error to the application thread. 
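+
+         As an illustrative sketch, a non-blocking caller can retrieve the
+         partial count via netconn_write_partly():
+
+           size_t written = 0;
+           err = netconn_write_partly(conn, data, len,
+                                      NETCONN_COPY | NETCONN_DONTBLOCK, &written);
+           // ERR_OK with written < len is a short write; retry the rest once
+           // sent_tcp()/poll_tcp() have made the pcb writable again
+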
*/ + write_finished = 1; + } + } + if (write_finished) { + /* everything was written: set back connection state + and back to application task */ + sys_sem_t *op_completed_sem = LWIP_API_MSG_SEM(conn->current_msg); + conn->current_msg->err = err; + conn->current_msg = NULL; + conn->state = NETCONN_NONE; +#if LWIP_TCPIP_CORE_LOCKING + if (delayed) +#endif + { + sys_sem_signal(op_completed_sem); + } + } +#if LWIP_TCPIP_CORE_LOCKING + else { + return ERR_MEM; + } +#endif + return ERR_OK; +} +#endif /* LWIP_TCP */ + +/** + * Send some data on a TCP pcb contained in a netconn + * Called from netconn_write + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_write(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + err_t err = netconn_err(msg->conn); + if (err == ERR_OK) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) { +#if LWIP_TCP + if (msg->conn->state != NETCONN_NONE) { + /* netconn is connecting, closing or in blocking write */ + err = ERR_INPROGRESS; + } else if (msg->conn->pcb.tcp != NULL) { + msg->conn->state = NETCONN_WRITE; + /* set all the variables used by lwip_netconn_do_writemore */ + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + LWIP_ASSERT("msg->msg.w.len != 0", msg->msg.w.len != 0); + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_writemore(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_WRITE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state != NETCONN_WRITE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_writemore(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* for both cases: if lwip_netconn_do_writemore was called, don't ACK the APIMSG + since lwip_netconn_do_writemore ACKs it! 
*/ + return; + } else { + err = ERR_CONN; + } +#else /* LWIP_TCP */ + err = ERR_VAL; +#endif /* LWIP_TCP */ +#if (LWIP_UDP || LWIP_RAW) + } else { + err = ERR_VAL; +#endif /* (LWIP_UDP || LWIP_RAW) */ + } + } + msg->err = err; + TCPIP_APIMSG_ACK(msg); +} + +/** + * Return a connection's local or remote address + * Called from netconn_getaddr + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_getaddr(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + if (msg->conn->pcb.ip != NULL) { + if (msg->msg.ad.local) { + ip_addr_copy(API_EXPR_DEREF(msg->msg.ad.ipaddr), + msg->conn->pcb.ip->local_ip); + } else { + ip_addr_copy(API_EXPR_DEREF(msg->msg.ad.ipaddr), + msg->conn->pcb.ip->remote_ip); + } + + msg->err = ERR_OK; + switch (NETCONNTYPE_GROUP(msg->conn->type)) { +#if LWIP_RAW + case NETCONN_RAW: + if (msg->msg.ad.local) { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.raw->protocol; + } else { + /* return an error as connecting is only a helper for upper layers */ + msg->err = ERR_CONN; + } + break; +#endif /* LWIP_RAW */ +#if LWIP_UDP + case NETCONN_UDP: + if (msg->msg.ad.local) { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.udp->local_port; + } else { + if ((msg->conn->pcb.udp->flags & UDP_FLAGS_CONNECTED) == 0) { + msg->err = ERR_CONN; + } else { + API_EXPR_DEREF(msg->msg.ad.port) = msg->conn->pcb.udp->remote_port; + } + } + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case NETCONN_TCP: + if ((msg->msg.ad.local == 0) && + ((msg->conn->pcb.tcp->state == CLOSED) || (msg->conn->pcb.tcp->state == LISTEN))) { + /* pcb is not connected and remote name is requested */ + msg->err = ERR_CONN; + } else { + API_EXPR_DEREF(msg->msg.ad.port) = (msg->msg.ad.local ? msg->conn->pcb.tcp->local_port : msg->conn->pcb.tcp->remote_port); + } + break; +#endif /* LWIP_TCP */ + default: + LWIP_ASSERT("invalid netconn_type", 0); + break; + } + } else { + msg->err = ERR_CONN; + } + TCPIP_APIMSG_ACK(msg); +} + +/** + * Close or half-shutdown a TCP pcb contained in a netconn + * Called from netconn_close + * In contrast to closing sockets, the netconn is not deallocated. + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_close(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + +#if LWIP_TCP + enum netconn_state state = msg->conn->state; + /* First check if this is a TCP netconn and if it is in a correct state + (LISTEN doesn't support half shutdown) */ + if ((msg->conn->pcb.tcp != NULL) && + (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) && + ((msg->msg.sd.shut == NETCONN_SHUT_RDWR) || (state != NETCONN_LISTEN))) { + /* Check if we are in a connected state */ + if (state == NETCONN_CONNECT) { + /* TCP connect in progress: cannot shutdown */ + msg->err = ERR_CONN; + } else if (state == NETCONN_WRITE) { +#if LWIP_NETCONN_FULLDUPLEX + if (msg->msg.sd.shut & NETCONN_SHUT_WR) { + /* close requested, abort running write */ + sys_sem_t *write_completed_sem; + LWIP_ASSERT("msg->conn->current_msg != NULL", msg->conn->current_msg != NULL); + write_completed_sem = LWIP_API_MSG_SEM(msg->conn->current_msg); + msg->conn->current_msg->err = ERR_CLSD; + msg->conn->current_msg = NULL; + msg->conn->state = NETCONN_NONE; + state = NETCONN_NONE; + sys_sem_signal(write_completed_sem); + } else { + LWIP_ASSERT("msg->msg.sd.shut == NETCONN_SHUT_RD", msg->msg.sd.shut == NETCONN_SHUT_RD); + /* In this case, let the write continue and do not interfere with + conn->current_msg or conn->state! 
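+
+             As an illustrative sketch, both half-close variants enter this
+             function through netconn_shutdown():
+
+               netconn_shutdown(conn, 1, 0);  // NETCONN_SHUT_RD: stop receiving
+               netconn_shutdown(conn, 0, 1);  // NETCONN_SHUT_WR: send a FIN
+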
*/ + msg->err = tcp_shutdown(msg->conn->pcb.tcp, 1, 0); + } + } + if (state == NETCONN_NONE) { +#else /* LWIP_NETCONN_FULLDUPLEX */ + msg->err = ERR_INPROGRESS; + } else { +#endif /* LWIP_NETCONN_FULLDUPLEX */ + if (msg->msg.sd.shut & NETCONN_SHUT_RD) { +#if LWIP_NETCONN_FULLDUPLEX + /* Mark mboxes invalid */ + netconn_mark_mbox_invalid(msg->conn); +#else /* LWIP_NETCONN_FULLDUPLEX */ + netconn_drain(msg->conn); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + } + LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL); + msg->conn->state = NETCONN_CLOSE; + msg->conn->current_msg = msg; +#if LWIP_TCPIP_CORE_LOCKING + if (lwip_netconn_do_close_internal(msg->conn, 0) != ERR_OK) { + LWIP_ASSERT("state!", msg->conn->state == NETCONN_CLOSE); + UNLOCK_TCPIP_CORE(); + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + lwip_netconn_do_close_internal(msg->conn); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + /* for tcp netconns, lwip_netconn_do_close_internal ACKs the message */ + return; + } + } else +#endif /* LWIP_TCP */ + { + msg->err = ERR_CONN; + } + TCPIP_APIMSG_ACK(msg); +} + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** + * Join multicast groups for UDP netconns. + * Called from netconn_join_leave_group + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_join_leave_group(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + + msg->err = ERR_CONN; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { +#if LWIP_UDP +#if LWIP_IPV6 && LWIP_IPV6_MLD + if (NETCONNTYPE_ISIPV6(msg->conn->type)) { + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = mld6_joingroup(ip_2_ip6(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = mld6_leavegroup(ip_2_ip6(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } + } else +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + { +#if LWIP_IGMP + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = igmp_joingroup(ip_2_ip4(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = igmp_leavegroup(ip_2_ip4(API_EXPR_REF(msg->msg.jl.netif_addr)), + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_UDP */ +#if (LWIP_TCP || LWIP_RAW) + } else { + msg->err = ERR_VAL; +#endif /* (LWIP_TCP || LWIP_RAW) */ + } + } + TCPIP_APIMSG_ACK(msg); +} +/** + * Join multicast groups for UDP netconns. 
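+ *
+ * Example usage (illustrative sketch; IPv4 group 239.0.0.1, chosen
+ * arbitrarily, joined on the netif with index 1):
+ *
+ *   ip_addr_t grp;
+ *   IP_ADDR4(&grp, 239, 0, 0, 1);
+ *   err_t err = netconn_join_leave_group_netif(conn, &grp, 1, NETCONN_JOIN);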
+ * Called from netconn_join_leave_group_netif + * + * @param m the api_msg pointing to the connection + */ +void +lwip_netconn_do_join_leave_group_netif(void *m) +{ + struct api_msg *msg = (struct api_msg *)m; + struct netif *netif; + + netif = netif_get_by_index(msg->msg.jl.if_idx); + if (netif == NULL) { + msg->err = ERR_IF; + goto done; + } + + msg->err = ERR_CONN; + if (msg->conn->pcb.tcp != NULL) { + if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_UDP) { +#if LWIP_UDP +#if LWIP_IPV6 && LWIP_IPV6_MLD + if (NETCONNTYPE_ISIPV6(msg->conn->type)) { + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = mld6_joingroup_netif(netif, + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = mld6_leavegroup_netif(netif, + ip_2_ip6(API_EXPR_REF(msg->msg.jl.multiaddr))); + } + } else +#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ + { +#if LWIP_IGMP + if (msg->msg.jl.join_or_leave == NETCONN_JOIN) { + msg->err = igmp_joingroup_netif(netif, + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } else { + msg->err = igmp_leavegroup_netif(netif, + ip_2_ip4(API_EXPR_REF(msg->msg.jl.multiaddr))); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_UDP */ +#if (LWIP_TCP || LWIP_RAW) + } else { + msg->err = ERR_VAL; +#endif /* (LWIP_TCP || LWIP_RAW) */ + } + } + +done: + TCPIP_APIMSG_ACK(msg); +} +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/** + * Callback function that is called when DNS name is resolved + * (or on timeout). A waiting application thread is waked up by + * signaling the semaphore. + */ +static void +lwip_netconn_do_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg) +{ + struct dns_api_msg *msg = (struct dns_api_msg *)arg; + + /* we trust the internal implementation to be correct :-) */ + LWIP_UNUSED_ARG(name); + + if (ipaddr == NULL) { + /* timeout or memory error */ + API_EXPR_DEREF(msg->err) = ERR_VAL; + } else { + /* address was resolved */ + API_EXPR_DEREF(msg->err) = ERR_OK; + API_EXPR_DEREF(msg->addr) = *ipaddr; + } + /* wake up the application task waiting in netconn_gethostbyname */ + sys_sem_signal(API_EXPR_REF_SEM(msg->sem)); +} + +/** + * Execute a DNS query + * Called from netconn_gethostbyname + * + * @param arg the dns_api_msg pointing to the query + */ +void +lwip_netconn_do_gethostbyname(void *arg) +{ + struct dns_api_msg *msg = (struct dns_api_msg *)arg; + u8_t addrtype = +#if LWIP_IPV4 && LWIP_IPV6 + msg->dns_addrtype; +#else + LWIP_DNS_ADDRTYPE_DEFAULT; +#endif + + API_EXPR_DEREF(msg->err) = dns_gethostbyname_addrtype(msg->name, + API_EXPR_REF(msg->addr), lwip_netconn_do_dns_found, msg, addrtype); +#if LWIP_TCPIP_CORE_LOCKING + /* For core locking, only block if we need to wait for answer/timeout */ + if (API_EXPR_DEREF(msg->err) == ERR_INPROGRESS) { + UNLOCK_TCPIP_CORE(); + sys_sem_wait(API_EXPR_REF_SEM(msg->sem)); + LOCK_TCPIP_CORE(); + LWIP_ASSERT("do_gethostbyname still in progress!!", API_EXPR_DEREF(msg->err) != ERR_INPROGRESS); + } +#else /* LWIP_TCPIP_CORE_LOCKING */ + if (API_EXPR_DEREF(msg->err) != ERR_INPROGRESS) { + /* on error or immediate success, wake up the application + * task waiting in netconn_gethostbyname */ + sys_sem_signal(API_EXPR_REF_SEM(msg->sem)); + } +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} +#endif /* LWIP_DNS */ + +#endif /* LWIP_NETCONN */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/err.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/err.c new file mode 100644 index 0000000..f2d7180 
--- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/err.c @@ -0,0 +1,115 @@ +/** + * @file + * Error Management module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/err.h" +#include "lwip/def.h" +#include "lwip/sys.h" + +#include "lwip/errno.h" + +#if !NO_SYS +/** Table to quickly map an lwIP error (err_t) to a socket error + * by using -err as an index */ +static const int err_to_errno_table[] = { + 0, /* ERR_OK 0 No error, everything OK. */ + ENOMEM, /* ERR_MEM -1 Out of memory error. */ + ENOBUFS, /* ERR_BUF -2 Buffer error. */ + EWOULDBLOCK, /* ERR_TIMEOUT -3 Timeout */ + EHOSTUNREACH, /* ERR_RTE -4 Routing problem. */ + EINPROGRESS, /* ERR_INPROGRESS -5 Operation in progress */ + EINVAL, /* ERR_VAL -6 Illegal value. */ + EWOULDBLOCK, /* ERR_WOULDBLOCK -7 Operation would block. */ + EADDRINUSE, /* ERR_USE -8 Address in use. */ + EALREADY, /* ERR_ALREADY -9 Already connecting. */ + EISCONN, /* ERR_ISCONN -10 Conn already established.*/ + ENOTCONN, /* ERR_CONN -11 Not connected. */ + -1, /* ERR_IF -12 Low-level netif error */ + ECONNABORTED, /* ERR_ABRT -13 Connection aborted. */ + ECONNRESET, /* ERR_RST -14 Connection reset. */ + ENOTCONN, /* ERR_CLSD -15 Connection closed. */ + EIO /* ERR_ARG -16 Illegal argument. 
*/ +}; + +int +err_to_errno(err_t err) +{ + if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_to_errno_table))) { + return EIO; + } + return err_to_errno_table[-err]; +} +#endif /* !NO_SYS */ + +#ifdef LWIP_DEBUG + +static const char *err_strerr[] = { + "Ok.", /* ERR_OK 0 */ + "Out of memory error.", /* ERR_MEM -1 */ + "Buffer error.", /* ERR_BUF -2 */ + "Timeout.", /* ERR_TIMEOUT -3 */ + "Routing problem.", /* ERR_RTE -4 */ + "Operation in progress.", /* ERR_INPROGRESS -5 */ + "Illegal value.", /* ERR_VAL -6 */ + "Operation would block.", /* ERR_WOULDBLOCK -7 */ + "Address in use.", /* ERR_USE -8 */ + "Already connecting.", /* ERR_ALREADY -9 */ + "Already connected.", /* ERR_ISCONN -10 */ + "Not connected.", /* ERR_CONN -11 */ + "Low-level netif error.", /* ERR_IF -12 */ + "Connection aborted.", /* ERR_ABRT -13 */ + "Connection reset.", /* ERR_RST -14 */ + "Connection closed.", /* ERR_CLSD -15 */ + "Illegal argument." /* ERR_ARG -16 */ +}; + +/** + * Convert an lwip internal error to a string representation. + * + * @param err an lwip internal err_t + * @return a string representation for err + */ +const char * +lwip_strerr(err_t err) +{ + if ((err > 0) || (-err >= (err_t)LWIP_ARRAYSIZE(err_strerr))) { + return "Unknown error."; + } + return err_strerr[-err]; +} + +#endif /* LWIP_DEBUG */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/if_api.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/if_api.c new file mode 100644 index 0000000..2eda9f0 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/if_api.c @@ -0,0 +1,102 @@ +/** + * @file + * Interface Identification APIs from: + * RFC 3493: Basic Socket Interface Extensions for IPv6 + * Section 4: Interface Identification + * + * @defgroup if_api Interface Identification API + * @ingroup socket + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ *
+ * Author: Joel Cunningham
+ *
+ */
+#include "lwip/opt.h"
+
+#if LWIP_SOCKET
+
+#include "lwip/errno.h"
+#include "lwip/if_api.h"
+#include "lwip/netifapi.h"
+#include "lwip/priv/sockets_priv.h"
+
+/**
+ * @ingroup if_api
+ * Maps an interface index to its corresponding name.
+ * @param ifindex interface index
+ * @param ifname shall point to a buffer of at least {IF_NAMESIZE} bytes
+ * @return If ifindex is an interface index, then the function shall return the
+ * value supplied in ifname, which points to a buffer now containing the interface name.
+ * Otherwise, the function shall return a NULL pointer.
+ */
+char *
+lwip_if_indextoname(unsigned int ifindex, char *ifname)
+{
+#if LWIP_NETIF_API
+  if (ifindex <= 0xff) {
+    err_t err = netifapi_netif_index_to_name((u8_t)ifindex, ifname);
+    if (!err && ifname[0] != '\0') {
+      return ifname;
+    }
+  }
+#else /* LWIP_NETIF_API */
+  LWIP_UNUSED_ARG(ifindex);
+  LWIP_UNUSED_ARG(ifname);
+#endif /* LWIP_NETIF_API */
+  set_errno(ENXIO);
+  return NULL;
+}
+
+/**
+ * @ingroup if_api
+ * Returns the interface index corresponding to name ifname.
+ * @param ifname Interface name
+ * @return The corresponding index if ifname is the name of an interface;
+ * otherwise, zero.
+ */
+unsigned int
+lwip_if_nametoindex(const char *ifname)
+{
+#if LWIP_NETIF_API
+  err_t err;
+  u8_t idx;
+
+  err = netifapi_netif_name_to_index(ifname, &idx);
+  if (!err) {
+    return idx;
+  }
+#else /* LWIP_NETIF_API */
+  LWIP_UNUSED_ARG(ifname);
+#endif /* LWIP_NETIF_API */
+  return 0; /* invalid index */
+}
+
+#endif /* LWIP_SOCKET */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netbuf.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netbuf.c
new file mode 100644
index 0000000..b464440
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netbuf.c
@@ -0,0 +1,250 @@
+/**
+ * @file
+ * Network buffer management
+ *
+ * @defgroup netbuf Network buffers
+ * @ingroup netconn
+ * Network buffer descriptor for @ref netconn. Based on @ref pbuf internally
+ * to avoid copying data around.\n
+ * Buffers must not be shared across multiple threads; all functions except
+ * netbuf_new() and netbuf_delete() are not thread-safe.
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_NETCONN /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/netbuf.h"
+#include "lwip/memp.h"
+
+#include <string.h>
+
+/**
+ * @ingroup netbuf
+ * Create (allocate) and initialize a new netbuf.
+ * The netbuf doesn't yet contain a packet buffer!
+ *
+ * @return a pointer to a new netbuf
+ *         NULL on lack of memory
+ */
+struct netbuf *
+netbuf_new(void)
+{
+  struct netbuf *buf;
+
+  buf = (struct netbuf *)memp_malloc(MEMP_NETBUF);
+  if (buf != NULL) {
+    memset(buf, 0, sizeof(struct netbuf));
+  }
+  return buf;
+}
+
+/**
+ * @ingroup netbuf
+ * Deallocate a netbuf allocated by netbuf_new().
+ *
+ * @param buf pointer to a netbuf allocated by netbuf_new()
+ */
+void
+netbuf_delete(struct netbuf *buf)
+{
+  if (buf != NULL) {
+    if (buf->p != NULL) {
+      pbuf_free(buf->p);
+      buf->p = buf->ptr = NULL;
+    }
+    memp_free(MEMP_NETBUF, buf);
+  }
+}
+
+/**
+ * @ingroup netbuf
+ * Allocate memory for a packet buffer for a given netbuf.
+ *
+ * @param buf the netbuf for which to allocate a packet buffer
+ * @param size the size of the packet buffer to allocate
+ * @return pointer to the allocated memory
+ *         NULL if no memory could be allocated
+ */
+void *
+netbuf_alloc(struct netbuf *buf, u16_t size)
+{
+  LWIP_ERROR("netbuf_alloc: invalid buf", (buf != NULL), return NULL;);
+
+  /* Deallocate any previously allocated memory. */
+  if (buf->p != NULL) {
+    pbuf_free(buf->p);
+  }
+  buf->p = pbuf_alloc(PBUF_TRANSPORT, size, PBUF_RAM);
+  if (buf->p == NULL) {
+    return NULL;
+  }
+  LWIP_ASSERT("check that first pbuf can hold size",
+              (buf->p->len >= size));
+  buf->ptr = buf->p;
+  return buf->p->payload;
+}
+
+/**
+ * @ingroup netbuf
+ * Free the packet buffer included in a netbuf
+ *
+ * @param buf pointer to the netbuf which contains the packet buffer to free
+ */
+void
+netbuf_free(struct netbuf *buf)
+{
+  LWIP_ERROR("netbuf_free: invalid buf", (buf != NULL), return;);
+  if (buf->p != NULL) {
+    pbuf_free(buf->p);
+  }
+  buf->p = buf->ptr = NULL;
+#if LWIP_CHECKSUM_ON_COPY
+  buf->flags = 0;
+  buf->toport_chksum = 0;
+#endif /* LWIP_CHECKSUM_ON_COPY */
+}
+
+/**
+ * @ingroup netbuf
+ * Let a netbuf reference existing (non-volatile) data.
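+ *
+ * Example usage (illustrative sketch; send constant data without copying,
+ * assuming a UDP netconn that is already connected):
+ *
+ *   static const char payload[] = "ping";
+ *   struct netbuf *buf = netbuf_new();
+ *   if ((buf != NULL) &&
+ *       (netbuf_ref(buf, payload, sizeof(payload) - 1) == ERR_OK)) {
+ *     netconn_send(conn, buf);
+ *   }
+ *   netbuf_delete(buf);  // NULL-safe; frees only the PBUF_REF wrapper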
+ *
+ * @param buf netbuf which should reference the data
+ * @param dataptr pointer to the data to reference
+ * @param size size of the data
+ * @return ERR_OK if data is referenced
+ *         ERR_MEM if data couldn't be referenced due to lack of memory
+ */
+err_t
+netbuf_ref(struct netbuf *buf, const void *dataptr, u16_t size)
+{
+  LWIP_ERROR("netbuf_ref: invalid buf", (buf != NULL), return ERR_ARG;);
+  if (buf->p != NULL) {
+    pbuf_free(buf->p);
+  }
+  buf->p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF);
+  if (buf->p == NULL) {
+    buf->ptr = NULL;
+    return ERR_MEM;
+  }
+  ((struct pbuf_rom *)buf->p)->payload = dataptr;
+  buf->p->len = buf->p->tot_len = size;
+  buf->ptr = buf->p;
+  return ERR_OK;
+}
+
+/**
+ * @ingroup netbuf
+ * Chain one netbuf to another (@see pbuf_chain)
+ *
+ * @param head the first netbuf
+ * @param tail netbuf to chain after head, freed by this function, must not be
+ *        referenced after returning
+ */
+void
+netbuf_chain(struct netbuf *head, struct netbuf *tail)
+{
+  LWIP_ERROR("netbuf_chain: invalid head", (head != NULL), return;);
+  LWIP_ERROR("netbuf_chain: invalid tail", (tail != NULL), return;);
+  pbuf_cat(head->p, tail->p);
+  head->ptr = head->p;
+  memp_free(MEMP_NETBUF, tail);
+}
+
+/**
+ * @ingroup netbuf
+ * Get the data pointer and length of the data inside a netbuf.
+ *
+ * @param buf netbuf to get the data from
+ * @param dataptr pointer to a void pointer where to store the data pointer
+ * @param len pointer to a u16_t where the length of the data is stored
+ * @return ERR_OK if the information was retrieved,
+ *         ERR_BUF on error.
+ */
+err_t
+netbuf_data(struct netbuf *buf, void **dataptr, u16_t *len)
+{
+  LWIP_ERROR("netbuf_data: invalid buf", (buf != NULL), return ERR_ARG;);
+  LWIP_ERROR("netbuf_data: invalid dataptr", (dataptr != NULL), return ERR_ARG;);
+  LWIP_ERROR("netbuf_data: invalid len", (len != NULL), return ERR_ARG;);
+
+  if (buf->ptr == NULL) {
+    return ERR_BUF;
+  }
+  *dataptr = buf->ptr->payload;
+  *len = buf->ptr->len;
+  return ERR_OK;
+}
+
+/**
+ * @ingroup netbuf
+ * Move the current data pointer of a packet buffer contained in a netbuf
+ * to the next part.
+ * The packet buffer itself is not modified.
+ *
+ * @param buf the netbuf to modify
+ * @return -1 if there is no next part
+ *         1  if moved to the next part but now there is no next part
+ *         0  if moved to the next part and there are still more parts
+ */
+s8_t
+netbuf_next(struct netbuf *buf)
+{
+  LWIP_ERROR("netbuf_next: invalid buf", (buf != NULL), return -1;);
+  if (buf->ptr->next == NULL) {
+    return -1;
+  }
+  buf->ptr = buf->ptr->next;
+  if (buf->ptr->next == NULL) {
+    return 1;
+  }
+  return 0;
+}
+
+/**
+ * @ingroup netbuf
+ * Move the current data pointer of a packet buffer contained in a netbuf
+ * to the beginning of the packet.
+ * The packet buffer itself is not modified.
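+ *
+ * Example usage (illustrative sketch; walk all parts of a received netbuf):
+ *
+ *   void  *data;
+ *   u16_t len;
+ *   netbuf_first(buf);
+ *   do {
+ *     netbuf_data(buf, &data, &len);
+ *     // process 'len' bytes at 'data'
+ *   } while (netbuf_next(buf) >= 0);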
+ *
+ * @param buf the netbuf to modify
+ */
+void
+netbuf_first(struct netbuf *buf)
+{
+  LWIP_ERROR("netbuf_first: invalid buf", (buf != NULL), return;);
+  buf->ptr = buf->p;
+}
+
+#endif /* LWIP_NETCONN */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netdb.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netdb.c
new file mode 100644
index 0000000..9ffc75a
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netdb.c
@@ -0,0 +1,414 @@
+/**
+ * @file
+ * API functions for name resolving
+ *
+ * @defgroup netdbapi NETDB API
+ * @ingroup socket
+ */
+
+/*
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Simon Goldschmidt
+ *
+ */
+
+#include "lwip/netdb.h"
+
+#if LWIP_DNS && LWIP_SOCKET
+
+#include "lwip/err.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/ip_addr.h"
+#include "lwip/api.h"
+#include "lwip/dns.h"
+
+#include <string.h> /* memset */
+#include <stdlib.h> /* atoi */
+
+/** helper struct for gethostbyname_r to access the char* buffer */
+struct gethostbyname_r_helper {
+  ip_addr_t *addr_list[2];
+  ip_addr_t addr;
+  char *aliases;
+};
+
+/** h_errno is exported in netdb.h for access by applications. */
+#if LWIP_DNS_API_DECLARE_H_ERRNO
+int h_errno;
+#endif /* LWIP_DNS_API_DECLARE_H_ERRNO */
+
+/** define "hostent" variables storage: 0 if we use a static (but unprotected)
+ *  set of variables for lwip_gethostbyname, 1 if we use a local storage */
+#ifndef LWIP_DNS_API_HOSTENT_STORAGE
+#define LWIP_DNS_API_HOSTENT_STORAGE 0
+#endif
+
+/** define "hostent" variables storage */
+#if LWIP_DNS_API_HOSTENT_STORAGE
+#define HOSTENT_STORAGE
+#else
+#define HOSTENT_STORAGE static
+#endif /* LWIP_DNS_API_HOSTENT_STORAGE */
+
+/**
+ * Returns an entry containing addresses of address family AF_INET
+ * for the host with name name.
+ * Due to dns_gethostbyname limitations, only one address is returned.
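+ *
+ * Example usage (illustrative sketch; blocks until the DNS answer arrives
+ * or the query times out, hostname chosen arbitrarily):
+ *
+ *   struct hostent *he = lwip_gethostbyname("example.org");
+ *   if (he != NULL) {
+ *     ip_addr_t *addr = (ip_addr_t *)he->h_addr_list[0];
+ *     // use 'addr'; the storage is static unless
+ *     // LWIP_DNS_API_HOSTENT_STORAGE is enabled
+ *   } else {
+ *     // h_errno == HOST_NOT_FOUND
+ *   }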
+ * + * @param name the hostname to resolve + * @return an entry containing addresses of address family AF_INET + * for the host with name name + */ +struct hostent * +lwip_gethostbyname(const char *name) +{ + err_t err; + ip_addr_t addr; + + /* buffer variables for lwip_gethostbyname() */ + HOSTENT_STORAGE struct hostent s_hostent; + HOSTENT_STORAGE char *s_aliases; + HOSTENT_STORAGE ip_addr_t s_hostent_addr; + HOSTENT_STORAGE ip_addr_t *s_phostent_addr[2]; + HOSTENT_STORAGE char s_hostname[DNS_MAX_NAME_LENGTH + 1]; + + /* query host IP address */ + err = netconn_gethostbyname(name, &addr); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err)); + h_errno = HOST_NOT_FOUND; + return NULL; + } + + /* fill hostent */ + s_hostent_addr = addr; + s_phostent_addr[0] = &s_hostent_addr; + s_phostent_addr[1] = NULL; + strncpy(s_hostname, name, DNS_MAX_NAME_LENGTH); + s_hostname[DNS_MAX_NAME_LENGTH] = 0; + s_hostent.h_name = s_hostname; + s_aliases = NULL; + s_hostent.h_aliases = &s_aliases; + s_hostent.h_addrtype = AF_INET; + s_hostent.h_length = sizeof(ip_addr_t); + s_hostent.h_addr_list = (char **)&s_phostent_addr; + +#if DNS_DEBUG + /* dump hostent */ + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_name == %s\n", s_hostent.h_name)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_aliases == %p\n", (void *)s_hostent.h_aliases)); + /* h_aliases are always empty */ + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addrtype == %d\n", s_hostent.h_addrtype)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_length == %d\n", s_hostent.h_length)); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list == %p\n", (void *)s_hostent.h_addr_list)); + if (s_hostent.h_addr_list != NULL) { + u8_t idx; + for (idx = 0; s_hostent.h_addr_list[idx]; idx++) { + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i] == %p\n", idx, s_hostent.h_addr_list[idx])); + LWIP_DEBUGF(DNS_DEBUG, ("hostent.h_addr_list[%i]-> == %s\n", idx, ipaddr_ntoa((ip_addr_t *)s_hostent.h_addr_list[idx]))); + } + } +#endif /* DNS_DEBUG */ + +#if LWIP_DNS_API_HOSTENT_STORAGE + /* this function should return the "per-thread" hostent after copy from s_hostent */ + return sys_thread_hostent(&s_hostent); +#else + return &s_hostent; +#endif /* LWIP_DNS_API_HOSTENT_STORAGE */ +} + +/** + * Thread-safe variant of lwip_gethostbyname: instead of using a static + * buffer, this function takes buffer and errno pointers as arguments + * and uses these for the result. 
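+ *
+ * Editor's note: a minimal usage sketch (not part of the original lwIP
+ * sources); the buffer must be large enough for the helper struct plus a
+ * copy of the name:
+ *
+ *   struct hostent he, *res = NULL;
+ *   char buf[256];
+ *   int local_err;
+ *   if (lwip_gethostbyname_r("example.org", &he, buf, sizeof(buf),
+ *                            &res, &local_err) == 0) {
+ *     /* res == &he; res->h_addr_list[0] points at the resolved address */
+ *   }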
+ * + * @param name the hostname to resolve + * @param ret pre-allocated struct where to store the result + * @param buf pre-allocated buffer where to store additional data + * @param buflen the size of buf + * @param result pointer to a hostent pointer that is set to ret on success + * and set to zero on error + * @param h_errnop pointer to an int where to store errors (instead of modifying + * the global h_errno) + * @return 0 on success, non-zero on error, additional error information + * is stored in *h_errnop instead of h_errno to be thread-safe + */ +int +lwip_gethostbyname_r(const char *name, struct hostent *ret, char *buf, + size_t buflen, struct hostent **result, int *h_errnop) +{ + err_t err; + struct gethostbyname_r_helper *h; + char *hostname; + size_t namelen; + int lh_errno; + + if (h_errnop == NULL) { + /* ensure h_errnop is never NULL */ + h_errnop = &lh_errno; + } + + if (result == NULL) { + /* not all arguments given */ + *h_errnop = EINVAL; + return -1; + } + /* first thing to do: set *result to nothing */ + *result = NULL; + if ((name == NULL) || (ret == NULL) || (buf == NULL)) { + /* not all arguments given */ + *h_errnop = EINVAL; + return -1; + } + + namelen = strlen(name); + if (buflen < (sizeof(struct gethostbyname_r_helper) + LWIP_MEM_ALIGN_BUFFER(namelen + 1))) { + /* buf can't hold the data needed + a copy of name */ + *h_errnop = ERANGE; + return -1; + } + + h = (struct gethostbyname_r_helper *)LWIP_MEM_ALIGN(buf); + hostname = ((char *)h) + sizeof(struct gethostbyname_r_helper); + + /* query host IP address */ + err = netconn_gethostbyname(name, &h->addr); + if (err != ERR_OK) { + LWIP_DEBUGF(DNS_DEBUG, ("lwip_gethostbyname(%s) failed, err=%d\n", name, err)); + *h_errnop = HOST_NOT_FOUND; + return -1; + } + + /* copy the hostname into buf */ + MEMCPY(hostname, name, namelen); + hostname[namelen] = 0; + + /* fill hostent */ + h->addr_list[0] = &h->addr; + h->addr_list[1] = NULL; + h->aliases = NULL; + ret->h_name = hostname; + ret->h_aliases = &h->aliases; + ret->h_addrtype = AF_INET; + ret->h_length = sizeof(ip_addr_t); + ret->h_addr_list = (char **)&h->addr_list; + + /* set result != NULL */ + *result = ret; + + /* return success */ + return 0; +} + +/** + * Frees one or more addrinfo structures returned by getaddrinfo(), along with + * any additional storage associated with those structures. If the ai_next field + * of the structure is not null, the entire list of structures is freed. + * + * @param ai struct addrinfo to free + */ +void +lwip_freeaddrinfo(struct addrinfo *ai) +{ + struct addrinfo *next; + + while (ai != NULL) { + next = ai->ai_next; + memp_free(MEMP_NETDB, ai); + ai = next; + } +} + +/** + * Translates the name of a service location (for example, a host name) and/or + * a service name and returns a set of socket addresses and associated + * information to be used in creating a socket with which to address the + * specified service. + * Memory for the result is allocated internally and must be freed by calling + * lwip_freeaddrinfo()! + * + * Due to a limitation in dns_gethostbyname, only the first address of a + * host is returned. + * Also, service names are not supported (only port numbers)! 
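+ *
+ * Editor's note: a minimal usage sketch (not part of the original lwIP
+ * sources); note that only numeric service strings (port numbers) are
+ * accepted:
+ *
+ *   struct addrinfo hints, *res = NULL;
+ *   memset(&hints, 0, sizeof(hints));
+ *   hints.ai_family = AF_INET;
+ *   hints.ai_socktype = SOCK_STREAM;
+ *   if (lwip_getaddrinfo("example.org", "80", &hints, &res) == 0) {
+ *     /* use res->ai_addr / res->ai_addrlen, then release the result */
+ *     lwip_freeaddrinfo(res);
+ *   }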
+ * + * @param nodename descriptive name or address string of the host + * (may be NULL -> local address) + * @param servname port number as string of NULL + * @param hints structure containing input values that set socktype and protocol + * @param res pointer to a pointer where to store the result (set to NULL on failure) + * @return 0 on success, non-zero on failure + * + * @todo: implement AI_V4MAPPED, AI_ADDRCONFIG + */ +int +lwip_getaddrinfo(const char *nodename, const char *servname, + const struct addrinfo *hints, struct addrinfo **res) +{ + err_t err; + ip_addr_t addr; + struct addrinfo *ai; + struct sockaddr_storage *sa = NULL; + int port_nr = 0; + size_t total_size; + size_t namelen = 0; + int ai_family; + + if (res == NULL) { + return EAI_FAIL; + } + *res = NULL; + if ((nodename == NULL) && (servname == NULL)) { + return EAI_NONAME; + } + + if (hints != NULL) { + ai_family = hints->ai_family; + if ((ai_family != AF_UNSPEC) +#if LWIP_IPV4 + && (ai_family != AF_INET) +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + && (ai_family != AF_INET6) +#endif /* LWIP_IPV6 */ + ) { + return EAI_FAMILY; + } + } else { + ai_family = AF_UNSPEC; + } + + if (servname != NULL) { + /* service name specified: convert to port number + * @todo?: currently, only ASCII integers (port numbers) are supported (AI_NUMERICSERV)! */ + port_nr = atoi(servname); + if ((port_nr <= 0) || (port_nr > 0xffff)) { + return EAI_SERVICE; + } + } + + if (nodename != NULL) { + /* service location specified, try to resolve */ + if ((hints != NULL) && (hints->ai_flags & AI_NUMERICHOST)) { + /* no DNS lookup, just parse for an address string */ + if (!ipaddr_aton(nodename, &addr)) { + return EAI_NONAME; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((IP_IS_V6_VAL(addr) && ai_family == AF_INET) || + (IP_IS_V4_VAL(addr) && ai_family == AF_INET6)) { + return EAI_NONAME; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + } else { +#if LWIP_IPV4 && LWIP_IPV6 + /* AF_UNSPEC: prefer IPv4 */ + u8_t type = NETCONN_DNS_IPV4_IPV6; + if (ai_family == AF_INET) { + type = NETCONN_DNS_IPV4; + } else if (ai_family == AF_INET6) { + type = NETCONN_DNS_IPV6; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + err = netconn_gethostbyname_addrtype(nodename, &addr, type); + if (err != ERR_OK) { + return EAI_FAIL; + } + } + } else { + /* service location specified, use loopback address */ + if ((hints != NULL) && (hints->ai_flags & AI_PASSIVE)) { + ip_addr_set_any_val(ai_family == AF_INET6, addr); + } else { + ip_addr_set_loopback_val(ai_family == AF_INET6, addr); + } + } + + total_size = sizeof(struct addrinfo) + sizeof(struct sockaddr_storage); + if (nodename != NULL) { + namelen = strlen(nodename); + if (namelen > DNS_MAX_NAME_LENGTH) { + /* invalid name length */ + return EAI_FAIL; + } + LWIP_ASSERT("namelen is too long", total_size + namelen + 1 > total_size); + total_size += namelen + 1; + } + /* If this fails, please report to lwip-devel! 
:-) */ + LWIP_ASSERT("total_size <= NETDB_ELEM_SIZE: please report this!", + total_size <= NETDB_ELEM_SIZE); + ai = (struct addrinfo *)memp_malloc(MEMP_NETDB); + if (ai == NULL) { + return EAI_MEMORY; + } + memset(ai, 0, total_size); + /* cast through void* to get rid of alignment warnings */ + sa = (struct sockaddr_storage *)(void *)((u8_t *)ai + sizeof(struct addrinfo)); + if (IP_IS_V6_VAL(addr)) { +#if LWIP_IPV6 + struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)sa; + /* set up sockaddr */ + inet6_addr_from_ip6addr(&sa6->sin6_addr, ip_2_ip6(&addr)); + sa6->sin6_family = AF_INET6; + sa6->sin6_len = sizeof(struct sockaddr_in6); + sa6->sin6_port = lwip_htons((u16_t)port_nr); + sa6->sin6_scope_id = ip6_addr_zone(ip_2_ip6(&addr)); + ai->ai_family = AF_INET6; +#endif /* LWIP_IPV6 */ + } else { +#if LWIP_IPV4 + struct sockaddr_in *sa4 = (struct sockaddr_in *)sa; + /* set up sockaddr */ + inet_addr_from_ip4addr(&sa4->sin_addr, ip_2_ip4(&addr)); + sa4->sin_family = AF_INET; + sa4->sin_len = sizeof(struct sockaddr_in); + sa4->sin_port = lwip_htons((u16_t)port_nr); + ai->ai_family = AF_INET; +#endif /* LWIP_IPV4 */ + } + + /* set up addrinfo */ + if (hints != NULL) { + /* copy socktype & protocol from hints if specified */ + ai->ai_socktype = hints->ai_socktype; + ai->ai_protocol = hints->ai_protocol; + } + if (nodename != NULL) { + /* copy nodename to canonname if specified */ + ai->ai_canonname = ((char *)ai + sizeof(struct addrinfo) + sizeof(struct sockaddr_storage)); + MEMCPY(ai->ai_canonname, nodename, namelen); + ai->ai_canonname[namelen] = 0; + } + ai->ai_addrlen = sizeof(struct sockaddr_storage); + ai->ai_addr = (struct sockaddr *)sa; + + *res = ai; + + return 0; +} + +#endif /* LWIP_DNS && LWIP_SOCKET */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netifapi.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netifapi.c new file mode 100644 index 0000000..7c2011f --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/netifapi.c @@ -0,0 +1,380 @@ +/** + * @file + * Network Interface Sequential API module + * + * @defgroup netifapi NETIF API + * @ingroup sequential_api + * Thread-safe functions to be called from non-TCPIP threads + * + * @defgroup netifapi_netif NETIF related + * @ingroup netifapi + * To be called from non-TCPIP threads + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_NETIF_API /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/etharp.h"
+#include "lwip/netifapi.h"
+#include "lwip/memp.h"
+#include "lwip/priv/tcpip_priv.h"
+
+#include <string.h> /* strncpy */
+
+#define NETIFAPI_VAR_REF(name)     API_VAR_REF(name)
+#define NETIFAPI_VAR_DECLARE(name) API_VAR_DECLARE(struct netifapi_msg, name)
+#define NETIFAPI_VAR_ALLOC(name)   API_VAR_ALLOC(struct netifapi_msg, MEMP_NETIFAPI_MSG, name, ERR_MEM)
+#define NETIFAPI_VAR_FREE(name)    API_VAR_FREE(MEMP_NETIFAPI_MSG, name)
+
+/**
+ * Call netif_add() inside the tcpip_thread context.
+ */
+static err_t
+netifapi_do_netif_add(struct tcpip_api_call_data *m)
+{
+  /* cast through void* to silence alignment warnings.
+   * We know it works because the structs have been instantiated as struct netifapi_msg */
+  struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m;
+
+  if (!netif_add( msg->netif,
+#if LWIP_IPV4
+                  API_EXPR_REF(msg->msg.add.ipaddr),
+                  API_EXPR_REF(msg->msg.add.netmask),
+                  API_EXPR_REF(msg->msg.add.gw),
+#endif /* LWIP_IPV4 */
+                  msg->msg.add.state,
+                  msg->msg.add.init,
+                  msg->msg.add.input)) {
+    return ERR_IF;
+  } else {
+    return ERR_OK;
+  }
+}
+
+#if LWIP_IPV4
+/**
+ * Call netif_set_addr() inside the tcpip_thread context.
+ */
+static err_t
+netifapi_do_netif_set_addr(struct tcpip_api_call_data *m)
+{
+  /* cast through void* to silence alignment warnings.
+   * We know it works because the structs have been instantiated as struct netifapi_msg */
+  struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m;
+
+  netif_set_addr( msg->netif,
+                  API_EXPR_REF(msg->msg.add.ipaddr),
+                  API_EXPR_REF(msg->msg.add.netmask),
+                  API_EXPR_REF(msg->msg.add.gw));
+  return ERR_OK;
+}
+#endif /* LWIP_IPV4 */
+
+/**
+* Call netif_name_to_index() inside the tcpip_thread context.
+*/
+static err_t
+netifapi_do_name_to_index(struct tcpip_api_call_data *m)
+{
+  /* cast through void* to silence alignment warnings.
+   * We know it works because the structs have been instantiated as struct netifapi_msg */
+  struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m;
+
+  msg->msg.ifs.index = netif_name_to_index(msg->msg.ifs.name);
+  return ERR_OK;
+}
+
+/**
+* Call netif_index_to_name() inside the tcpip_thread context.
+*/
+static err_t
+netifapi_do_index_to_name(struct tcpip_api_call_data *m)
+{
+  /* cast through void* to silence alignment warnings.
+   * We know it works because the structs have been instantiated as struct netifapi_msg */
+  struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m;
+
+  if (!netif_index_to_name(msg->msg.ifs.index, msg->msg.ifs.name)) {
+    /* return failure via empty name */
+    msg->msg.ifs.name[0] = '\0';
+  }
+  return ERR_OK;
+}
+
+/**
+ * Call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) inside the
+ * tcpip_thread context.
+ */
+static err_t
+netifapi_do_netif_common(struct tcpip_api_call_data *m)
+{
+  /* cast through void* to silence alignment warnings.
+ * We know it works because the structs have been instantiated as struct netifapi_msg */ + struct netifapi_msg *msg = (struct netifapi_msg *)(void *)m; + + if (msg->msg.common.errtfunc != NULL) { + return msg->msg.common.errtfunc(msg->netif); + } else { + msg->msg.common.voidfunc(msg->netif); + return ERR_OK; + } +} + +#if LWIP_ARP && LWIP_IPV4 +/** + * @ingroup netifapi_arp + * Add or update an entry in the ARP cache. + * For an update, ipaddr is used to find the cache entry. + * + * @param ipaddr IPv4 address of cache entry + * @param ethaddr hardware address mapped to ipaddr + * @param type type of ARP cache entry + * @return ERR_OK: entry added/updated, else error from err_t + */ +err_t +netifapi_arp_add(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, enum netifapi_arp_entry type) +{ + err_t err; + + /* We only support permanent entries currently */ + LWIP_UNUSED_ARG(type); + +#if ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); + err = etharp_add_static_entry(ipaddr, ethaddr); + UNLOCK_TCPIP_CORE(); +#else + /* @todo add new vars to struct netifapi_msg and create a 'do' func */ + LWIP_UNUSED_ARG(ipaddr); + LWIP_UNUSED_ARG(ethaddr); + err = ERR_VAL; +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING */ + + return err; +} + +/** + * @ingroup netifapi_arp + * Remove an entry in the ARP cache identified by ipaddr + * + * @param ipaddr IPv4 address of cache entry + * @param type type of ARP cache entry + * @return ERR_OK: entry removed, else error from err_t + */ +err_t +netifapi_arp_remove(const ip4_addr_t *ipaddr, enum netifapi_arp_entry type) +{ + err_t err; + + /* We only support permanent entries currently */ + LWIP_UNUSED_ARG(type); + +#if ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); + err = etharp_remove_static_entry(ipaddr); + UNLOCK_TCPIP_CORE(); +#else + /* @todo add new vars to struct netifapi_msg and create a 'do' func */ + LWIP_UNUSED_ARG(ipaddr); + err = ERR_VAL; +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES && LWIP_TCPIP_CORE_LOCKING */ + + return err; +} +#endif /* LWIP_ARP && LWIP_IPV4 */ + +/** + * @ingroup netifapi_netif + * Call netif_add() in a thread-safe way by running that function inside the + * tcpip_thread context. + * + * @note for params @see netif_add() + */ +err_t +netifapi_netif_add(struct netif *netif, +#if LWIP_IPV4 + const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, +#endif /* LWIP_IPV4 */ + void *state, netif_init_fn init, netif_input_fn input) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + +#if LWIP_IPV4 + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } +#endif /* LWIP_IPV4 */ + + NETIFAPI_VAR_REF(msg).netif = netif; +#if LWIP_IPV4 + NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr); + NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask); + NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw); +#endif /* LWIP_IPV4 */ + NETIFAPI_VAR_REF(msg).msg.add.state = state; + NETIFAPI_VAR_REF(msg).msg.add.init = init; + NETIFAPI_VAR_REF(msg).msg.add.input = input; + err = tcpip_api_call(netifapi_do_netif_add, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} + +#if LWIP_IPV4 +/** + * @ingroup netifapi_netif + * Call netif_set_addr() in a thread-safe way by running that function inside the + * tcpip_thread context. 
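+ *
+ * Editor's note: a minimal usage sketch (not part of the original lwIP
+ * sources), e.g. re-addressing an interface from an application thread;
+ * 'netif' is a struct netif that was previously added:
+ *
+ *   ip4_addr_t ipaddr, netmask, gw;
+ *   IP4_ADDR(&ipaddr, 192, 168, 1, 10);
+ *   IP4_ADDR(&netmask, 255, 255, 255, 0);
+ *   IP4_ADDR(&gw, 192, 168, 1, 1);
+ *   netifapi_netif_set_addr(&netif, &ipaddr, &netmask, &gw);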
+ * + * @note for params @see netif_set_addr() + */ +err_t +netifapi_netif_set_addr(struct netif *netif, + const ip4_addr_t *ipaddr, + const ip4_addr_t *netmask, + const ip4_addr_t *gw) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + NETIFAPI_VAR_REF(msg).netif = netif; + NETIFAPI_VAR_REF(msg).msg.add.ipaddr = NETIFAPI_VAR_REF(ipaddr); + NETIFAPI_VAR_REF(msg).msg.add.netmask = NETIFAPI_VAR_REF(netmask); + NETIFAPI_VAR_REF(msg).msg.add.gw = NETIFAPI_VAR_REF(gw); + err = tcpip_api_call(netifapi_do_netif_set_addr, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} +#endif /* LWIP_IPV4 */ + +/** + * call the "errtfunc" (or the "voidfunc" if "errtfunc" is NULL) in a thread-safe + * way by running that function inside the tcpip_thread context. + * + * @note use only for functions where there is only "netif" parameter. + */ +err_t +netifapi_netif_common(struct netif *netif, netifapi_void_fn voidfunc, + netifapi_errt_fn errtfunc) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + NETIFAPI_VAR_REF(msg).netif = netif; + NETIFAPI_VAR_REF(msg).msg.common.voidfunc = voidfunc; + NETIFAPI_VAR_REF(msg).msg.common.errtfunc = errtfunc; + err = tcpip_api_call(netifapi_do_netif_common, &API_VAR_REF(msg).call); + NETIFAPI_VAR_FREE(msg); + return err; +} + +/** +* @ingroup netifapi_netif +* Call netif_name_to_index() in a thread-safe way by running that function inside the +* tcpip_thread context. +* +* @param name the interface name of the netif +* @param idx output index of the found netif +*/ +err_t +netifapi_netif_name_to_index(const char *name, u8_t *idx) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + *idx = 0; + +#if LWIP_MPU_COMPATIBLE + strncpy(NETIFAPI_VAR_REF(msg).msg.ifs.name, name, NETIF_NAMESIZE - 1); + NETIFAPI_VAR_REF(msg).msg.ifs.name[NETIF_NAMESIZE - 1] = '\0'; +#else + NETIFAPI_VAR_REF(msg).msg.ifs.name = LWIP_CONST_CAST(char *, name); +#endif /* LWIP_MPU_COMPATIBLE */ + err = tcpip_api_call(netifapi_do_name_to_index, &API_VAR_REF(msg).call); + if (!err) { + *idx = NETIFAPI_VAR_REF(msg).msg.ifs.index; + } + NETIFAPI_VAR_FREE(msg); + return err; +} + +/** +* @ingroup netifapi_netif +* Call netif_index_to_name() in a thread-safe way by running that function inside the +* tcpip_thread context. +* +* @param idx the interface index of the netif +* @param name output name of the found netif, empty '\0' string if netif not found. 
+* name should be of at least NETIF_NAMESIZE bytes +*/ +err_t +netifapi_netif_index_to_name(u8_t idx, char *name) +{ + err_t err; + NETIFAPI_VAR_DECLARE(msg); + NETIFAPI_VAR_ALLOC(msg); + + NETIFAPI_VAR_REF(msg).msg.ifs.index = idx; +#if !LWIP_MPU_COMPATIBLE + NETIFAPI_VAR_REF(msg).msg.ifs.name = name; +#endif /* LWIP_MPU_COMPATIBLE */ + err = tcpip_api_call(netifapi_do_index_to_name, &API_VAR_REF(msg).call); +#if LWIP_MPU_COMPATIBLE + if (!err) { + strncpy(name, NETIFAPI_VAR_REF(msg).msg.ifs.name, NETIF_NAMESIZE - 1); + name[NETIF_NAMESIZE - 1] = '\0'; + } +#endif /* LWIP_MPU_COMPATIBLE */ + NETIFAPI_VAR_FREE(msg); + return err; +} + +#endif /* LWIP_NETIF_API */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/sockets.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/sockets.c new file mode 100644 index 0000000..0e968b8 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/sockets.c @@ -0,0 +1,4160 @@ +/** + * @file + * Sockets BSD-Like API module + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ * Improved by Marc Boucher <marc@mbsi.ca> and David Haas <dhaas@alum.rpi.edu>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/sockets.h"
+#include "lwip/priv/sockets_priv.h"
+#include "lwip/api.h"
+#include "lwip/igmp.h"
+#include "lwip/inet.h"
+#include "lwip/tcp.h"
+#include "lwip/raw.h"
+#include "lwip/udp.h"
+#include "lwip/memp.h"
+#include "lwip/pbuf.h"
+#include "lwip/netif.h"
+#include "lwip/priv/tcpip_priv.h"
+#include "lwip/mld6.h"
+#if LWIP_CHECKSUM_ON_COPY
+#include "lwip/inet_chksum.h"
+#endif
+
+#if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
+#include <stdarg.h>
+#endif
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+/* If the netconn API is not required publicly, then we include the necessary
+   files here to get the implementation */
+#if !LWIP_NETCONN
+#undef LWIP_NETCONN
+#define LWIP_NETCONN 1
+#include "api_msg.c"
+#include "api_lib.c"
+#include "netbuf.c"
+#undef LWIP_NETCONN
+#define LWIP_NETCONN 0
+#endif
+
+#define API_SELECT_CB_VAR_REF(name)             API_VAR_REF(name)
+#define API_SELECT_CB_VAR_DECLARE(name)         API_VAR_DECLARE(struct lwip_select_cb, name)
+#define API_SELECT_CB_VAR_ALLOC(name, retblock) API_VAR_ALLOC_EXT(struct lwip_select_cb, MEMP_SELECT_CB, name, retblock)
+#define API_SELECT_CB_VAR_FREE(name)            API_VAR_FREE(MEMP_SELECT_CB, name)
+
+#if LWIP_IPV4
+#define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \
+      (sin)->sin_len = sizeof(struct sockaddr_in); \
+      (sin)->sin_family = AF_INET; \
+      (sin)->sin_port = lwip_htons((port)); \
+      inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \
+      memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0)
+#define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \
+    inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \
+    (port) = lwip_ntohs((sin)->sin_port); }while(0)
+#endif /* LWIP_IPV4 */
+
+#if LWIP_IPV6
+#define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \
+      (sin6)->sin6_len = sizeof(struct sockaddr_in6); \
+      (sin6)->sin6_family = AF_INET6; \
+      (sin6)->sin6_port = lwip_htons((port)); \
+      (sin6)->sin6_flowinfo = 0; \
+      inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \
+      (sin6)->sin6_scope_id = ip6_addr_zone(ipaddr); }while(0)
+#define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \
+    inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \
+    if (ip6_addr_has_scope(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { \
+      ip6_addr_set_zone(ip_2_ip6(ipaddr), (u8_t)((sin6)->sin6_scope_id)); \
+    } \
+    (port) = lwip_ntohs((sin6)->sin6_port); }while(0)
+#endif /* LWIP_IPV6 */
+
+#if LWIP_IPV4 && LWIP_IPV6
+static void sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port);
+
+#define IS_SOCK_ADDR_LEN_VALID(namelen)  (((namelen) == sizeof(struct sockaddr_in)) || \
+                                         ((namelen) == sizeof(struct sockaddr_in6)))
+#define IS_SOCK_ADDR_TYPE_VALID(name)    (((name)->sa_family == AF_INET) || \
+                                         ((name)->sa_family == AF_INET6))
+#define SOCK_ADDR_TYPE_MATCH(name, sock) \
+       ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \
+       (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type))))
+#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \
+    if (IP_IS_ANY_TYPE_VAL(*ipaddr) || IP_IS_V6_VAL(*ipaddr)) { \
+      IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \
+    } else { \
+      IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \
+    } } while(0)
+#define
SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port)) +#define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \ + (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6)) +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ +#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6)) +#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6) +#define SOCK_ADDR_TYPE_MATCH(name, sock) 1 +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \ + IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port) +#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \ + SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port) +#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type) +#else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */ +#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in)) +#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET) +#define SOCK_ADDR_TYPE_MATCH(name, sock) 1 +#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \ + IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port) +#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \ + SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port) +#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type) +#endif /* LWIP_IPV6 */ + +#define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \ + IS_SOCK_ADDR_TYPE_VALID(name)) +#define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \ + SOCK_ADDR_TYPE_MATCH(name, sock)) +#define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % 4) == 0) + + +#define LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype) do { if ((optlen) < sizeof(opttype)) { done_socket(sock); return EINVAL; }}while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \ + if ((sock)->conn == NULL) { done_socket(sock); return EINVAL; } }while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \ + if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { done_socket(sock); return EINVAL; } }while(0) +#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \ + if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { done_socket(sock); return ENOPROTOOPT; } }while(0) + + +#define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name) +#define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name) +#define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name) +#if LWIP_MPU_COMPATIBLE +#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \ + name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \ + if (name == NULL) { \ + sock_set_errno(sock, ENOMEM); \ + done_socket(sock); \ + return -1; \ + } }while(0) +#else /* LWIP_MPU_COMPATIBLE */ +#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) +#endif /* LWIP_MPU_COMPATIBLE */ + +#if LWIP_SO_SNDRCVTIMEO_NONSTANDARD +#define LWIP_SO_SNDRCVTIMEO_OPTTYPE int +#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val)) +#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) 
((long)*(const int*)(optval)) +#else +#define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval +#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \ + u32_t loc = (val); \ + ((struct timeval *)(optval))->tv_sec = (long)((loc) / 1000U); \ + ((struct timeval *)(optval))->tv_usec = (long)(((loc) % 1000U) * 1000U); }while(0) +#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000) + (((const struct timeval *)(optval))->tv_usec / 1000)) +#endif + + +/** A struct sockaddr replacement that has the same alignment as sockaddr_in/ + * sockaddr_in6 if instantiated. + */ +union sockaddr_aligned { + struct sockaddr sa; +#if LWIP_IPV6 + struct sockaddr_in6 sin6; +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + struct sockaddr_in sin; +#endif /* LWIP_IPV4 */ +}; + +/* Define the number of IPv4 multicast memberships, default is one per socket */ +#ifndef LWIP_SOCKET_MAX_MEMBERSHIPS +#define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS +#endif + +#if LWIP_IGMP +/* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when + a socket is closed */ +struct lwip_socket_multicast_pair { + /** the socket */ + struct lwip_sock *sock; + /** the interface address */ + ip4_addr_t if_addr; + /** the group address */ + ip4_addr_t multi_addr; +}; + +static struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS]; + +static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr); +static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr); +static void lwip_socket_drop_registered_memberships(int s); +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6_MLD +/* This is to keep track of IP_JOIN_GROUP calls to drop the membership when + a socket is closed */ +struct lwip_socket_multicast_mld6_pair { + /** the socket */ + struct lwip_sock *sock; + /** the interface index */ + u8_t if_idx; + /** the group address */ + ip6_addr_t multi_addr; +}; + +static struct lwip_socket_multicast_mld6_pair socket_ipv6_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS]; + +static int lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr); +static void lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr); +static void lwip_socket_drop_registered_mld6_memberships(int s); +#endif /* LWIP_IPV6_MLD */ + +/** The global array of available sockets */ +static struct lwip_sock sockets[NUM_SOCKETS]; + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +#if LWIP_TCPIP_CORE_LOCKING +/* protect the select_cb_list using core lock */ +#define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) +#define LWIP_SOCKET_SELECT_PROTECT(lev) LOCK_TCPIP_CORE() +#define LWIP_SOCKET_SELECT_UNPROTECT(lev) UNLOCK_TCPIP_CORE() +#else /* LWIP_TCPIP_CORE_LOCKING */ +/* protect the select_cb_list using SYS_LIGHTWEIGHT_PROT */ +#define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev) +#define LWIP_SOCKET_SELECT_PROTECT(lev) SYS_ARCH_PROTECT(lev) +#define LWIP_SOCKET_SELECT_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev) +/** This counter is increased from lwip_select when the list is changed + and checked in select_check_waiters to see if it has changed. 
*/ +static volatile int select_cb_ctr; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +/** The global list of tasks waiting for select */ +static struct lwip_select_cb *select_cb_list; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +#define sock_set_errno(sk, e) do { \ + const int sockerr = (e); \ + set_errno(sockerr); \ +} while (0) + +/* Forward declaration of some functions */ +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len); +#define DEFAULT_SOCKET_EVENTCB event_callback +static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent); +#else +#define DEFAULT_SOCKET_EVENTCB NULL +#endif +#if !LWIP_TCPIP_CORE_LOCKING +static void lwip_getsockopt_callback(void *arg); +static void lwip_setsockopt_callback(void *arg); +#endif +static int lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen); +static int lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen); +static int free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn, + union lwip_sock_lastdata *lastdata); +static void free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata); + +#if LWIP_IPV4 && LWIP_IPV6 +static void +sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port) +{ + if ((sockaddr->sa_family) == AF_INET6) { + SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6 *)(const void *)(sockaddr), ipaddr, *port); + ipaddr->type = IPADDR_TYPE_V6; + } else { + SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in *)(const void *)(sockaddr), ipaddr, *port); + ipaddr->type = IPADDR_TYPE_V4; + } +} +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +/** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */ +void +lwip_socket_thread_init(void) +{ + netconn_thread_init(); +} + +/** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */ +void +lwip_socket_thread_cleanup(void) +{ + netconn_thread_cleanup(); +} + +#if LWIP_NETCONN_FULLDUPLEX +/* Thread-safe increment of sock->fd_used, with overflow check */ +static int +sock_inc_used(struct lwip_sock *sock) +{ + int ret; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("sock != NULL", sock != NULL); + + SYS_ARCH_PROTECT(lev); + if (sock->fd_free_pending) { + /* prevent new usage of this socket if free is pending */ + ret = 0; + } else { + ++sock->fd_used; + ret = 1; + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + } + SYS_ARCH_UNPROTECT(lev); + return ret; +} + +/* Like sock_inc_used(), but called under SYS_ARCH_PROTECT lock. */ +static int +sock_inc_used_locked(struct lwip_sock *sock) +{ + LWIP_ASSERT("sock != NULL", sock != NULL); + + if (sock->fd_free_pending) { + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + return 0; + } + + ++sock->fd_used; + LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0); + return 1; +} + +/* In full-duplex mode,sock->fd_used != 0 prevents a socket descriptor from being + * released (and possibly reused) when used from more than one thread + * (e.g. read-while-write or close-while-write, etc) + * This function is called at the end of functions using (try)get_socket*(). 
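+ *
+ * Editor's note (not part of the original sources): under
+ * LWIP_NETCONN_FULLDUPLEX, sock_inc_used()/done_socket() form a per-descriptor
+ * reference count; when lwip_close() runs while another thread is still using
+ * the descriptor, free_socket_locked() only marks fd_free_pending, and the
+ * last done_socket() call performs the actual free.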
+ */ +static void +done_socket(struct lwip_sock *sock) +{ + int freed = 0; + int is_tcp = 0; + struct netconn *conn = NULL; + union lwip_sock_lastdata lastdata; + SYS_ARCH_DECL_PROTECT(lev); + LWIP_ASSERT("sock != NULL", sock != NULL); + + SYS_ARCH_PROTECT(lev); + LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0); + if (--sock->fd_used == 0) { + if (sock->fd_free_pending) { + /* free the socket */ + sock->fd_used = 1; + is_tcp = sock->fd_free_pending & LWIP_SOCK_FD_FREE_TCP; + freed = free_socket_locked(sock, is_tcp, &conn, &lastdata); + } + } + SYS_ARCH_UNPROTECT(lev); + + if (freed) { + free_socket_free_elements(is_tcp, conn, &lastdata); + } +} + +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define sock_inc_used(sock) 1 +#define sock_inc_used_locked(sock) 1 +#define done_socket(sock) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +/* Translate a socket 'int' into a pointer (only fails if the index is invalid) */ +static struct lwip_sock * +tryget_socket_unconn_nouse(int fd) +{ + int s = fd - LWIP_SOCKET_OFFSET; + if ((s < 0) || (s >= NUM_SOCKETS)) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("tryget_socket_unconn(%d): invalid\n", fd)); + return NULL; + } + return &sockets[s]; +} + +struct lwip_sock * +lwip_socket_dbg_get_socket(int fd) +{ + return tryget_socket_unconn_nouse(fd); +} + +/* Translate a socket 'int' into a pointer (only fails if the index is invalid) */ +static struct lwip_sock * +tryget_socket_unconn(int fd) +{ + struct lwip_sock *ret = tryget_socket_unconn_nouse(fd); + if (ret != NULL) { + if (!sock_inc_used(ret)) { + return NULL; + } + } + return ret; +} + +/* Like tryget_socket_unconn(), but called under SYS_ARCH_PROTECT lock. */ +static struct lwip_sock * +tryget_socket_unconn_locked(int fd) +{ + struct lwip_sock *ret = tryget_socket_unconn_nouse(fd); + if (ret != NULL) { + if (!sock_inc_used_locked(ret)) { + return NULL; + } + } + return ret; +} + +/** + * Same as get_socket but doesn't set errno + * + * @param fd externally used socket index + * @return struct lwip_sock for the socket or NULL if not found + */ +static struct lwip_sock * +tryget_socket(int fd) +{ + struct lwip_sock *sock = tryget_socket_unconn(fd); + if (sock != NULL) { + if (sock->conn) { + return sock; + } + done_socket(sock); + } + return NULL; +} + +/** + * Map a externally used socket index to the internal socket representation. + * + * @param fd externally used socket index + * @return struct lwip_sock for the socket or NULL if not found + */ +static struct lwip_sock * +get_socket(int fd) +{ + struct lwip_sock *sock = tryget_socket(fd); + if (!sock) { + if ((fd < LWIP_SOCKET_OFFSET) || (fd >= (LWIP_SOCKET_OFFSET + NUM_SOCKETS))) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", fd)); + } + set_errno(EBADF); + return NULL; + } + return sock; +} + +/** + * Allocate a new socket for a given netconn. 
+ * + * @param newconn the netconn for which to allocate a socket + * @param accepted 1 if socket has been created by accept(), + * 0 if socket has been created by socket() + * @return the index of the new socket; -1 on error + */ +static int +alloc_socket(struct netconn *newconn, int accepted) +{ + int i; + SYS_ARCH_DECL_PROTECT(lev); + LWIP_UNUSED_ARG(accepted); + + /* allocate a new socket identifier */ + for (i = 0; i < NUM_SOCKETS; ++i) { + /* Protect socket array */ + SYS_ARCH_PROTECT(lev); + if (!sockets[i].conn) { +#if LWIP_NETCONN_FULLDUPLEX + if (sockets[i].fd_used) { + SYS_ARCH_UNPROTECT(lev); + continue; + } + sockets[i].fd_used = 1; + sockets[i].fd_free_pending = 0; +#endif + sockets[i].conn = newconn; + /* The socket is not yet known to anyone, so no need to protect + after having marked it as used. */ + SYS_ARCH_UNPROTECT(lev); + sockets[i].lastdata.pbuf = NULL; +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL + LWIP_ASSERT("sockets[i].select_waiting == 0", sockets[i].select_waiting == 0); + sockets[i].rcvevent = 0; + /* TCP sendbuf is empty, but the socket is not yet writable until connected + * (unless it has been created by accept()). */ + sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1); + sockets[i].errevent = 0; +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + return i + LWIP_SOCKET_OFFSET; + } + SYS_ARCH_UNPROTECT(lev); + } + return -1; +} + +/** Free a socket (under lock) + * + * @param sock the socket to free + * @param is_tcp != 0 for TCP sockets, used to free lastdata + * @param conn the socekt's netconn is stored here, must be freed externally + * @param lastdata lastdata is stored here, must be freed externally + */ +static int +free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn, + union lwip_sock_lastdata *lastdata) +{ +#if LWIP_NETCONN_FULLDUPLEX + LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0); + sock->fd_used--; + if (sock->fd_used > 0) { + sock->fd_free_pending = LWIP_SOCK_FD_FREE_FREE | (is_tcp ? LWIP_SOCK_FD_FREE_TCP : 0); + return 0; + } +#else /* LWIP_NETCONN_FULLDUPLEX */ + LWIP_UNUSED_ARG(is_tcp); +#endif /* LWIP_NETCONN_FULLDUPLEX */ + + *lastdata = sock->lastdata; + sock->lastdata.pbuf = NULL; + *conn = sock->conn; + sock->conn = NULL; + return 1; +} + +/** Free a socket's leftover members. + */ +static void +free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata) +{ + if (lastdata->pbuf != NULL) { + if (is_tcp) { + pbuf_free(lastdata->pbuf); + } else { + netbuf_delete(lastdata->netbuf); + } + } + if (conn != NULL) { + /* netconn_prepare_delete() has already been called, here we only free the conn */ + netconn_delete(conn); + } +} + +/** Free a socket. The socket's netconn must have been + * delete before! + * + * @param sock the socket to free + * @param is_tcp != 0 for TCP sockets, used to free lastdata + */ +static void +free_socket(struct lwip_sock *sock, int is_tcp) +{ + int freed; + struct netconn *conn; + union lwip_sock_lastdata lastdata; + SYS_ARCH_DECL_PROTECT(lev); + + /* Protect socket array */ + SYS_ARCH_PROTECT(lev); + + freed = free_socket_locked(sock, is_tcp, &conn, &lastdata); + SYS_ARCH_UNPROTECT(lev); + /* don't use 'sock' after this line, as another task might have allocated it */ + + if (freed) { + free_socket_free_elements(is_tcp, conn, &lastdata); + } +} + +/* Below this, the well-known socket functions are implemented. 
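+ *
+ * Editor's note: a minimal usage sketch (not part of the original lwIP
+ * sources), showing the lwIP socket layer end to end for UDP:
+ *
+ *   int s = lwip_socket(AF_INET, SOCK_DGRAM, 0);
+ *   if (s >= 0) {
+ *     struct sockaddr_in local;
+ *     memset(&local, 0, sizeof(local));
+ *     local.sin_family = AF_INET;
+ *     local.sin_port = PP_HTONS(7);
+ *     local.sin_addr.s_addr = PP_HTONL(INADDR_ANY);
+ *     if (lwip_bind(s, (struct sockaddr *)&local, sizeof(local)) == 0) {
+ *       char rxbuf[128];
+ *       ssize_t n = lwip_recvfrom(s, rxbuf, sizeof(rxbuf), 0, NULL, NULL);
+ *       /* on success, 'n' bytes of one datagram are now in rxbuf */
+ *     }
+ *     lwip_close(s);
+ *   }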
+ * Use google.com or opengroup.org to get a good description :-) + * + * Exceptions are documented! + */ + +int +lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen) +{ + struct lwip_sock *sock, *nsock; + struct netconn *newconn; + ip_addr_t naddr; + u16_t port = 0; + int newsock; + err_t err; + int recvevent; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s)); + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* wait for a new connection */ + err = netconn_accept(sock->conn, &newconn); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_acept failed, err=%d\n", s, err)); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + } else if (err == ERR_CLSD) { + sock_set_errno(sock, EINVAL); + } else { + sock_set_errno(sock, err_to_errno(err)); + } + done_socket(sock); + return -1; + } + LWIP_ASSERT("newconn != NULL", newconn != NULL); + + newsock = alloc_socket(newconn, 1); + if (newsock == -1) { + netconn_delete(newconn); + sock_set_errno(sock, ENFILE); + done_socket(sock); + return -1; + } + LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET)); + nsock = &sockets[newsock - LWIP_SOCKET_OFFSET]; + + /* See event_callback: If data comes in right away after an accept, even + * though the server task might not have created a new socket yet. + * In that case, newconn->socket is counted down (newconn->socket--), + * so nsock->rcvevent is >= 1 here! + */ + SYS_ARCH_PROTECT(lev); + recvevent = (s16_t)(-1 - newconn->socket); + newconn->socket = newsock; + SYS_ARCH_UNPROTECT(lev); + + if (newconn->callback) { + LOCK_TCPIP_CORE(); + while (recvevent > 0) { + recvevent--; + newconn->callback(newconn, NETCONN_EVT_RCVPLUS, 0); + } + UNLOCK_TCPIP_CORE(); + } + + /* Note that POSIX only requires us to check addr is non-NULL. addrlen must + * not be NULL if addr is valid. 
+ */ + if ((addr != NULL) && (addrlen != NULL)) { + union sockaddr_aligned tempaddr; + /* get the IP address and port of the remote host */ + err = netconn_peer(newconn, &naddr, &port); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err)); + netconn_delete(newconn); + free_socket(nsock, 1); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port); + if (*addrlen > tempaddr.sa.sa_len) { + *addrlen = tempaddr.sa.sa_len; + } + MEMCPY(addr, &tempaddr, *addrlen); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock)); + ip_addr_debug_print_val(SOCKETS_DEBUG, naddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port)); + } else { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d", s, newsock)); + } + + sock_set_errno(sock, 0); + done_socket(sock); + done_socket(nsock); + return newsock; +} + +int +lwip_bind(int s, const struct sockaddr *name, socklen_t namelen) +{ + struct lwip_sock *sock; + ip_addr_t local_addr; + u16_t local_port; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (!SOCK_ADDR_TYPE_MATCH(name, sock)) { + /* sockaddr does not match socket type (IPv4/IPv6) */ + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + + /* check size, family and alignment of 'name' */ + LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) && + IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_UNUSED_ARG(namelen); + + SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port)); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr)); + IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + err = netconn_bind(sock->conn, &local_addr, local_port); + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +int +lwip_close(int s) +{ + struct lwip_sock *sock; + int is_tcp = 0; + err_t err; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (sock->conn != NULL) { + is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP; + } else { + LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata.pbuf == NULL); + } + +#if LWIP_IGMP + /* drop all possibly joined IGMP memberships */ + lwip_socket_drop_registered_memberships(s); +#endif /* LWIP_IGMP */ +#if LWIP_IPV6_MLD + /* drop all possibly joined MLD6 memberships */ + lwip_socket_drop_registered_mld6_memberships(s); +#endif /* LWIP_IPV6_MLD */ + + err = netconn_prepare_delete(sock->conn); + if (err != ERR_OK) { + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + free_socket(sock, is_tcp); + set_errno(0); + return 0; +} + +int +lwip_connect(int s, const 
struct sockaddr *name, socklen_t namelen) +{ + struct lwip_sock *sock; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) { + /* sockaddr does not match socket type (IPv4/IPv6) */ + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + + LWIP_UNUSED_ARG(namelen); + if (name->sa_family == AF_UNSPEC) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s)); + err = netconn_disconnect(sock->conn); + } else { + ip_addr_t remote_addr; + u16_t remote_port; + + /* check size, family and alignment of 'name' */ + LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) && + IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + + SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port)); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr)); + IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + err = netconn_connect(sock->conn, &remote_addr, remote_port); + } + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +/** + * Set a socket into listen mode. + * The socket may not have been used for another connection previously. + * + * @param s the socket to set to listening mode + * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1) + * @return 0 on success, non-zero on failure + */ +int +lwip_listen(int s, int backlog) +{ + struct lwip_sock *sock; + err_t err; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* limit the "backlog" parameter to fit in an u8_t */ + backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff); + + err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog); + + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err)); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + } else { + sock_set_errno(sock, err_to_errno(err)); + } + done_socket(sock); + return -1; + } + + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +#if LWIP_TCP +/* Helper function to loop over receiving pbufs from netconn + * until "len" bytes are received or we're otherwise done. + * Keeps sock->lastdata for peeking or partly copying. + */ +static ssize_t +lwip_recv_tcp(struct lwip_sock *sock, void *mem, size_t len, int flags) +{ + u8_t apiflags = NETCONN_NOAUTORCVD; + ssize_t recvd = 0; + ssize_t recv_left = (len <= SSIZE_MAX) ? 
(ssize_t)len : SSIZE_MAX; + + LWIP_ASSERT("no socket given", sock != NULL); + LWIP_ASSERT("this should be checked internally", NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP); + + if (flags & MSG_DONTWAIT) { + apiflags |= NETCONN_DONTBLOCK; + } + + do { + struct pbuf *p; + err_t err; + u16_t copylen; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: top while sock->lastdata=%p\n", (void *)sock->lastdata.pbuf)); + /* Check if there is data left from the last recv operation. */ + if (sock->lastdata.pbuf) { + p = sock->lastdata.pbuf; + } else { + /* No data was left from the previous operation, so we try to get + some from the network. */ + err = netconn_recv_tcp_pbuf_flags(sock->conn, &p, apiflags); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: netconn_recv err=%d, pbuf=%p\n", + err, (void *)p)); + + if (err != ERR_OK) { + if (recvd > 0) { + /* already received data, return that (this trusts in getting the same error from + netconn layer again next time netconn_recv is called) */ + goto lwip_recv_tcp_done; + } + /* We should really do some error checking here. */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: p == NULL, error is \"%s\"!\n", + lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + if (err == ERR_CLSD) { + return 0; + } else { + return -1; + } + } + LWIP_ASSERT("p != NULL", p != NULL); + sock->lastdata.pbuf = p; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: buflen=%"U16_F" recv_left=%d off=%d\n", + p->tot_len, (int)recv_left, (int)recvd)); + + if (recv_left > p->tot_len) { + copylen = p->tot_len; + } else { + copylen = (u16_t)recv_left; + } + if (recvd + copylen < recvd) { + /* overflow */ + copylen = (u16_t)(SSIZE_MAX - recvd); + } + + /* copy the contents of the received buffer into + the supplied memory pointer mem */ + pbuf_copy_partial(p, (u8_t *)mem + recvd, copylen, 0); + + recvd += copylen; + + /* TCP combines multiple pbufs for one recv */ + LWIP_ASSERT("invalid copylen, len would underflow", recv_left >= copylen); + recv_left -= copylen; + + /* Unless we peek the incoming message... */ + if ((flags & MSG_PEEK) == 0) { + /* ... check if there is data left in the pbuf */ + LWIP_ASSERT("invalid copylen", p->tot_len >= copylen); + if (p->tot_len - copylen > 0) { + /* If so, it should be saved in the sock structure for the next recv call. + We store the pbuf but hide/free the consumed data: */ + sock->lastdata.pbuf = pbuf_free_header(p, copylen); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: lastdata now pbuf=%p\n", (void *)sock->lastdata.pbuf)); + } else { + sock->lastdata.pbuf = NULL; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: deleting pbuf=%p\n", (void *)p)); + pbuf_free(p); + } + } + /* once we have some data to return, only add more if we don't need to wait */ + apiflags |= NETCONN_DONTBLOCK | NETCONN_NOFIN; + /* @todo: do we need to support peeking more than one pbuf? 
*/ + } while ((recv_left > 0) && !(flags & MSG_PEEK)); +lwip_recv_tcp_done: + if ((recvd > 0) && !(flags & MSG_PEEK)) { + /* ensure window update after copying all data */ + netconn_tcp_recvd(sock->conn, (size_t)recvd); + } + sock_set_errno(sock, 0); + return recvd; +} +#endif + +/* Convert a netbuf's address data to struct sockaddr */ +static int +lwip_sock_make_addr(struct netconn *conn, ip_addr_t *fromaddr, u16_t port, + struct sockaddr *from, socklen_t *fromlen) +{ + int truncated = 0; + union sockaddr_aligned saddr; + + LWIP_UNUSED_ARG(conn); + + LWIP_ASSERT("fromaddr != NULL", fromaddr != NULL); + LWIP_ASSERT("from != NULL", from != NULL); + LWIP_ASSERT("fromlen != NULL", fromlen != NULL); + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */ + if (NETCONNTYPE_ISIPV6(netconn_type(conn)) && IP_IS_V4(fromaddr)) { + ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr)); + IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port); + if (*fromlen < saddr.sa.sa_len) { + truncated = 1; + } else if (*fromlen > saddr.sa.sa_len) { + *fromlen = saddr.sa.sa_len; + } + MEMCPY(from, &saddr, *fromlen); + return truncated; +} + +#if LWIP_TCP +/* Helper function to get a tcp socket's remote address info */ +static int +lwip_recv_tcp_from(struct lwip_sock *sock, struct sockaddr *from, socklen_t *fromlen, const char *dbg_fn, int dbg_s, ssize_t dbg_ret) +{ + if (sock == NULL) { + return 0; + } + LWIP_UNUSED_ARG(dbg_fn); + LWIP_UNUSED_ARG(dbg_s); + LWIP_UNUSED_ARG(dbg_ret); + +#if !SOCKETS_DEBUG + if (from && fromlen) +#endif /* !SOCKETS_DEBUG */ + { + /* get remote addr/port from tcp_pcb */ + u16_t port; + ip_addr_t tmpaddr; + netconn_getaddr(sock->conn, &tmpaddr, &port, 0); + LWIP_DEBUGF(SOCKETS_DEBUG, ("%s(%d): addr=", dbg_fn, dbg_s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, tmpaddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, (int)dbg_ret)); + if (from && fromlen) { + return lwip_sock_make_addr(sock->conn, &tmpaddr, port, from, fromlen); + } + } + return 0; +} +#endif + +/* Helper function to receive a netbuf from a udp or raw netconn. + * Keeps sock->lastdata for peeking. + */ +static err_t +lwip_recvfrom_udp_raw(struct lwip_sock *sock, int flags, struct msghdr *msg, u16_t *datagram_len, int dbg_s) +{ + struct netbuf *buf; + u8_t apiflags; + err_t err; + u16_t buflen, copylen, copied; + int i; + + LWIP_UNUSED_ARG(dbg_s); + LWIP_ERROR("lwip_recvfrom_udp_raw: invalid arguments", (msg->msg_iov != NULL) || (msg->msg_iovlen <= 0), return ERR_ARG;); + + if (flags & MSG_DONTWAIT) { + apiflags = NETCONN_DONTBLOCK; + } else { + apiflags = 0; + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: top sock->lastdata=%p\n", (void *)sock->lastdata.netbuf)); + /* Check if there is data left from the last recv operation. */ + buf = sock->lastdata.netbuf; + if (buf == NULL) { + /* No data was left from the previous operation, so we try to get + some from the network. 
*/ + err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &buf, apiflags); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: netconn_recv err=%d, netbuf=%p\n", + err, (void *)buf)); + + if (err != ERR_OK) { + return err; + } + LWIP_ASSERT("buf != NULL", buf != NULL); + sock->lastdata.netbuf = buf; + } + buflen = buf->p->tot_len; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw: buflen=%"U16_F"\n", buflen)); + + copied = 0; + /* copy the pbuf payload into the iovs */ + for (i = 0; (i < msg->msg_iovlen) && (copied < buflen); i++) { + u16_t len_left = (u16_t)(buflen - copied); + if (msg->msg_iov[i].iov_len > len_left) { + copylen = len_left; + } else { + copylen = (u16_t)msg->msg_iov[i].iov_len; + } + + /* copy the contents of the received buffer into + the supplied memory buffer */ + pbuf_copy_partial(buf->p, (u8_t *)msg->msg_iov[i].iov_base, copylen, copied); + copied = (u16_t)(copied + copylen); + } + + /* Check to see where the data came from. */ +#if !SOCKETS_DEBUG + if (msg->msg_name && msg->msg_namelen) +#endif /* !SOCKETS_DEBUG */ + { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw(%d): addr=", dbg_s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, *netbuf_fromaddr(buf)); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", netbuf_fromport(buf), copied)); + if (msg->msg_name && msg->msg_namelen) { + lwip_sock_make_addr(sock->conn, netbuf_fromaddr(buf), netbuf_fromport(buf), + (struct sockaddr *)msg->msg_name, &msg->msg_namelen); + } + } + + /* Initialize flag output */ + msg->msg_flags = 0; + + if (msg->msg_control) { + u8_t wrote_msg = 0; +#if LWIP_NETBUF_RECVINFO + /* Check if packet info was recorded */ + if (buf->flags & NETBUF_FLAG_DESTADDR) { + if (IP_IS_V4(&buf->toaddr)) { +#if LWIP_IPV4 + if (msg->msg_controllen >= CMSG_SPACE(sizeof(struct in_pktinfo))) { + struct cmsghdr *chdr = CMSG_FIRSTHDR(msg); /* This will always return a header!! 
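+            (msg_controllen was checked against CMSG_SPACE just above, so
+            CMSG_FIRSTHDR cannot return NULL here.) A hypothetical receiver
+            would walk the control data handed back by lwip_recvmsg() with the
+            standard CMSG_* macros ('m' being the struct msghdr it passed in):
+
+              struct cmsghdr *c;
+              for (c = CMSG_FIRSTHDR(&m); c != NULL; c = CMSG_NXTHDR(&m, c)) {
+                if ((c->cmsg_level == IPPROTO_IP) && (c->cmsg_type == IP_PKTINFO)) {
+                  struct in_pktinfo *pi = (struct in_pktinfo *)CMSG_DATA(c);
+                  // pi->ipi_addr holds the datagram's destination address
+                }
+              }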
*/ + struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(chdr); + chdr->cmsg_level = IPPROTO_IP; + chdr->cmsg_type = IP_PKTINFO; + chdr->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo)); + pkti->ipi_ifindex = buf->p->if_idx; + inet_addr_from_ip4addr(&pkti->ipi_addr, ip_2_ip4(netbuf_destaddr(buf))); + msg->msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo)); + wrote_msg = 1; + } else { + msg->msg_flags |= MSG_CTRUNC; + } +#endif /* LWIP_IPV4 */ + } + } +#endif /* LWIP_NETBUF_RECVINFO */ + + if (!wrote_msg) { + msg->msg_controllen = 0; + } + } + + /* If we don't peek the incoming message: zero lastdata pointer and free the netbuf */ + if ((flags & MSG_PEEK) == 0) { + sock->lastdata.netbuf = NULL; + netbuf_delete(buf); + } + if (datagram_len) { + *datagram_len = buflen; + } + return ERR_OK; +} + +ssize_t +lwip_recvfrom(int s, void *mem, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen) +{ + struct lwip_sock *sock; + ssize_t ret; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags)); + sock = get_socket(s); + if (!sock) { + return -1; + } +#if LWIP_TCP + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + ret = lwip_recv_tcp(sock, mem, len, flags); + lwip_recv_tcp_from(sock, from, fromlen, "lwip_recvfrom", s, ret); + done_socket(sock); + return ret; + } else +#endif + { + u16_t datagram_len = 0; + struct iovec vec; + struct msghdr msg; + err_t err; + vec.iov_base = mem; + vec.iov_len = len; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_name = from; + msg.msg_namelen = (fromlen ? *fromlen : 0); + err = lwip_recvfrom_udp_raw(sock, flags, &msg, &datagram_len, s); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n", + s, lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + ret = (ssize_t)LWIP_MIN(LWIP_MIN(len, datagram_len), SSIZE_MAX); + if (fromlen) { + *fromlen = msg.msg_namelen; + } + } + + sock_set_errno(sock, 0); + done_socket(sock); + return ret; +} + +ssize_t +lwip_read(int s, void *mem, size_t len) +{ + return lwip_recvfrom(s, mem, len, 0, NULL, NULL); +} + +ssize_t +lwip_readv(int s, const struct iovec *iov, int iovcnt) +{ + struct msghdr msg; + + msg.msg_name = NULL; + msg.msg_namelen = 0; + /* Hack: we have to cast via number to cast from 'const' pointer to non-const. + Blame the opengroup standard for this inconsistency. 
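+     For reference, a hypothetical caller scatters one read across two
+     buffers like this (sketch, not part of this patch):
+
+       char hdr[4], body[128];
+       struct iovec iov[2];
+       iov[0].iov_base = hdr;  iov[0].iov_len = sizeof(hdr);
+       iov[1].iov_base = body; iov[1].iov_len = sizeof(body);
+       ssize_t n = lwip_readv(s, iov, 2);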
*/ + msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov); + msg.msg_iovlen = iovcnt; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + return lwip_recvmsg(s, &msg, 0); +} + +ssize_t +lwip_recv(int s, void *mem, size_t len, int flags) +{ + return lwip_recvfrom(s, mem, len, flags, NULL, NULL); +} + +ssize_t +lwip_recvmsg(int s, struct msghdr *message, int flags) +{ + struct lwip_sock *sock; + int i; + ssize_t buflen; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg(%d, message=%p, flags=0x%x)\n", s, (void *)message, flags)); + LWIP_ERROR("lwip_recvmsg: invalid message pointer", message != NULL, return ERR_ARG;); + LWIP_ERROR("lwip_recvmsg: unsupported flags", (flags & ~(MSG_PEEK|MSG_DONTWAIT)) == 0, + set_errno(EOPNOTSUPP); return -1;); + + if ((message->msg_iovlen <= 0) || (message->msg_iovlen > IOV_MAX)) { + set_errno(EMSGSIZE); + return -1; + } + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* check for valid vectors */ + buflen = 0; + for (i = 0; i < message->msg_iovlen; i++) { + if ((message->msg_iov[i].iov_base == NULL) || ((ssize_t)message->msg_iov[i].iov_len <= 0) || + ((size_t)(ssize_t)message->msg_iov[i].iov_len != message->msg_iov[i].iov_len) || + ((ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len) <= 0)) { + sock_set_errno(sock, err_to_errno(ERR_VAL)); + done_socket(sock); + return -1; + } + buflen = (ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len); + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + int recv_flags = flags; + message->msg_flags = 0; + /* recv the data */ + buflen = 0; + for (i = 0; i < message->msg_iovlen; i++) { + /* try to receive into this vector's buffer */ + ssize_t recvd_local = lwip_recv_tcp(sock, message->msg_iov[i].iov_base, message->msg_iov[i].iov_len, recv_flags); + if (recvd_local > 0) { + /* sum up received bytes */ + buflen += recvd_local; + } + if ((recvd_local < 0) || (recvd_local < (int)message->msg_iov[i].iov_len) || + (flags & MSG_PEEK)) { + /* returned prematurely (or peeking, which might actually be limited to the first iov) */ + if (buflen <= 0) { + /* nothing received at all, propagate the error */ + buflen = recvd_local; + } + break; + } + /* pass MSG_DONTWAIT to lwip_recv_tcp() to prevent waiting for more data */ + recv_flags |= MSG_DONTWAIT; + } + if (buflen > 0) { + /* reset socket error since we have received something */ + sock_set_errno(sock, 0); + } + /* " If the socket is connected, the msg_name and msg_namelen members shall be ignored." 
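+       (Quoting POSIX recvmsg(); accordingly, this TCP path never writes
+       msg_name.) For datagram sockets the UDP/RAW branch below reports
+       truncation via MSG_TRUNC; a hypothetical caller checks it like this
+       (sketch, not part of this patch):
+
+         struct iovec v; struct msghdr m;
+         memset(&m, 0, sizeof(m));
+         v.iov_base = buf; v.iov_len = sizeof(buf);
+         m.msg_iov = &v; m.msg_iovlen = 1;
+         ssize_t n = lwip_recvmsg(s, &m, 0);
+         if ((n >= 0) && (m.msg_flags & MSG_TRUNC)) {
+           // the datagram was larger than buf and has been truncated
+         }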
*/ + done_socket(sock); + return buflen; +#else /* LWIP_TCP */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + /* else, UDP and RAW NETCONNs */ +#if LWIP_UDP || LWIP_RAW + { + u16_t datagram_len = 0; + err_t err; + err = lwip_recvfrom_udp_raw(sock, flags, message, &datagram_len, s); + if (err != ERR_OK) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n", + s, lwip_strerr(err))); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + if (datagram_len > buflen) { + message->msg_flags |= MSG_TRUNC; + } + + sock_set_errno(sock, 0); + done_socket(sock); + return (int)datagram_len; + } +#else /* LWIP_UDP || LWIP_RAW */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_UDP || LWIP_RAW */ +} + +ssize_t +lwip_send(int s, const void *data, size_t size, int flags) +{ + struct lwip_sock *sock; + err_t err; + u8_t write_flags; + size_t written; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n", + s, data, size, flags)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { +#if (LWIP_UDP || LWIP_RAW) + done_socket(sock); + return lwip_sendto(s, data, size, flags, NULL, 0); +#else /* (LWIP_UDP || LWIP_RAW) */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* (LWIP_UDP || LWIP_RAW) */ + } + + write_flags = (u8_t)(NETCONN_COPY | + ((flags & MSG_MORE) ? NETCONN_MORE : 0) | + ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0)); + written = 0; + err = netconn_write_partly(sock->conn, data, size, write_flags, &written); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written)); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */ + return (err == ERR_OK ? (ssize_t)written : -1); +} + +ssize_t +lwip_sendmsg(int s, const struct msghdr *msg, int flags) +{ + struct lwip_sock *sock; +#if LWIP_TCP + u8_t write_flags; + size_t written; +#endif + err_t err = ERR_OK; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL, + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", msg->msg_iov != NULL, + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: maximum iovs exceeded", (msg->msg_iovlen > 0) && (msg->msg_iovlen <= IOV_MAX), + sock_set_errno(sock, EMSGSIZE); done_socket(sock); return -1;); + LWIP_ERROR("lwip_sendmsg: unsupported flags", (flags & ~(MSG_DONTWAIT | MSG_MORE)) == 0, + sock_set_errno(sock, EOPNOTSUPP); done_socket(sock); return -1;); + + LWIP_UNUSED_ARG(msg->msg_control); + LWIP_UNUSED_ARG(msg->msg_controllen); + LWIP_UNUSED_ARG(msg->msg_flags); + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + write_flags = (u8_t)(NETCONN_COPY | + ((flags & MSG_MORE) ? NETCONN_MORE : 0) | + ((flags & MSG_DONTWAIT) ? 
NETCONN_DONTBLOCK : 0)); + + written = 0; + err = netconn_write_vectors_partly(sock->conn, (struct netvector *)msg->msg_iov, (u16_t)msg->msg_iovlen, write_flags, &written); + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */ + return (err == ERR_OK ? (ssize_t)written : -1); +#else /* LWIP_TCP */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + /* else, UDP and RAW NETCONNs */ +#if LWIP_UDP || LWIP_RAW + { + struct netbuf chain_buf; + int i; + ssize_t size = 0; + + LWIP_UNUSED_ARG(flags); + LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) || + IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + + /* initialize chain buffer with destination */ + memset(&chain_buf, 0, sizeof(struct netbuf)); + if (msg->msg_name) { + u16_t remote_port; + SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf.addr, remote_port); + netbuf_fromport(&chain_buf) = remote_port; + } +#if LWIP_NETIF_TX_SINGLE_PBUF + for (i = 0; i < msg->msg_iovlen; i++) { + size += msg->msg_iov[i].iov_len; + if ((msg->msg_iov[i].iov_len > INT_MAX) || (size < (int)msg->msg_iov[i].iov_len)) { + /* overflow */ + goto sendmsg_emsgsize; + } + } + if (size > 0xFFFF) { + /* overflow */ + goto sendmsg_emsgsize; + } + /* Allocate a new netbuf and copy the data into it. */ + if (netbuf_alloc(&chain_buf, (u16_t)size) == NULL) { + err = ERR_MEM; + } else { + /* flatten the IO vectors */ + size_t offset = 0; + for (i = 0; i < msg->msg_iovlen; i++) { + MEMCPY(&((u8_t *)chain_buf.p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len); + offset += msg->msg_iov[i].iov_len; + } +#if LWIP_CHECKSUM_ON_COPY + { + /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */ + u16_t chksum = ~inet_chksum_pbuf(chain_buf.p); + netbuf_set_chksum(&chain_buf, chksum); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + err = ERR_OK; + } +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + /* create a chained netbuf from the IO vectors. 
NOTE: we assemble a pbuf chain + manually to avoid having to allocate, chain, and delete a netbuf for each iov */ + for (i = 0; i < msg->msg_iovlen; i++) { + struct pbuf *p; + if (msg->msg_iov[i].iov_len > 0xFFFF) { + /* overflow */ + goto sendmsg_emsgsize; + } + p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF); + if (p == NULL) { + err = ERR_MEM; /* let netbuf_delete() clean up chain_buf */ + break; + } + p->payload = msg->msg_iov[i].iov_base; + p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len; + /* netbuf empty, add new pbuf */ + if (chain_buf.p == NULL) { + chain_buf.p = chain_buf.ptr = p; + /* add pbuf to existing pbuf chain */ + } else { + if (chain_buf.p->tot_len + p->len > 0xffff) { + /* overflow */ + pbuf_free(p); + goto sendmsg_emsgsize; + } + pbuf_cat(chain_buf.p, p); + } + } + /* save size of total chain */ + if (err == ERR_OK) { + size = netbuf_len(&chain_buf); + } +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + if (err == ERR_OK) { +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(chain_buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf.addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf.addr), ip_2_ip6(&chain_buf.addr)); + IP_SET_TYPE_VAL(chain_buf.addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* send the data */ + err = netconn_send(sock->conn, &chain_buf); + } + + /* deallocate the buffer */ + netbuf_free(&chain_buf); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? size : -1); +sendmsg_emsgsize: + sock_set_errno(sock, EMSGSIZE); + netbuf_free(&chain_buf); + done_socket(sock); + return -1; + } +#else /* LWIP_UDP || LWIP_RAW */ + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_UDP || LWIP_RAW */ +} + +ssize_t +lwip_sendto(int s, const void *data, size_t size, int flags, + const struct sockaddr *to, socklen_t tolen) +{ + struct lwip_sock *sock; + err_t err; + u16_t short_size; + u16_t remote_port; + struct netbuf buf; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCP + done_socket(sock); + return lwip_send(s, data, size, flags); +#else /* LWIP_TCP */ + LWIP_UNUSED_ARG(flags); + sock_set_errno(sock, err_to_errno(ERR_ARG)); + done_socket(sock); + return -1; +#endif /* LWIP_TCP */ + } + + if (size > LWIP_MIN(0xFFFF, SSIZE_MAX)) { + /* cannot fit into one datagram (at least for us) */ + sock_set_errno(sock, EMSGSIZE); + done_socket(sock); + return -1; + } + short_size = (u16_t)size; + LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) || + (IS_SOCK_ADDR_LEN_VALID(tolen) && + ((to != NULL) && (IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))))), + sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;); + LWIP_UNUSED_ARG(tolen); + + /* initialize a buffer */ + buf.p = buf.ptr = NULL; +#if LWIP_CHECKSUM_ON_COPY + buf.flags = 0; +#endif /* LWIP_CHECKSUM_ON_COPY */ + if (to) { + SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port); + } else { + remote_port = 0; + ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr); + } + netbuf_fromport(&buf) = remote_port; + + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=", + s, data, short_size, flags)); + ip_addr_debug_print_val(SOCKETS_DEBUG, buf.addr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port)); + + /* make the buffer point to the data that should be 
sent */ +#if LWIP_NETIF_TX_SINGLE_PBUF + /* Allocate a new netbuf and copy the data into it. */ + if (netbuf_alloc(&buf, short_size) == NULL) { + err = ERR_MEM; + } else { +#if LWIP_CHECKSUM_ON_COPY + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) { + u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size); + netbuf_set_chksum(&buf, chksum); + } else +#endif /* LWIP_CHECKSUM_ON_COPY */ + { + MEMCPY(buf.p->payload, data, short_size); + } + err = ERR_OK; + } +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + err = netbuf_ref(&buf, data, short_size); +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + if (err == ERR_OK) { +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */ + if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) { + unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr)); + IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + /* send the data */ + err = netconn_send(sock->conn, &buf); + } + + /* deallocate the buffer */ + netbuf_free(&buf); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? short_size : -1); +} + +int +lwip_socket(int domain, int type, int protocol) +{ + struct netconn *conn; + int i; + + LWIP_UNUSED_ARG(domain); /* @todo: check this */ + + /* create a netconn */ + switch (type) { + case SOCK_RAW: + conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW), + (u8_t)protocol, DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); + break; + case SOCK_DGRAM: + conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, + ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)), + DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); +#if LWIP_NETBUF_RECVINFO + if (conn) { + /* netconn layer enables pktinfo by default, sockets default to off */ + conn->flags &= ~NETCONN_FLAG_PKTINFO; + } +#endif /* LWIP_NETBUF_RECVINFO */ + break; + case SOCK_STREAM: + conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), DEFAULT_SOCKET_EVENTCB); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ", + domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol)); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n", + domain, type, protocol)); + set_errno(EINVAL); + return -1; + } + + if (!conn) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n")); + set_errno(ENOBUFS); + return -1; + } + + i = alloc_socket(conn, 0); + + if (i == -1) { + netconn_delete(conn); + set_errno(ENFILE); + return -1; + } + conn->socket = i; + done_socket(&sockets[i - LWIP_SOCKET_OFFSET]); + LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i)); + set_errno(0); + return i; +} + +ssize_t +lwip_write(int s, const void *data, size_t size) +{ + return lwip_send(s, data, size, 0); +} + +ssize_t +lwip_writev(int s, const struct iovec *iov, int iovcnt) +{ + struct msghdr msg; + + msg.msg_name = NULL; + msg.msg_namelen = 0; + /* Hack: we have to cast via number to cast from 'const' pointer to non-const. + Blame the opengroup standard for this inconsistency. 
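+     The gather-side counterpart of lwip_readv(); a hypothetical caller
+     sends a header and a payload in one call (sketch, not part of this
+     patch):
+
+       char hdr[4] = "HDR"; char payload[32] = "...";
+       struct iovec iov[2];
+       iov[0].iov_base = hdr;     iov[0].iov_len = sizeof(hdr);
+       iov[1].iov_base = payload; iov[1].iov_len = sizeof(payload);
+       ssize_t n = lwip_writev(s, iov, 2);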
*/ + msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov); + msg.msg_iovlen = iovcnt; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = 0; + return lwip_sendmsg(s, &msg, 0); +} + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +/* Add select_cb to select_cb_list. */ +static void +lwip_link_select_cb(struct lwip_select_cb *select_cb) +{ + LWIP_SOCKET_SELECT_DECL_PROTECT(lev); + + /* Protect the select_cb_list */ + LWIP_SOCKET_SELECT_PROTECT(lev); + + /* Put this select_cb on top of list */ + select_cb->next = select_cb_list; + if (select_cb_list != NULL) { + select_cb_list->prev = select_cb; + } + select_cb_list = select_cb; +#if !LWIP_TCPIP_CORE_LOCKING + /* Increasing this counter tells select_check_waiters that the list has changed. */ + select_cb_ctr++; +#endif + + /* Now we can safely unprotect */ + LWIP_SOCKET_SELECT_UNPROTECT(lev); +} + +/* Remove select_cb from select_cb_list. */ +static void +lwip_unlink_select_cb(struct lwip_select_cb *select_cb) +{ + LWIP_SOCKET_SELECT_DECL_PROTECT(lev); + + /* Take us off the list */ + LWIP_SOCKET_SELECT_PROTECT(lev); + if (select_cb->next != NULL) { + select_cb->next->prev = select_cb->prev; + } + if (select_cb_list == select_cb) { + LWIP_ASSERT("select_cb->prev == NULL", select_cb->prev == NULL); + select_cb_list = select_cb->next; + } else { + LWIP_ASSERT("select_cb->prev != NULL", select_cb->prev != NULL); + select_cb->prev->next = select_cb->next; + } +#if !LWIP_TCPIP_CORE_LOCKING + /* Increasing this counter tells select_check_waiters that the list has changed. */ + select_cb_ctr++; +#endif + LWIP_SOCKET_SELECT_UNPROTECT(lev); +} +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +#if LWIP_SOCKET_SELECT +/** + * Go through the readset and writeset lists and see which of the sockets + * set in the sets have events. On return, readset, writeset and exceptset have + * the sockets enabled that had events. + * + * @param maxfdp1 the highest socket index in the sets + * @param readset_in set of sockets to check for read events + * @param writeset_in set of sockets to check for write events + * @param exceptset_in set of sockets to check for error events + * @param readset_out set of sockets that had read events + * @param writeset_out set of sockets that had write events + * @param exceptset_out set of sockets that had error events + * @return number of sockets that had events (read/write/exception) (>= 0) + */ +static int +lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in, + fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out) +{ + int i, nready = 0; + fd_set lreadset, lwriteset, lexceptset; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + FD_ZERO(&lreadset); + FD_ZERO(&lwriteset); + FD_ZERO(&lexceptset); + + /* Go through each socket in each list to count number of sockets which + currently match */ + for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) { + /* if this FD is not in the set, continue */ + if (!(readset_in && FD_ISSET(i, readset_in)) && + !(writeset_in && FD_ISSET(i, writeset_in)) && + !(exceptset_in && FD_ISSET(i, exceptset_in))) { + continue; + } + /* First get the socket's status (protected)... */ + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + void *lastdata = sock->lastdata.pbuf; + s16_t rcvevent = sock->rcvevent; + u16_t sendevent = sock->sendevent; + u16_t errevent = sock->errevent; + SYS_ARCH_UNPROTECT(lev); + + /* ... 
then examine it: */ + /* See if netconn of this socket is ready for read */ + if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) { + FD_SET(i, &lreadset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i)); + nready++; + } + /* See if netconn of this socket is ready for write */ + if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) { + FD_SET(i, &lwriteset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i)); + nready++; + } + /* See if netconn of this socket had an error */ + if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) { + FD_SET(i, &lexceptset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i)); + nready++; + } + done_socket(sock); + } else { + SYS_ARCH_UNPROTECT(lev); + /* not a valid open socket */ + return -1; + } + } + /* copy local sets to the ones provided as arguments */ + *readset_out = lreadset; + *writeset_out = lwriteset; + *exceptset_out = lexceptset; + + LWIP_ASSERT("nready >= 0", nready >= 0); + return nready; +} + +#if LWIP_NETCONN_FULLDUPLEX +/* Mark all of the set sockets in one of the three fdsets passed to select as used. + * All sockets are marked (and later unmarked), whether they are open or not. + * This is OK as lwip_selscan aborts select when non-open sockets are found. + */ +static void +lwip_select_inc_sockets_used_set(int maxfdp, fd_set *fdset, fd_set *used_sockets) +{ + SYS_ARCH_DECL_PROTECT(lev); + if (fdset) { + int i; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) { + /* if this FD is in the set, lock it (unless already done) */ + if (FD_ISSET(i, fdset) && !FD_ISSET(i, used_sockets)) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + /* leave the socket used until released by lwip_select_dec_sockets_used */ + FD_SET(i, used_sockets); + } + SYS_ARCH_UNPROTECT(lev); + } + } + } +} + +/* Mark all sockets passed to select as used to prevent them from being freed + * from other threads while select is running. + * Marked sockets are added to 'used_sockets' to mark them only once and be able + * to unmark them correctly. 
+ */ +static void +lwip_select_inc_sockets_used(int maxfdp, fd_set *fdset1, fd_set *fdset2, fd_set *fdset3, fd_set *used_sockets) +{ + FD_ZERO(used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset1, used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset2, used_sockets); + lwip_select_inc_sockets_used_set(maxfdp, fdset3, used_sockets); +} + +/* Let go all sockets that were marked as used when starting select */ +static void +lwip_select_dec_sockets_used(int maxfdp, fd_set *used_sockets) +{ + int i; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) { + /* if this FD is not in the set, continue */ + if (FD_ISSET(i, used_sockets)) { + struct lwip_sock *sock = tryget_socket_unconn_nouse(i); + LWIP_ASSERT("socket gone at the end of select", sock != NULL); + if (sock != NULL) { + done_socket(sock); + } + } + } +} +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, used_sockets) +#define lwip_select_dec_sockets_used(maxfdp1, used_sockets) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +int +lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset, + struct timeval *timeout) +{ + u32_t waitres = 0; + int nready; + fd_set lreadset, lwriteset, lexceptset; + u32_t msectimeout; + int i; + int maxfdp2; +#if LWIP_NETCONN_SEM_PER_THREAD + int waited = 0; +#endif +#if LWIP_NETCONN_FULLDUPLEX + fd_set used_sockets; +#endif + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n", + maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset, + timeout ? (s32_t)timeout->tv_sec : (s32_t) - 1, + timeout ? (s32_t)timeout->tv_usec : (s32_t) - 1)); + + if ((maxfdp1 < 0) || (maxfdp1 > LWIP_SELECT_MAXNFDS)) { + set_errno(EINVAL); + return -1; + } + + lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, &used_sockets); + + /* Go through each socket in each list to count number of sockets which + currently match */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + + if (nready < 0) { + /* one of the sockets in one of the fd_sets was invalid */ + set_errno(EBADF); + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + return -1; + } else if (nready > 0) { + /* one or more sockets are set, no need to wait */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready)); + } else { + /* If we don't have any current events, then suspend if we are supposed to */ + if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n")); + /* This is OK as the local fdsets are empty and nready is zero, + or we would have returned earlier. */ + } else { + /* None ready: add our semaphore to list: + We don't actually need any dynamic memory. Our entry on the + list is only valid while we are in this function, so it's ok + to use local variables (unless we're running in MPU compatible + mode). 
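+         For orientation, a minimal (hypothetical) caller of this API follows
+         the usual BSD select() pattern (sketch, not part of this patch):
+
+           fd_set rfds;
+           struct timeval tv;
+           FD_ZERO(&rfds); FD_SET(s, &rfds);
+           tv.tv_sec = 1; tv.tv_usec = 0;
+           int n = lwip_select(s + 1, &rfds, NULL, NULL, &tv);
+           if ((n > 0) && FD_ISSET(s, &rfds)) {
+             // s is readable
+           }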
*/ + API_SELECT_CB_VAR_DECLARE(select_cb); + API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(ENOMEM); lwip_select_dec_sockets_used(maxfdp1, &used_sockets); return -1); + memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb)); + + API_SELECT_CB_VAR_REF(select_cb).readset = readset; + API_SELECT_CB_VAR_REF(select_cb).writeset = writeset; + API_SELECT_CB_VAR_REF(select_cb).exceptset = exceptset; +#if LWIP_NETCONN_SEM_PER_THREAD + API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) { + /* failed to create semaphore */ + set_errno(ENOMEM); + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + API_SELECT_CB_VAR_FREE(select_cb); + return -1; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + + /* Increase select_waiting for each socket we are interested in */ + maxfdp2 = maxfdp1; + for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) { + if ((readset && FD_ISSET(i, readset)) || + (writeset && FD_ISSET(i, writeset)) || + (exceptset && FD_ISSET(i, exceptset))) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + sock->select_waiting++; + if (sock->select_waiting == 0) { + /* overflow - too many threads waiting */ + sock->select_waiting--; + nready = -1; + maxfdp2 = i; + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + set_errno(EBUSY); + break; + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + } else { + /* Not a valid socket */ + nready = -1; + maxfdp2 = i; + SYS_ARCH_UNPROTECT(lev); + set_errno(EBADF); + break; + } + } + } + + if (nready >= 0) { + /* Call lwip_selscan again: there could have been events between + the last scan (without us on the list) and putting us on the list! */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + if (!nready) { + /* Still none ready, just wait to be woken */ + if (timeout == 0) { + /* Wait forever */ + msectimeout = 0; + } else { + long msecs_long = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500) / 1000)); + if (msecs_long <= 0) { + /* Wait 1ms at least (0 means wait forever) */ + msectimeout = 1; + } else { + msectimeout = (u32_t)msecs_long; + } + } + + waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout); +#if LWIP_NETCONN_SEM_PER_THREAD + waited = 1; +#endif + } + } + + /* Decrease select_waiting for each socket we are interested in */ + for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) { + if ((readset && FD_ISSET(i, readset)) || + (writeset && FD_ISSET(i, writeset)) || + (exceptset && FD_ISSET(i, exceptset))) { + struct lwip_sock *sock; + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(i); + if (sock != NULL) { + /* for now, handle select_waiting==0... 
*/ + LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0); + if (sock->select_waiting > 0) { + sock->select_waiting--; + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + } else { + SYS_ARCH_UNPROTECT(lev); + /* Not a valid socket */ + nready = -1; + set_errno(EBADF); + } + } + } + + lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + +#if LWIP_NETCONN_SEM_PER_THREAD + if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) { + /* don't leave the thread-local semaphore signalled */ + sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1); + } +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + API_SELECT_CB_VAR_FREE(select_cb); + + if (nready < 0) { + /* This happens when a socket got closed while waiting */ + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + return -1; + } + + if (waitres == SYS_ARCH_TIMEOUT) { + /* Timeout */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n")); + /* This is OK as the local fdsets are empty and nready is zero, + or we would have returned earlier. */ + } else { + /* See what's set now after waiting */ + nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready)); + } + } + } + + lwip_select_dec_sockets_used(maxfdp1, &used_sockets); + set_errno(0); + if (readset) { + *readset = lreadset; + } + if (writeset) { + *writeset = lwriteset; + } + if (exceptset) { + *exceptset = lexceptset; + } + return nready; +} +#endif /* LWIP_SOCKET_SELECT */ + +#if LWIP_SOCKET_POLL +/** Options for the lwip_pollscan function. */ +enum lwip_pollscan_opts +{ + /** Clear revents in each struct pollfd. */ + LWIP_POLLSCAN_CLEAR = 1, + + /** Increment select_waiting in each struct lwip_sock. */ + LWIP_POLLSCAN_INC_WAIT = 2, + + /** Decrement select_waiting in each struct lwip_sock. */ + LWIP_POLLSCAN_DEC_WAIT = 4 +}; + +/** + * Update revents in each struct pollfd. + * Optionally update select_waiting in struct lwip_sock. + * + * @param fds array of structures to update + * @param nfds number of structures in fds + * @param opts what to update and how + * @return number of structures that have revents != 0 + */ +static int +lwip_pollscan(struct pollfd *fds, nfds_t nfds, enum lwip_pollscan_opts opts) +{ + int nready = 0; + nfds_t fdi; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + /* Go through each struct pollfd in the array. */ + for (fdi = 0; fdi < nfds; fdi++) { + if ((opts & LWIP_POLLSCAN_CLEAR) != 0) { + fds[fdi].revents = 0; + } + + /* Negative fd means the caller wants us to ignore this struct. + POLLNVAL means we already detected that the fd is invalid; + if another thread has since opened a new socket with that fd, + we must not use that socket. */ + if (fds[fdi].fd >= 0 && (fds[fdi].revents & POLLNVAL) == 0) { + /* First get the socket's status (protected)... 
*/ + SYS_ARCH_PROTECT(lev); + sock = tryget_socket_unconn_locked(fds[fdi].fd); + if (sock != NULL) { + void* lastdata = sock->lastdata.pbuf; + s16_t rcvevent = sock->rcvevent; + u16_t sendevent = sock->sendevent; + u16_t errevent = sock->errevent; + + if ((opts & LWIP_POLLSCAN_INC_WAIT) != 0) { + sock->select_waiting++; + if (sock->select_waiting == 0) { + /* overflow - too many threads waiting */ + sock->select_waiting--; + nready = -1; + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + break; + } + } else if ((opts & LWIP_POLLSCAN_DEC_WAIT) != 0) { + /* for now, handle select_waiting==0... */ + LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0); + if (sock->select_waiting > 0) { + sock->select_waiting--; + } + } + SYS_ARCH_UNPROTECT(lev); + done_socket(sock); + + /* ... then examine it: */ + /* See if netconn of this socket is ready for read */ + if ((fds[fdi].events & POLLIN) != 0 && ((lastdata != NULL) || (rcvevent > 0))) { + fds[fdi].revents |= POLLIN; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for reading\n", fds[fdi].fd)); + } + /* See if netconn of this socket is ready for write */ + if ((fds[fdi].events & POLLOUT) != 0 && (sendevent != 0)) { + fds[fdi].revents |= POLLOUT; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for writing\n", fds[fdi].fd)); + } + /* See if netconn of this socket had an error */ + if (errevent != 0) { + /* POLLERR is output only. */ + fds[fdi].revents |= POLLERR; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for exception\n", fds[fdi].fd)); + } + } else { + /* Not a valid socket */ + SYS_ARCH_UNPROTECT(lev); + /* POLLNVAL is output only. */ + fds[fdi].revents |= POLLNVAL; + return -1; + } + } + + /* Will return the number of structures that have events, + not the number of events. */ + if (fds[fdi].revents != 0) { + nready++; + } + } + + LWIP_ASSERT("nready >= 0", nready >= 0); + return nready; +} + +#if LWIP_NETCONN_FULLDUPLEX +/* Mark all sockets as used. + * + * All sockets are marked (and later unmarked), whether they are open or not. + * This is OK as lwip_pollscan aborts select when non-open sockets are found. + */ +static void +lwip_poll_inc_sockets_used(struct pollfd *fds, nfds_t nfds) +{ + nfds_t fdi; + + if(fds) { + /* Go through each struct pollfd in the array. */ + for (fdi = 0; fdi < nfds; fdi++) { + /* Increase the reference counter */ + tryget_socket_unconn(fds[fdi].fd); + } + } +} + +/* Let go all sockets that were marked as used when starting poll */ +static void +lwip_poll_dec_sockets_used(struct pollfd *fds, nfds_t nfds) +{ + nfds_t fdi; + + if(fds) { + /* Go through each struct pollfd in the array. 
*/ + for (fdi = 0; fdi < nfds; fdi++) { + struct lwip_sock *sock = tryget_socket_unconn_nouse(fds[fdi].fd); + if (sock != NULL) { + done_socket(sock); + } + } + } +} +#else /* LWIP_NETCONN_FULLDUPLEX */ +#define lwip_poll_inc_sockets_used(fds, nfds) +#define lwip_poll_dec_sockets_used(fds, nfds) +#endif /* LWIP_NETCONN_FULLDUPLEX */ + +int +lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout) +{ + u32_t waitres = 0; + int nready; + u32_t msectimeout; +#if LWIP_NETCONN_SEM_PER_THREAD + int waited = 0; +#endif + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll(%p, %d, %d)\n", + (void*)fds, (int)nfds, timeout)); + LWIP_ERROR("lwip_poll: invalid fds", ((fds != NULL && nfds > 0) || (fds == NULL && nfds == 0)), + set_errno(EINVAL); return -1;); + + lwip_poll_inc_sockets_used(fds, nfds); + + /* Go through each struct pollfd to count number of structures + which currently match */ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_CLEAR); + + if (nready < 0) { + lwip_poll_dec_sockets_used(fds, nfds); + return -1; + } + + /* If we don't have any current events, then suspend if we are supposed to */ + if (!nready) { + API_SELECT_CB_VAR_DECLARE(select_cb); + + if (timeout == 0) { + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: no timeout, returning 0\n")); + goto return_success; + } + API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(EAGAIN); lwip_poll_dec_sockets_used(fds, nfds); return -1); + memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb)); + + /* None ready: add our semaphore to list: + We don't actually need any dynamic memory. Our entry on the + list is only valid while we are in this function, so it's ok + to use local variables. */ + + API_SELECT_CB_VAR_REF(select_cb).poll_fds = fds; + API_SELECT_CB_VAR_REF(select_cb).poll_nfds = nfds; +#if LWIP_NETCONN_SEM_PER_THREAD + API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) { + /* failed to create semaphore */ + set_errno(EAGAIN); + lwip_poll_dec_sockets_used(fds, nfds); + API_SELECT_CB_VAR_FREE(select_cb); + return -1; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + + /* Increase select_waiting for each socket we are interested in. + Also, check for events again: there could have been events between + the last scan (without us on the list) and putting us on the list! */ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_INC_WAIT); + + if (!nready) { + /* Still none ready, just wait to be woken */ + if (timeout < 0) { + /* Wait forever */ + msectimeout = 0; + } else { + /* timeout == 0 would have been handled earlier. */ + LWIP_ASSERT("timeout > 0", timeout > 0); + msectimeout = timeout; + } + waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout); +#if LWIP_NETCONN_SEM_PER_THREAD + waited = 1; +#endif + } + + /* Decrease select_waiting for each socket we are interested in, + and check which events occurred while we waited. 
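+       The LWIP_POLLSCAN_DEC_WAIT pass below doubles as the final event scan;
+       its result becomes the return value of lwip_poll(). For orientation, a
+       minimal (hypothetical) caller (sketch, not part of this patch):
+
+         struct pollfd pfd;
+         pfd.fd = s; pfd.events = POLLIN; pfd.revents = 0;
+         int n = lwip_poll(&pfd, 1, 1000);   // wait up to 1000 ms
+         if ((n > 0) && (pfd.revents & POLLIN)) {
+           // s has data to read
+         }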
*/ + nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_DEC_WAIT); + + lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb)); + +#if LWIP_NETCONN_SEM_PER_THREAD + if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) { + /* don't leave the thread-local semaphore signalled */ + sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1); + } +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + API_SELECT_CB_VAR_FREE(select_cb); + + if (nready < 0) { + /* This happens when a socket got closed while waiting */ + lwip_poll_dec_sockets_used(fds, nfds); + return -1; + } + + if (waitres == SYS_ARCH_TIMEOUT) { + /* Timeout */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: timeout expired\n")); + goto return_success; + } + } + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: nready=%d\n", nready)); +return_success: + lwip_poll_dec_sockets_used(fds, nfds); + set_errno(0); + return nready; +} + +/** + * Check whether event_callback should wake up a thread waiting in + * lwip_poll. + */ +static int +lwip_poll_should_wake(const struct lwip_select_cb *scb, int fd, int has_recvevent, int has_sendevent, int has_errevent) +{ + nfds_t fdi; + for (fdi = 0; fdi < scb->poll_nfds; fdi++) { + const struct pollfd *pollfd = &scb->poll_fds[fdi]; + if (pollfd->fd == fd) { + /* Do not update pollfd->revents right here; + that would be a data race because lwip_pollscan + accesses revents without protection. */ + if (has_recvevent && (pollfd->events & POLLIN) != 0) { + return 1; + } + if (has_sendevent && (pollfd->events & POLLOUT) != 0) { + return 1; + } + if (has_errevent) { + /* POLLERR is output only. */ + return 1; + } + } + } + return 0; +} +#endif /* LWIP_SOCKET_POLL */ + +#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL +/** + * Callback registered in the netconn layer for each socket-netconn. + * Processes recvevent (data available) and wakes up tasks waiting for select. + * + * @note for LWIP_TCPIP_CORE_LOCKING any caller of this function + * must have the core lock held when signaling the following events + * as they might cause select_list_cb to be checked: + * NETCONN_EVT_RCVPLUS + * NETCONN_EVT_SENDPLUS + * NETCONN_EVT_ERROR + * This requirement will be asserted in select_check_waiters() + */ +static void +event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len) +{ + int s, check_waiters; + struct lwip_sock *sock; + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_UNUSED_ARG(len); + + /* Get socket */ + if (conn) { + s = conn->socket; + if (s < 0) { + /* Data comes in right away after an accept, even though + * the server task might not have created a new socket yet. + * Just count down (or up) if that's the case and we + * will use the data later. Note that only receive events + * can happen before the new socket is set up. 
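+          * Worked example of this counting trick: conn->socket starts out at
+          * -1; if two RCVPLUS events arrive before the server task calls
+          * accept, the field goes -1 -> -2 -> -3, and lwip_accept() can then
+          * presumably credit the new socket with (-1 - conn->socket) = 2
+          * pending receive events.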
*/ + SYS_ARCH_PROTECT(lev); + if (conn->socket < 0) { + if (evt == NETCONN_EVT_RCVPLUS) { + /* conn->socket is -1 on initialization + lwip_accept adjusts sock->recvevent if conn->socket < -1 */ + conn->socket--; + } + SYS_ARCH_UNPROTECT(lev); + return; + } + s = conn->socket; + SYS_ARCH_UNPROTECT(lev); + } + + sock = get_socket(s); + if (!sock) { + return; + } + } else { + return; + } + + check_waiters = 1; + SYS_ARCH_PROTECT(lev); + /* Set event as required */ + switch (evt) { + case NETCONN_EVT_RCVPLUS: + sock->rcvevent++; + if (sock->rcvevent > 1) { + check_waiters = 0; + } + break; + case NETCONN_EVT_RCVMINUS: + sock->rcvevent--; + check_waiters = 0; + break; + case NETCONN_EVT_SENDPLUS: + if (sock->sendevent) { + check_waiters = 0; + } + sock->sendevent = 1; + break; + case NETCONN_EVT_SENDMINUS: + sock->sendevent = 0; + check_waiters = 0; + break; + case NETCONN_EVT_ERROR: + sock->errevent = 1; + break; + default: + LWIP_ASSERT("unknown event", 0); + break; + } + + if (sock->select_waiting && check_waiters) { + /* Save which events are active */ + int has_recvevent, has_sendevent, has_errevent; + has_recvevent = sock->rcvevent > 0; + has_sendevent = sock->sendevent != 0; + has_errevent = sock->errevent != 0; + SYS_ARCH_UNPROTECT(lev); + /* Check any select calls waiting on this socket */ + select_check_waiters(s, has_recvevent, has_sendevent, has_errevent); + } else { + SYS_ARCH_UNPROTECT(lev); + } + done_socket(sock); +} + +/** + * Check if any select waiters are waiting on this socket and its events + * + * @note on synchronization of select_cb_list: + * LWIP_TCPIP_CORE_LOCKING: the select_cb_list must only be accessed while holding + * the core lock. We do a single pass through the list and signal any waiters. + * Core lock should already be held when calling here!!!! + + * !LWIP_TCPIP_CORE_LOCKING: we use SYS_ARCH_PROTECT but unlock on each iteration + * of the loop, thus creating a possibility where a thread could modify the + * select_cb_list during our UNPROTECT/PROTECT. We use a generational counter to + * detect this change and restart the list walk. 
The list is expected to be small + */ +static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent) +{ + struct lwip_select_cb *scb; +#if !LWIP_TCPIP_CORE_LOCKING + int last_select_cb_ctr; + SYS_ARCH_DECL_PROTECT(lev); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + LWIP_ASSERT_CORE_LOCKED(); + +#if !LWIP_TCPIP_CORE_LOCKING + SYS_ARCH_PROTECT(lev); +again: + /* remember the state of select_cb_list to detect changes */ + last_select_cb_ctr = select_cb_ctr; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + for (scb = select_cb_list; scb != NULL; scb = scb->next) { + if (scb->sem_signalled == 0) { + /* semaphore not signalled yet */ + int do_signal = 0; +#if LWIP_SOCKET_POLL + if (scb->poll_fds != NULL) { + do_signal = lwip_poll_should_wake(scb, s, has_recvevent, has_sendevent, has_errevent); + } +#endif /* LWIP_SOCKET_POLL */ +#if LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL + else +#endif /* LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL */ +#if LWIP_SOCKET_SELECT + { + /* Test this select call for our socket */ + if (has_recvevent) { + if (scb->readset && FD_ISSET(s, scb->readset)) { + do_signal = 1; + } + } + if (has_sendevent) { + if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) { + do_signal = 1; + } + } + if (has_errevent) { + if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) { + do_signal = 1; + } + } + } +#endif /* LWIP_SOCKET_SELECT */ + if (do_signal) { + scb->sem_signalled = 1; + /* For !LWIP_TCPIP_CORE_LOCKING, we don't call SYS_ARCH_UNPROTECT() before signaling + the semaphore, as this might lead to the select thread taking itself off the list, + invalidating the semaphore. */ + sys_sem_signal(SELECT_SEM_PTR(scb->sem)); + } + } +#if LWIP_TCPIP_CORE_LOCKING + } +#else + /* unlock interrupts with each step */ + SYS_ARCH_UNPROTECT(lev); + /* this makes sure interrupt protection time is short */ + SYS_ARCH_PROTECT(lev); + if (last_select_cb_ctr != select_cb_ctr) { + /* someone has changed select_cb_list, restart at the beginning */ + goto again; + } + /* remember the state of select_cb_list to detect changes */ + last_select_cb_ctr = select_cb_ctr; + } + SYS_ARCH_UNPROTECT(lev); +#endif +} +#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */ + +/** + * Close one end of a full-duplex connection. + */ +int +lwip_shutdown(int s, int how) +{ + struct lwip_sock *sock; + err_t err; + u8_t shut_rx = 0, shut_tx = 0; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how)); + + sock = get_socket(s); + if (!sock) { + return -1; + } + + if (sock->conn != NULL) { + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + sock_set_errno(sock, EOPNOTSUPP); + done_socket(sock); + return -1; + } + } else { + sock_set_errno(sock, ENOTCONN); + done_socket(sock); + return -1; + } + + if (how == SHUT_RD) { + shut_rx = 1; + } else if (how == SHUT_WR) { + shut_tx = 1; + } else if (how == SHUT_RDWR) { + shut_rx = 1; + shut_tx = 1; + } else { + sock_set_errno(sock, EINVAL); + done_socket(sock); + return -1; + } + err = netconn_shutdown(sock->conn, shut_rx, shut_tx); + + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return (err == ERR_OK ? 
0 : -1); +} + +static int +lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local) +{ + struct lwip_sock *sock; + union sockaddr_aligned saddr; + ip_addr_t naddr; + u16_t port; + err_t err; + + sock = get_socket(s); + if (!sock) { + return -1; + } + + /* get the IP address and port */ + err = netconn_getaddr(sock->conn, &naddr, &port, local); + if (err != ERR_OK) { + sock_set_errno(sock, err_to_errno(err)); + done_socket(sock); + return -1; + } + +#if LWIP_IPV4 && LWIP_IPV6 + /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */ + if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) && + IP_IS_V4_VAL(naddr)) { + ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr)); + IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6); + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + + IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port); + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s)); + ip_addr_debug_print_val(SOCKETS_DEBUG, naddr); + LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port)); + + if (*namelen > saddr.sa.sa_len) { + *namelen = saddr.sa.sa_len; + } + MEMCPY(name, &saddr, *namelen); + + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +} + +int +lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen) +{ + return lwip_getaddrname(s, name, namelen, 0); +} + +int +lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen) +{ + return lwip_getaddrname(s, name, namelen, 1); +} + +int +lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen) +{ + int err; + struct lwip_sock *sock = get_socket(s); +#if !LWIP_TCPIP_CORE_LOCKING + err_t cberr; + LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + if (!sock) { + return -1; + } + + if ((NULL == optval) || (NULL == optlen)) { + sock_set_errno(sock, EFAULT); + done_socket(sock); + return -1; + } + +#if LWIP_TCPIP_CORE_LOCKING + /* core-locking can just call the -impl function */ + LOCK_TCPIP_CORE(); + err = lwip_getsockopt_impl(s, level, optname, optval, optlen); + UNLOCK_TCPIP_CORE(); + +#else /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_MPU_COMPATIBLE + /* MPU_COMPATIBLE copies the optval data, so check for max size here */ + if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) { + sock_set_errno(sock, ENOBUFS); + done_socket(sock); + return -1; + } +#endif /* LWIP_MPU_COMPATIBLE */ + + LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock); + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen; +#if !LWIP_MPU_COMPATIBLE + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval; +#endif /* !LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0; +#if LWIP_NETCONN_SEM_PER_THREAD + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed; +#endif + cberr = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data)); + if (cberr != ERR_OK) { + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); + sock_set_errno(sock, err_to_errno(cberr)); + done_socket(sock); + return -1; + } + sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0); + + /* write back optlen and optval */ + *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen; +#if LWIP_MPU_COMPATIBLE + MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, + 
LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen); +#endif /* LWIP_MPU_COMPATIBLE */ + + /* maybe lwip_getsockopt_internal has changed err */ + err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err; + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sock_set_errno(sock, err); + done_socket(sock); + return err ? -1 : 0; +} + +#if !LWIP_TCPIP_CORE_LOCKING +/** lwip_getsockopt_callback: only used without CORE_LOCKING + * to get into the tcpip_thread + */ +static void +lwip_getsockopt_callback(void *arg) +{ + struct lwip_setgetsockopt_data *data; + LWIP_ASSERT("arg != NULL", arg != NULL); + data = (struct lwip_setgetsockopt_data *)arg; + + data->err = lwip_getsockopt_impl(data->s, data->level, data->optname, +#if LWIP_MPU_COMPATIBLE + data->optval, +#else /* LWIP_MPU_COMPATIBLE */ + data->optval.p, +#endif /* LWIP_MPU_COMPATIBLE */ + &data->optlen); + + sys_sem_signal((sys_sem_t *)(data->completed_sem)); +} +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +static int +lwip_sockopt_to_ipopt(int optname) +{ + /* Map SO_* values to our internal SOF_* values + * We should not rely on #defines in socket.h + * being in sync with ip.h. + */ + switch (optname) { + case SO_BROADCAST: + return SOF_BROADCAST; + case SO_KEEPALIVE: + return SOF_KEEPALIVE; + case SO_REUSEADDR: + return SOF_REUSEADDR; + default: + LWIP_ASSERT("Unknown socket option", 0); + return 0; + } +} + +/** lwip_getsockopt_impl: the actual implementation of getsockopt: + * same argument as lwip_getsockopt, either called directly or through callback + */ +static int +lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen) +{ + int err = 0; + struct lwip_sock *sock = tryget_socket(s); + if (!sock) { + return EBADF; + } + +#ifdef LWIP_HOOK_SOCKETS_GETSOCKOPT + if (LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) { + return err; + } +#endif + + switch (level) { + + /* Level: SOL_SOCKET */ + case SOL_SOCKET: + switch (optname) { + +#if LWIP_TCP + case SO_ACCEPTCONN: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) { + done_socket(sock); + return ENOPROTOOPT; + } + if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) { + *(int *)optval = 1; + } else { + *(int *)optval = 0; + } + break; +#endif /* LWIP_TCP */ + + /* The option flags */ + case SO_BROADCAST: + case SO_KEEPALIVE: +#if SO_REUSE + case SO_REUSEADDR: +#endif /* SO_REUSE */ + if ((optname == SO_BROADCAST) && + (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) { + done_socket(sock); + return ENOPROTOOPT; + } + + optname = lwip_sockopt_to_ipopt(optname); + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = ip_get_option(sock->conn->pcb.ip, optname); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n", + s, optname, (*(int *)optval ? 
"on" : "off"))); + break; + + case SO_TYPE: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) { + case NETCONN_RAW: + *(int *)optval = SOCK_RAW; + break; + case NETCONN_TCP: + *(int *)optval = SOCK_STREAM; + break; + case NETCONN_UDP: + *(int *)optval = SOCK_DGRAM; + break; + default: /* unrecognized socket type */ + *(int *)optval = netconn_type(sock->conn); + LWIP_DEBUGF(SOCKETS_DEBUG, + ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n", + s, *(int *)optval)); + } /* switch (netconn_type(sock->conn)) */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n", + s, *(int *)optval)); + break; + + case SO_ERROR: + LWIP_SOCKOPT_CHECK_OPTLEN(sock, *optlen, int); + *(int *)optval = err_to_errno(netconn_err(sock->conn)); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n", + s, *(int *)optval)); + break; + +#if LWIP_SO_SNDTIMEO + case SO_SNDTIMEO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn)); + break; +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + case SO_RCVTIMEO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn)); + break; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + case SO_RCVBUF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + *(int *)optval = netconn_get_recvbufsize(sock->conn); + break; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + case SO_LINGER: { + s16_t conn_linger; + struct linger *linger = (struct linger *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger); + conn_linger = sock->conn->linger; + if (conn_linger >= 0) { + linger->l_onoff = 1; + linger->l_linger = (int)conn_linger; + } else { + linger->l_onoff = 0; + linger->l_linger = 0; + } + } + break; +#endif /* LWIP_SO_LINGER */ +#if LWIP_UDP + case SO_NO_CHECK: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP); +#if LWIP_UDPLITE + if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) { + /* this flag is only available for UDP, not for UDP lite */ + done_socket(sock); + return EAFNOSUPPORT; + } +#endif /* LWIP_UDPLITE */ + *(int *)optval = udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM) ? 
1 : 0; + break; +#endif /* LWIP_UDP */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + + /* Level: IPPROTO_IP */ + case IPPROTO_IP: + switch (optname) { + case IP_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = sock->conn->pcb.ip->ttl; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n", + s, *(int *)optval)); + break; + case IP_TOS: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + *(int *)optval = sock->conn->pcb.ip->tos; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n", + s, *(int *)optval)); + break; +#if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP + case IP_MULTICAST_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) { + done_socket(sock); + return ENOPROTOOPT; + } + *(u8_t *)optval = udp_get_multicast_ttl(sock->conn->pcb.udp); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n", + s, *(int *)optval)); + break; + case IP_MULTICAST_IF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr); + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) { + done_socket(sock); + return ENOPROTOOPT; + } + inet_addr_from_ip4addr((struct in_addr *)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp)); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n", + s, *(u32_t *)optval)); + break; + case IP_MULTICAST_LOOP: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t); + if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) { + *(u8_t *)optval = 1; + } else { + *(u8_t *)optval = 0; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n", + s, *(int *)optval)); + break; +#endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + +#if LWIP_TCP + /* Level: IPPROTO_TCP */ + case IPPROTO_TCP: + /* Special case: all IPPROTO_TCP options take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP); + if (sock->conn->pcb.tcp->state == LISTEN) { + done_socket(sock); + return EINVAL; + } + switch (optname) { + case TCP_NODELAY: + *(int *)optval = tcp_nagle_disabled(sock->conn->pcb.tcp); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n", + s, (*(int *)optval) ? 
"on" : "off") ); + break; + case TCP_KEEPALIVE: + *(int *)optval = (int)sock->conn->pcb.tcp->keep_idle; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n", + s, *(int *)optval)); + break; + +#if LWIP_TCP_KEEPALIVE + case TCP_KEEPIDLE: + *(int *)optval = (int)(sock->conn->pcb.tcp->keep_idle / 1000); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n", + s, *(int *)optval)); + break; + case TCP_KEEPINTVL: + *(int *)optval = (int)(sock->conn->pcb.tcp->keep_intvl / 1000); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n", + s, *(int *)optval)); + break; + case TCP_KEEPCNT: + *(int *)optval = (int)sock->conn->pcb.tcp->keep_cnt; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n", + s, *(int *)optval)); + break; +#endif /* LWIP_TCP_KEEPALIVE */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_TCP */ + +#if LWIP_IPV6 + /* Level: IPPROTO_IPV6 */ + case IPPROTO_IPV6: + switch (optname) { + case IPV6_V6ONLY: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int); + *(int *)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n", + s, *(int *)optval)); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE + /* Level: IPPROTO_UDPLITE */ + case IPPROTO_UDPLITE: + /* Special case: all IPPROTO_UDPLITE option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int); + /* If this is no UDP lite socket, ignore any options. 
*/ + if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) { + done_socket(sock); + return ENOPROTOOPT; + } + switch (optname) { + case UDPLITE_SEND_CSCOV: + *(int *)optval = sock->conn->pcb.udp->chksum_len_tx; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n", + s, (*(int *)optval)) ); + break; + case UDPLITE_RECV_CSCOV: + *(int *)optval = sock->conn->pcb.udp->chksum_len_rx; + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n", + s, (*(int *)optval)) ); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_UDP */ + /* Level: IPPROTO_RAW */ + case IPPROTO_RAW: + switch (optname) { +#if LWIP_IPV6 && LWIP_RAW + case IPV6_CHECKSUM: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW); + if (sock->conn->pcb.raw->chksum_reqd == 0) { + *(int *)optval = -1; + } else { + *(int *)optval = sock->conn->pcb.raw->chksum_offset; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n", + s, (*(int *)optval)) ); + break; +#endif /* LWIP_IPV6 && LWIP_RAW */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n", + s, level, optname)); + err = ENOPROTOOPT; + break; + } /* switch (level) */ + + done_socket(sock); + return err; +} + +int +lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen) +{ + int err = 0; + struct lwip_sock *sock = get_socket(s); +#if !LWIP_TCPIP_CORE_LOCKING + err_t cberr; + LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data); +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + + if (!sock) { + return -1; + } + + if (NULL == optval) { + sock_set_errno(sock, EFAULT); + done_socket(sock); + return -1; + } + +#if LWIP_TCPIP_CORE_LOCKING + /* core-locking can just call the -impl function */ + LOCK_TCPIP_CORE(); + err = lwip_setsockopt_impl(s, level, optname, optval, optlen); + UNLOCK_TCPIP_CORE(); + +#else /* LWIP_TCPIP_CORE_LOCKING */ + +#if LWIP_MPU_COMPATIBLE + /* MPU_COMPATIBLE copies the optval data, so check for max size here */ + if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) { + sock_set_errno(sock, ENOBUFS); + done_socket(sock); + return -1; + } +#endif /* LWIP_MPU_COMPATIBLE */ + + LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock); + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname; + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen; +#if LWIP_MPU_COMPATIBLE + MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen); +#else /* LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void *)optval; +#endif /* LWIP_MPU_COMPATIBLE */ + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0; +#if LWIP_NETCONN_SEM_PER_THREAD + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else + LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed; +#endif + cberr = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data)); + if (cberr != ERR_OK) { + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); + sock_set_errno(sock, err_to_errno(cberr)); + 
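/* posting to tcpip_thread failed, so the callback will never run; release the socket and report the error */ +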
done_socket(sock); + return -1; + } + sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0); + + /* maybe lwip_getsockopt_internal has changed err */ + err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err; + LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data); +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sock_set_errno(sock, err); + done_socket(sock); + return err ? -1 : 0; +} + +#if !LWIP_TCPIP_CORE_LOCKING +/** lwip_setsockopt_callback: only used without CORE_LOCKING + * to get into the tcpip_thread + */ +static void +lwip_setsockopt_callback(void *arg) +{ + struct lwip_setgetsockopt_data *data; + LWIP_ASSERT("arg != NULL", arg != NULL); + data = (struct lwip_setgetsockopt_data *)arg; + + data->err = lwip_setsockopt_impl(data->s, data->level, data->optname, +#if LWIP_MPU_COMPATIBLE + data->optval, +#else /* LWIP_MPU_COMPATIBLE */ + data->optval.pc, +#endif /* LWIP_MPU_COMPATIBLE */ + data->optlen); + + sys_sem_signal((sys_sem_t *)(data->completed_sem)); +} +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +/** lwip_setsockopt_impl: the actual implementation of setsockopt: + * same argument as lwip_setsockopt, either called directly or through callback + */ +static int +lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen) +{ + int err = 0; + struct lwip_sock *sock = tryget_socket(s); + if (!sock) { + return EBADF; + } + +#ifdef LWIP_HOOK_SOCKETS_SETSOCKOPT + if (LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) { + return err; + } +#endif + + switch (level) { + + /* Level: SOL_SOCKET */ + case SOL_SOCKET: + switch (optname) { + + /* SO_ACCEPTCONN is get-only */ + + /* The option flags */ + case SO_BROADCAST: + case SO_KEEPALIVE: +#if SO_REUSE + case SO_REUSEADDR: +#endif /* SO_REUSE */ + if ((optname == SO_BROADCAST) && + (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) { + done_socket(sock); + return ENOPROTOOPT; + } + + optname = lwip_sockopt_to_ipopt(optname); + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + if (*(const int *)optval) { + ip_set_option(sock->conn->pcb.ip, optname); + } else { + ip_reset_option(sock->conn->pcb.ip, optname); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n", + s, optname, (*(const int *)optval ? 
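/* note: optname has already been mapped to the internal SOF_* flag by lwip_sockopt_to_ipopt() */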
"on" : "off"))); + break; + + /* SO_TYPE is get-only */ + /* SO_ERROR is get-only */ + +#if LWIP_SO_SNDTIMEO + case SO_SNDTIMEO: { + long ms_long; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval); + if (ms_long < 0) { + done_socket(sock); + return EINVAL; + } + netconn_set_sendtimeout(sock->conn, ms_long); + break; + } +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO + case SO_RCVTIMEO: { + long ms_long; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE); + ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval); + if (ms_long < 0) { + done_socket(sock); + return EINVAL; + } + netconn_set_recvtimeout(sock->conn, (u32_t)ms_long); + break; + } +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + case SO_RCVBUF: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int); + netconn_set_recvbufsize(sock->conn, *(const int *)optval); + break; +#endif /* LWIP_SO_RCVBUF */ +#if LWIP_SO_LINGER + case SO_LINGER: { + const struct linger *linger = (const struct linger *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger); + if (linger->l_onoff) { + int lingersec = linger->l_linger; + if (lingersec < 0) { + done_socket(sock); + return EINVAL; + } + if (lingersec > 0xFFFF) { + lingersec = 0xFFFF; + } + sock->conn->linger = (s16_t)lingersec; + } else { + sock->conn->linger = -1; + } + } + break; +#endif /* LWIP_SO_LINGER */ +#if LWIP_UDP + case SO_NO_CHECK: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP); +#if LWIP_UDPLITE + if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) { + /* this flag is only available for UDP, not for UDP lite */ + done_socket(sock); + return EAFNOSUPPORT; + } +#endif /* LWIP_UDPLITE */ + if (*(const int *)optval) { + udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } else { + udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM); + } + break; +#endif /* LWIP_UDP */ + case SO_BINDTODEVICE: { + const struct ifreq *iface; + struct netif *n = NULL; + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct ifreq); + + iface = (const struct ifreq *)optval; + if (iface->ifr_name[0] != 0) { + n = netif_find(iface->ifr_name); + if (n == NULL) { + done_socket(sock); + return ENODEV; + } + } + + switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) { +#if LWIP_TCP + case NETCONN_TCP: + tcp_bind_netif(sock->conn->pcb.tcp, n); + break; +#endif +#if LWIP_UDP + case NETCONN_UDP: + udp_bind_netif(sock->conn->pcb.udp, n); + break; +#endif +#if LWIP_RAW + case NETCONN_RAW: + raw_bind_netif(sock->conn->pcb.raw, n); + break; +#endif + default: + LWIP_ASSERT("Unhandled netconn type in SO_BINDTODEVICE", 0); + break; + } + } + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + + /* Level: IPPROTO_IP */ + case IPPROTO_IP: + switch (optname) { + case IP_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + sock->conn->pcb.ip->ttl = (u8_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) 
-> %d\n", + s, sock->conn->pcb.ip->ttl)); + break; + case IP_TOS: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + sock->conn->pcb.ip->tos = (u8_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n", + s, sock->conn->pcb.ip->tos)); + break; +#if LWIP_NETBUF_RECVINFO + case IP_PKTINFO: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP); + if (*(const int *)optval) { + sock->conn->flags |= NETCONN_FLAG_PKTINFO; + } else { + sock->conn->flags &= ~NETCONN_FLAG_PKTINFO; + } + break; +#endif /* LWIP_NETBUF_RECVINFO */ +#if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP + case IP_MULTICAST_TTL: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP); + udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t *)optval)); + break; + case IP_MULTICAST_IF: { + ip4_addr_t if_addr; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP); + inet_addr_to_ip4addr(&if_addr, (const struct in_addr *)optval); + udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr); + } + break; + case IP_MULTICAST_LOOP: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP); + if (*(const u8_t *)optval) { + udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP); + } else { + udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP); + } + break; +#endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */ +#if LWIP_IGMP + case IP_ADD_MEMBERSHIP: + case IP_DROP_MEMBERSHIP: { + /* If this is a TCP or a RAW socket, ignore these options. */ + err_t igmp_err; + const struct ip_mreq *imr = (const struct ip_mreq *)optval; + ip4_addr_t if_addr; + ip4_addr_t multi_addr; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP); + inet_addr_to_ip4addr(&if_addr, &imr->imr_interface); + inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr); + if (optname == IP_ADD_MEMBERSHIP) { + if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) { + /* cannot track membership (out of memory) */ + err = ENOMEM; + igmp_err = ERR_OK; + } else { + igmp_err = igmp_joingroup(&if_addr, &multi_addr); + } + } else { + igmp_err = igmp_leavegroup(&if_addr, &multi_addr); + lwip_socket_unregister_membership(s, &if_addr, &multi_addr); + } + if (igmp_err != ERR_OK) { + err = EADDRNOTAVAIL; + } + } + break; +#endif /* LWIP_IGMP */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + +#if LWIP_TCP + /* Level: IPPROTO_TCP */ + case IPPROTO_TCP: + /* Special case: all IPPROTO_TCP option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP); + if (sock->conn->pcb.tcp->state == LISTEN) { + done_socket(sock); + return EINVAL; + } + switch (optname) { + case TCP_NODELAY: + if (*(const int *)optval) { + tcp_nagle_disable(sock->conn->pcb.tcp); + } else { + tcp_nagle_enable(sock->conn->pcb.tcp); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n", + s, (*(const int *)optval) ? 
"on" : "off") ); + break; + case TCP_KEEPALIVE: + sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_idle)); + break; + +#if LWIP_TCP_KEEPALIVE + case TCP_KEEPIDLE: + sock->conn->pcb.tcp->keep_idle = 1000 * (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_idle)); + break; + case TCP_KEEPINTVL: + sock->conn->pcb.tcp->keep_intvl = 1000 * (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_intvl)); + break; + case TCP_KEEPCNT: + sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int *)optval); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n", + s, sock->conn->pcb.tcp->keep_cnt)); + break; +#endif /* LWIP_TCP_KEEPALIVE */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_TCP*/ + +#if LWIP_IPV6 + /* Level: IPPROTO_IPV6 */ + case IPPROTO_IPV6: + switch (optname) { + case IPV6_V6ONLY: + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + if (*(const int *)optval) { + netconn_set_ipv6only(sock->conn, 1); + } else { + netconn_set_ipv6only(sock->conn, 0); + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n", + s, (netconn_get_ipv6only(sock->conn) ? 1 : 0))); + break; +#if LWIP_IPV6_MLD + case IPV6_JOIN_GROUP: + case IPV6_LEAVE_GROUP: { + /* If this is a TCP or a RAW socket, ignore these options. */ + err_t mld6_err; + struct netif *netif; + ip6_addr_t multi_addr; + const struct ipv6_mreq *imr = (const struct ipv6_mreq *)optval; + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ipv6_mreq, NETCONN_UDP); + inet6_addr_to_ip6addr(&multi_addr, &imr->ipv6mr_multiaddr); + LWIP_ASSERT("Invalid netif index", imr->ipv6mr_interface <= 0xFFu); + netif = netif_get_by_index((u8_t)imr->ipv6mr_interface); + if (netif == NULL) { + err = EADDRNOTAVAIL; + break; + } + + if (optname == IPV6_JOIN_GROUP) { + if (!lwip_socket_register_mld6_membership(s, imr->ipv6mr_interface, &multi_addr)) { + /* cannot track membership (out of memory) */ + err = ENOMEM; + mld6_err = ERR_OK; + } else { + mld6_err = mld6_joingroup_netif(netif, &multi_addr); + } + } else { + mld6_err = mld6_leavegroup_netif(netif, &multi_addr); + lwip_socket_unregister_mld6_membership(s, imr->ipv6mr_interface, &multi_addr); + } + if (mld6_err != ERR_OK) { + err = EADDRNOTAVAIL; + } + } + break; +#endif /* LWIP_IPV6_MLD */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_IPV6 */ + +#if LWIP_UDP && LWIP_UDPLITE + /* Level: IPPROTO_UDPLITE */ + case IPPROTO_UDPLITE: + /* Special case: all IPPROTO_UDPLITE option take an int */ + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int); + /* If this is no UDP lite socket, ignore any options. 
*/ + if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) { + done_socket(sock); + return ENOPROTOOPT; + } + switch (optname) { + case UDPLITE_SEND_CSCOV: + if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) { + /* don't allow illegal values! */ + sock->conn->pcb.udp->chksum_len_tx = 8; + } else { + sock->conn->pcb.udp->chksum_len_tx = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n", + s, (*(const int *)optval)) ); + break; + case UDPLITE_RECV_CSCOV: + if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) { + /* don't allow illegal values! */ + sock->conn->pcb.udp->chksum_len_rx = 8; + } else { + sock->conn->pcb.udp->chksum_len_rx = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n", + s, (*(const int *)optval)) ); + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; +#endif /* LWIP_UDP */ + /* Level: IPPROTO_RAW */ + case IPPROTO_RAW: + switch (optname) { +#if LWIP_IPV6 && LWIP_RAW + case IPV6_CHECKSUM: + /* It should not be possible to disable the checksum generation with ICMPv6 + * as per RFC 3542 chapter 3.1 */ + if (sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) { + done_socket(sock); + return EINVAL; + } + + LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW); + if (*(const int *)optval < 0) { + sock->conn->pcb.raw->chksum_reqd = 0; + } else if (*(const int *)optval & 1) { + /* Per RFC3542, odd offsets are not allowed */ + done_socket(sock); + return EINVAL; + } else { + sock->conn->pcb.raw->chksum_reqd = 1; + sock->conn->pcb.raw->chksum_offset = (u16_t) * (const int *)optval; + } + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) 
-> %d\n", + s, sock->conn->pcb.raw->chksum_reqd)); + break; +#endif /* LWIP_IPV6 && LWIP_RAW */ + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n", + s, optname)); + err = ENOPROTOOPT; + break; + } /* switch (optname) */ + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n", + s, level, optname)); + err = ENOPROTOOPT; + break; + } /* switch (level) */ + + done_socket(sock); + return err; +} + +int +lwip_ioctl(int s, long cmd, void *argp) +{ + struct lwip_sock *sock = get_socket(s); + u8_t val; +#if LWIP_SO_RCVBUF + int recv_avail; +#endif /* LWIP_SO_RCVBUF */ + + if (!sock) { + return -1; + } + + switch (cmd) { +#if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE + case FIONREAD: + if (!argp) { + sock_set_errno(sock, EINVAL); + done_socket(sock); + return -1; + } +#if LWIP_FIONREAD_LINUXMODE + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) { + struct netbuf *nb; + if (sock->lastdata.netbuf) { + nb = sock->lastdata.netbuf; + *((int *)argp) = nb->p->tot_len; + } else { + struct netbuf *rxbuf; + err_t err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &rxbuf, NETCONN_DONTBLOCK); + if (err != ERR_OK) { + *((int *)argp) = 0; + } else { + sock->lastdata.netbuf = rxbuf; + *((int *)argp) = rxbuf->p->tot_len; + } + } + done_socket(sock); + return 0; + } +#endif /* LWIP_FIONREAD_LINUXMODE */ + +#if LWIP_SO_RCVBUF + /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */ + SYS_ARCH_GET(sock->conn->recv_avail, recv_avail); + if (recv_avail < 0) { + recv_avail = 0; + } + + /* Check if there is data left from the last recv operation. /maq 041215 */ + if (sock->lastdata.netbuf) { + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { + recv_avail += sock->lastdata.pbuf->tot_len; + } else { + recv_avail += sock->lastdata.netbuf->p->tot_len; + } + } + *((int *)argp) = recv_avail; + + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t *)argp))); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; +#else /* LWIP_SO_RCVBUF */ + break; +#endif /* LWIP_SO_RCVBUF */ +#endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */ + + case (long)FIONBIO: + val = 0; + if (argp && *(int *)argp) { + val = 1; + } + netconn_set_nonblocking(sock->conn, val); + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val)); + sock_set_errno(sock, 0); + done_socket(sock); + return 0; + + default: + break; + } /* switch (cmd) */ + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp)); + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + done_socket(sock); + return -1; +} + +/** A minimal implementation of fcntl. + * Currently only the commands F_GETFL and F_SETFL are implemented. + * The flag O_NONBLOCK and access modes are supported for F_GETFL, only + * the flag O_NONBLOCK is implemented for F_SETFL. + */ +int +lwip_fcntl(int s, int cmd, int val) +{ + struct lwip_sock *sock = get_socket(s); + int ret = -1; + int op_mode = 0; + + if (!sock) { + return -1; + } + + switch (cmd) { + case F_GETFL: + ret = netconn_is_nonblocking(sock->conn) ? 
O_NONBLOCK : 0; + sock_set_errno(sock, 0); + + if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) { +#if LWIP_TCPIP_CORE_LOCKING + LOCK_TCPIP_CORE(); +#else + SYS_ARCH_DECL_PROTECT(lev); + /* the proper thing to do here would be to get into the tcpip_thread, + but locking should be OK as well since we only *read* some flags */ + SYS_ARCH_PROTECT(lev); +#endif +#if LWIP_TCP + if (sock->conn->pcb.tcp) { + if (!(sock->conn->pcb.tcp->flags & TF_RXCLOSED)) { + op_mode |= O_RDONLY; + } + if (!(sock->conn->pcb.tcp->flags & TF_FIN)) { + op_mode |= O_WRONLY; + } + } +#endif +#if LWIP_TCPIP_CORE_LOCKING + UNLOCK_TCPIP_CORE(); +#else + SYS_ARCH_UNPROTECT(lev); +#endif + } else { + op_mode |= O_RDWR; + } + + /* ensure O_RDWR for (O_RDONLY|O_WRONLY) != O_RDWR cases */ + ret |= (op_mode == (O_RDONLY | O_WRONLY)) ? O_RDWR : op_mode; + + break; + case F_SETFL: + /* Bits corresponding to the file access mode and the file creation flags [..] that are set in arg shall be ignored */ + val &= ~(O_RDONLY | O_WRONLY | O_RDWR); + if ((val & ~O_NONBLOCK) == 0) { + /* only O_NONBLOCK, all other bits are zero */ + netconn_set_nonblocking(sock->conn, val & O_NONBLOCK); + ret = 0; + sock_set_errno(sock, 0); + } else { + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + } + break; + default: + LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val)); + sock_set_errno(sock, ENOSYS); /* not yet implemented */ + break; + } + done_socket(sock); + return ret; +} + +#if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES +int +fcntl(int s, int cmd, ...) +{ + va_list ap; + int val; + + va_start(ap, cmd); + val = va_arg(ap, int); + va_end(ap); + return lwip_fcntl(s, cmd, val); +} +#endif + +const char * +lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size) +{ + const char *ret = NULL; + int size_int = (int)size; + if (size_int < 0) { + set_errno(ENOSPC); + return NULL; + } + switch (af) { +#if LWIP_IPV4 + case AF_INET: + ret = ip4addr_ntoa_r((const ip4_addr_t *)src, dst, size_int); + if (ret == NULL) { + set_errno(ENOSPC); + } + break; +#endif +#if LWIP_IPV6 + case AF_INET6: + ret = ip6addr_ntoa_r((const ip6_addr_t *)src, dst, size_int); + if (ret == NULL) { + set_errno(ENOSPC); + } + break; +#endif + default: + set_errno(EAFNOSUPPORT); + break; + } + return ret; +} + +int +lwip_inet_pton(int af, const char *src, void *dst) +{ + int err; + switch (af) { +#if LWIP_IPV4 + case AF_INET: + err = ip4addr_aton(src, (ip4_addr_t *)dst); + break; +#endif +#if LWIP_IPV6 + case AF_INET6: { + /* convert into temporary variable since ip6_addr_t might be larger + than in6_addr when scopes are enabled */ + ip6_addr_t addr; + err = ip6addr_aton(src, &addr); + if (err) { + memcpy(dst, &addr.addr, sizeof(addr.addr)); + } + break; + } +#endif + default: + err = -1; + set_errno(EAFNOSUPPORT); + break; + } + return err; +} + +#if LWIP_IGMP +/** Register a new IGMP membership. On socket close, the membership is dropped automatically. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). 
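+ * Memberships are recorded in the static socket_ipv4_multicast_memberships table, so at most + * LWIP_SOCKET_MAX_MEMBERSHIPS entries can be in use at once. 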
+ * + * @return 1 on success, 0 on failure + */ +static int +lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return 0; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv4_multicast_memberships[i].sock == NULL) { + socket_ipv4_multicast_memberships[i].sock = sock; + ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr); + ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr); + done_socket(sock); + return 1; + } + } + done_socket(sock); + return 0; +} + +/** Unregister a previously registered membership. This prevents dropping the membership + * on socket close. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if ((socket_ipv4_multicast_memberships[i].sock == sock) && + ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) && + ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) { + socket_ipv4_multicast_memberships[i].sock = NULL; + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr); + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr); + break; + } + } + done_socket(sock); +} + +/** Drop all memberships of a socket that were not dropped explicitly via setsockopt. + * + * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_drop_registered_memberships(int s) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv4_multicast_memberships[i].sock == sock) { + ip_addr_t multi_addr, if_addr; + ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr); + ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr); + socket_ipv4_multicast_memberships[i].sock = NULL; + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr); + ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr); + + netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE); + } + } + done_socket(sock); +} +#endif /* LWIP_IGMP */ + +#if LWIP_IPV6_MLD +/** Register a new MLD6 membership. On socket close, the membership is dropped automatically. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). + * + * @return 1 on success, 0 on failure + */ +static int +lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return 0; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv6_multicast_memberships[i].sock == NULL) { + socket_ipv6_multicast_memberships[i].sock = sock; + socket_ipv6_multicast_memberships[i].if_idx = (u8_t)if_idx; + ip6_addr_copy(socket_ipv6_multicast_memberships[i].multi_addr, *multi_addr); + done_socket(sock); + return 1; + } + } + done_socket(sock); + return 0; +} + +/** Unregister a previously registered MLD6 membership. This prevents dropping the membership + * on socket close. + * + * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK). 
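+ * The entry to remove is matched on socket, interface address and multicast address, exactly as registered. 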
+ */ +static void +lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if ((socket_ipv6_multicast_memberships[i].sock == sock) && + (socket_ipv6_multicast_memberships[i].if_idx == if_idx) && + ip6_addr_cmp(&socket_ipv6_multicast_memberships[i].multi_addr, multi_addr)) { + socket_ipv6_multicast_memberships[i].sock = NULL; + socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX; + ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr); + break; + } + } + done_socket(sock); +} + +/** Drop all MLD6 memberships of a socket that were not dropped explicitly via setsockopt. + * + * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK). + */ +static void +lwip_socket_drop_registered_mld6_memberships(int s) +{ + struct lwip_sock *sock = get_socket(s); + int i; + + if (!sock) { + return; + } + + for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) { + if (socket_ipv6_multicast_memberships[i].sock == sock) { + ip_addr_t multi_addr; + u8_t if_idx; + + ip_addr_copy_from_ip6(multi_addr, socket_ipv6_multicast_memberships[i].multi_addr); + if_idx = socket_ipv6_multicast_memberships[i].if_idx; + + socket_ipv6_multicast_memberships[i].sock = NULL; + socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX; + ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr); + + netconn_join_leave_group_netif(sock->conn, &multi_addr, if_idx, NETCONN_LEAVE); + } + } + done_socket(sock); +} +#endif /* LWIP_IPV6_MLD */ + +#endif /* LWIP_SOCKET */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/tcpip.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/tcpip.c new file mode 100644 index 0000000..34c9a99 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/api/tcpip.c @@ -0,0 +1,658 @@ +/** + * @file + * Sequential API Main thread module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if !NO_SYS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcpip_priv.h" +#include "lwip/sys.h" +#include "lwip/memp.h" +#include "lwip/mem.h" +#include "lwip/init.h" +#include "lwip/ip.h" +#include "lwip/pbuf.h" +#include "lwip/etharp.h" +#include "netif/ethernet.h" + +#define TCPIP_MSG_VAR_REF(name) API_VAR_REF(name) +#define TCPIP_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct tcpip_msg, name) +#define TCPIP_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct tcpip_msg, MEMP_TCPIP_MSG_API, name, ERR_MEM) +#define TCPIP_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_TCPIP_MSG_API, name) + +/* global variables */ +static tcpip_init_done_fn tcpip_init_done; +static void *tcpip_init_done_arg; +static sys_mbox_t tcpip_mbox; + +#if LWIP_TCPIP_CORE_LOCKING +/** The global semaphore to lock the stack. */ +sys_mutex_t lock_tcpip_core; +#endif /* LWIP_TCPIP_CORE_LOCKING */ + +static void tcpip_thread_handle_msg(struct tcpip_msg *msg); + +#if !LWIP_TIMERS +/* wait for a message with timers disabled (e.g. pass a timer-check trigger into tcpip_thread) */ +#define TCPIP_MBOX_FETCH(mbox, msg) sys_mbox_fetch(mbox, msg) +#else /* !LWIP_TIMERS */ +/* wait for a message, timeouts are processed while waiting */ +#define TCPIP_MBOX_FETCH(mbox, msg) tcpip_timeouts_mbox_fetch(mbox, msg) +/** + * Wait (forever) for a message to arrive in an mbox. + * While waiting, timeouts are processed. + * + * @param mbox the mbox to fetch the message from + * @param msg the place to store the message + */ +static void +tcpip_timeouts_mbox_fetch(sys_mbox_t *mbox, void **msg) +{ + u32_t sleeptime, res; + +again: + LWIP_ASSERT_CORE_LOCKED(); + + sleeptime = sys_timeouts_sleeptime(); + if (sleeptime == SYS_TIMEOUTS_SLEEPTIME_INFINITE) { + UNLOCK_TCPIP_CORE(); + sys_arch_mbox_fetch(mbox, msg, 0); + LOCK_TCPIP_CORE(); + return; + } else if (sleeptime == 0) { + sys_check_timeouts(); + /* We try again to fetch a message from the mbox. */ + goto again; + } + + UNLOCK_TCPIP_CORE(); + res = sys_arch_mbox_fetch(mbox, msg, sleeptime); + LOCK_TCPIP_CORE(); + if (res == SYS_ARCH_TIMEOUT) { + /* If a SYS_ARCH_TIMEOUT value is returned, a timeout occurred + before a message could be fetched. */ + sys_check_timeouts(); + /* We try again to fetch a message from the mbox. */ + goto again; + } +} +#endif /* !LWIP_TIMERS */ + +/** + * The main lwIP thread. This thread has exclusive access to lwIP core functions + * (unless access to them is not locked). Other threads communicate with this + * thread using message boxes. + * + * It also starts all the timers to make sure they are running in the right + * thread context. 
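+ * Applications never call this function directly; tcpip_init() creates the thread via sys_thread_new(). 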
+ * + * @param arg unused argument + */ +static void +tcpip_thread(void *arg) +{ + struct tcpip_msg *msg; + LWIP_UNUSED_ARG(arg); + + LWIP_MARK_TCPIP_THREAD(); + + LOCK_TCPIP_CORE(); + if (tcpip_init_done != NULL) { + tcpip_init_done(tcpip_init_done_arg); + } + + while (1) { /* MAIN Loop */ + LWIP_TCPIP_THREAD_ALIVE(); + /* wait for a message, timeouts are processed while waiting */ + TCPIP_MBOX_FETCH(&tcpip_mbox, (void **)&msg); + if (msg == NULL) { + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: NULL\n")); + LWIP_ASSERT("tcpip_thread: invalid message", 0); + continue; + } + tcpip_thread_handle_msg(msg); + } +} + +/* Handle a single tcpip_msg + * This is in its own function for access by tests only. + */ +static void +tcpip_thread_handle_msg(struct tcpip_msg *msg) +{ + switch (msg->type) { +#if !LWIP_TCPIP_CORE_LOCKING + case TCPIP_MSG_API: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API message %p\n", (void *)msg)); + msg->msg.api_msg.function(msg->msg.api_msg.msg); + break; + case TCPIP_MSG_API_CALL: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API CALL message %p\n", (void *)msg)); + msg->msg.api_call.arg->err = msg->msg.api_call.function(msg->msg.api_call.arg); + sys_sem_signal(msg->msg.api_call.sem); + break; +#endif /* !LWIP_TCPIP_CORE_LOCKING */ + +#if !LWIP_TCPIP_CORE_LOCKING_INPUT + case TCPIP_MSG_INPKT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: PACKET %p\n", (void *)msg)); + if (msg->msg.inp.input_fn(msg->msg.inp.p, msg->msg.inp.netif) != ERR_OK) { + pbuf_free(msg->msg.inp.p); + } + memp_free(MEMP_TCPIP_MSG_INPKT, msg); + break; +#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */ + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS + case TCPIP_MSG_TIMEOUT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: TIMEOUT %p\n", (void *)msg)); + sys_timeout(msg->msg.tmo.msecs, msg->msg.tmo.h, msg->msg.tmo.arg); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; + case TCPIP_MSG_UNTIMEOUT: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: UNTIMEOUT %p\n", (void *)msg)); + sys_untimeout(msg->msg.tmo.h, msg->msg.tmo.arg); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + + case TCPIP_MSG_CALLBACK: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK %p\n", (void *)msg)); + msg->msg.cb.function(msg->msg.cb.ctx); + memp_free(MEMP_TCPIP_MSG_API, msg); + break; + + case TCPIP_MSG_CALLBACK_STATIC: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK_STATIC %p\n", (void *)msg)); + msg->msg.cb.function(msg->msg.cb.ctx); + break; + + default: + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: %d\n", msg->type)); + LWIP_ASSERT("tcpip_thread: invalid message", 0); + break; + } +} + +#ifdef TCPIP_THREAD_TEST +/** Work on queued items in single-threaded test mode */ +int +tcpip_thread_poll_one(void) +{ + int ret = 0; + struct tcpip_msg *msg; + + if (sys_arch_mbox_tryfetch(&tcpip_mbox, (void **)&msg) != SYS_ARCH_TIMEOUT) { + LOCK_TCPIP_CORE(); + if (msg != NULL) { + tcpip_thread_handle_msg(msg); + ret = 1; + } + UNLOCK_TCPIP_CORE(); + } + return ret; +} +#endif + +/** + * Pass a received packet to tcpip_thread for input processing + * + * @param p the received packet + * @param inp the network interface on which the packet was received + * @param input_fn input function to call + */ +err_t +tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn input_fn) +{ +#if LWIP_TCPIP_CORE_LOCKING_INPUT + err_t ret; + LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_inpkt: PACKET %p/%p\n", (void *)p, (void *)inp)); + LOCK_TCPIP_CORE(); + ret = input_fn(p, inp); + UNLOCK_TCPIP_CORE(); + return 
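/* with core locking for input, the packet was processed synchronously under the lock */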
ret; +#else /* LWIP_TCPIP_CORE_LOCKING_INPUT */ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_INPKT); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_INPKT; + msg->msg.inp.p = p; + msg->msg.inp.netif = inp; + msg->msg.inp.input_fn = input_fn; + if (sys_mbox_trypost(&tcpip_mbox, msg) != ERR_OK) { + memp_free(MEMP_TCPIP_MSG_INPKT, msg); + return ERR_MEM; + } + return ERR_OK; +#endif /* LWIP_TCPIP_CORE_LOCKING_INPUT */ +} + +/** + * @ingroup lwip_os + * Pass a received packet to tcpip_thread for input processing with + * ethernet_input or ip_input. Don't call directly, pass to netif_add() + * and call netif->input(). + * + * @param p the received packet, p->payload pointing to the Ethernet header or + * to an IP header (if inp doesn't have NETIF_FLAG_ETHARP or + * NETIF_FLAG_ETHERNET flags) + * @param inp the network interface on which the packet was received + */ +err_t +tcpip_input(struct pbuf *p, struct netif *inp) +{ +#if LWIP_ETHERNET + if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { + return tcpip_inpkt(p, inp, ethernet_input); + } else +#endif /* LWIP_ETHERNET */ + return tcpip_inpkt(p, inp, ip_input); +} + +/** + * @ingroup lwip_os + * Call a specific function in the thread context of + * tcpip_thread for easy access synchronization. + * A function called in that way may access lwIP core code + * without fearing concurrent access. + * Blocks until the request is posted. + * Must not be called from interrupt context! + * + * @param function the function to call + * @param ctx parameter passed to f + * @return ERR_OK if the function was called, another err_t if not + * + * @see tcpip_try_callback + */ +err_t +tcpip_callback(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_CALLBACK; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} + +/** + * @ingroup lwip_os + * Call a specific function in the thread context of + * tcpip_thread for easy access synchronization. + * A function called in that way may access lwIP core code + * without fearing concurrent access. + * Does NOT block when the request cannot be posted because the + * tcpip_mbox is full, but returns ERR_MEM instead. + * Can be called from interrupt context. 
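+ * Example usage (an illustrative sketch only; my_dpc and my_irq_ctx are not lwIP names): + * + * static void my_dpc(void *ctx) { + * // deferred work, runs later in tcpip_thread context + * } + * + * // from the Ethernet ISR: + * if (tcpip_try_callback(my_dpc, my_irq_ctx) != ERR_OK) { + * // mbox full or out of memory: drop the event or retry from task level + * } 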
+ * + * @param function the function to call + * @param ctx parameter passed to f + * @return ERR_OK if the function was called, another err_t if not + * + * @see tcpip_callback + */ +err_t +tcpip_try_callback(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_CALLBACK; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + + if (sys_mbox_trypost(&tcpip_mbox, msg) != ERR_OK) { + memp_free(MEMP_TCPIP_MSG_API, msg); + return ERR_MEM; + } + return ERR_OK; +} + +#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS +/** + * call sys_timeout in tcpip_thread + * + * @param msecs time in milliseconds for timeout + * @param h function to be called on timeout + * @param arg argument to pass to timeout function h + * @return ERR_MEM on memory error, ERR_OK otherwise + */ +err_t +tcpip_timeout(u32_t msecs, sys_timeout_handler h, void *arg) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_TIMEOUT; + msg->msg.tmo.msecs = msecs; + msg->msg.tmo.h = h; + msg->msg.tmo.arg = arg; + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} + +/** + * call sys_untimeout in tcpip_thread + * + * @param h function to be called on timeout + * @param arg argument to pass to timeout function h + * @return ERR_MEM on memory error, ERR_OK otherwise + */ +err_t +tcpip_untimeout(sys_timeout_handler h, void *arg) +{ + struct tcpip_msg *msg; + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return ERR_MEM; + } + + msg->type = TCPIP_MSG_UNTIMEOUT; + msg->msg.tmo.h = h; + msg->msg.tmo.arg = arg; + sys_mbox_post(&tcpip_mbox, msg); + return ERR_OK; +} +#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */ + + +/** + * Sends a message to TCPIP thread to call a function. Caller thread blocks on + * on a provided semaphore, which ist NOT automatically signalled by TCPIP thread, + * this has to be done by the user. + * It is recommended to use LWIP_TCPIP_CORE_LOCKING since this is the way + * with least runtime overhead. + * + * @param fn function to be called from TCPIP thread + * @param apimsg argument to API function + * @param sem semaphore to wait on + * @return ERR_OK if the function was called, another err_t if not + */ +err_t +tcpip_send_msg_wait_sem(tcpip_callback_fn fn, void *apimsg, sys_sem_t *sem) +{ +#if LWIP_TCPIP_CORE_LOCKING + LWIP_UNUSED_ARG(sem); + LOCK_TCPIP_CORE(); + fn(apimsg); + UNLOCK_TCPIP_CORE(); + return ERR_OK; +#else /* LWIP_TCPIP_CORE_LOCKING */ + TCPIP_MSG_VAR_DECLARE(msg); + + LWIP_ASSERT("semaphore not initialized", sys_sem_valid(sem)); + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + TCPIP_MSG_VAR_ALLOC(msg); + TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API; + TCPIP_MSG_VAR_REF(msg).msg.api_msg.function = fn; + TCPIP_MSG_VAR_REF(msg).msg.api_msg.msg = apimsg; + sys_mbox_post(&tcpip_mbox, &TCPIP_MSG_VAR_REF(msg)); + sys_arch_sem_wait(sem, 0); + TCPIP_MSG_VAR_FREE(msg); + return ERR_OK; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} + +/** + * Synchronously calls function in TCPIP thread and waits for its completion. + * It is recommended to use LWIP_TCPIP_CORE_LOCKING (preferred) or + * LWIP_NETCONN_SEM_PER_THREAD. 
+ * If not, a semaphore is created and destroyed on every call which is usually + * an expensive/slow operation. + * @param fn Function to call + * @param call Call parameters + * @return Return value from tcpip_api_call_fn + */ +err_t +tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call) +{ +#if LWIP_TCPIP_CORE_LOCKING + err_t err; + LOCK_TCPIP_CORE(); + err = fn(call); + UNLOCK_TCPIP_CORE(); + return err; +#else /* LWIP_TCPIP_CORE_LOCKING */ + TCPIP_MSG_VAR_DECLARE(msg); + +#if !LWIP_NETCONN_SEM_PER_THREAD + err_t err = sys_sem_new(&call->sem, 0); + if (err != ERR_OK) { + return err; + } +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + + TCPIP_MSG_VAR_ALLOC(msg); + TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API_CALL; + TCPIP_MSG_VAR_REF(msg).msg.api_call.arg = call; + TCPIP_MSG_VAR_REF(msg).msg.api_call.function = fn; +#if LWIP_NETCONN_SEM_PER_THREAD + TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = LWIP_NETCONN_THREAD_SEM_GET(); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ + TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = &call->sem; +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + sys_mbox_post(&tcpip_mbox, &TCPIP_MSG_VAR_REF(msg)); + sys_arch_sem_wait(TCPIP_MSG_VAR_REF(msg).msg.api_call.sem, 0); + TCPIP_MSG_VAR_FREE(msg); + +#if !LWIP_NETCONN_SEM_PER_THREAD + sys_sem_free(&call->sem); +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + + return call->err; +#endif /* LWIP_TCPIP_CORE_LOCKING */ +} + +/** + * @ingroup lwip_os + * Allocate a structure for a static callback message and initialize it. + * The message has a special type such that lwIP never frees it. + * This is intended to be used to send "static" messages from interrupt context, + * e.g. the message is allocated once and posted several times from an IRQ + * using tcpip_callbackmsg_trycallback(). + * Example usage: Trigger execution of an ethernet IRQ DPC routine in lwIP thread context. + * + * @param function the function to call + * @param ctx parameter passed to function + * @return a struct pointer to pass to tcpip_callbackmsg_trycallback(). + * + * @see tcpip_callbackmsg_trycallback() + * @see tcpip_callbackmsg_delete() + */ +struct tcpip_callback_msg * +tcpip_callbackmsg_new(tcpip_callback_fn function, void *ctx) +{ + struct tcpip_msg *msg = (struct tcpip_msg *)memp_malloc(MEMP_TCPIP_MSG_API); + if (msg == NULL) { + return NULL; + } + msg->type = TCPIP_MSG_CALLBACK_STATIC; + msg->msg.cb.function = function; + msg->msg.cb.ctx = ctx; + return (struct tcpip_callback_msg *)msg; +} + +/** + * @ingroup lwip_os + * Free a callback message allocated by tcpip_callbackmsg_new(). + * + * @param msg the message to free + * + * @see tcpip_callbackmsg_new() + */ +void +tcpip_callbackmsg_delete(struct tcpip_callback_msg *msg) +{ + memp_free(MEMP_TCPIP_MSG_API, msg); +} + +/** + * @ingroup lwip_os + * Try to post a callback-message to the tcpip_thread tcpip_mbox. + * + * @param msg pointer to the message to post + * @return sys_mbox_trypost() return code + * + * @see tcpip_callbackmsg_new() + */ +err_t +tcpip_callbackmsg_trycallback(struct tcpip_callback_msg *msg) +{ + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + return sys_mbox_trypost(&tcpip_mbox, msg); +} + +/** + * @ingroup lwip_os + * Try to post a callback-message to the tcpip_thread mbox. + * Same as @ref tcpip_callbackmsg_trycallback but calls sys_mbox_trypost_fromisr(), + * mainly to help FreeRTOS, where calls differ between task level and ISR level. 
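+ * Example usage (an illustrative sketch; my_rx_msg would be allocated once with tcpip_callbackmsg_new()): + * + * // inside the ISR: + * if (tcpip_callbackmsg_trycallback_fromisr(my_rx_msg) != ERR_OK) { + * // mbox full: the static message stays valid and can be re-posted later + * } 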
+ * + * @param msg pointer to the message to post + * @return sys_mbox_trypost_fromisr() return code (without change, so this + * knowledge can be used to e.g. propagate "bool needs_scheduling") + * + * @see tcpip_callbackmsg_new() + */ +err_t +tcpip_callbackmsg_trycallback_fromisr(struct tcpip_callback_msg *msg) +{ + LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(tcpip_mbox)); + return sys_mbox_trypost_fromisr(&tcpip_mbox, msg); +} + +/** + * @ingroup lwip_os + * Initialize this module: + * - initialize all sub modules + * - start the tcpip_thread + * + * @param initfunc a function to call when tcpip_thread is running and finished initializing + * @param arg argument to pass to initfunc + */ +void +tcpip_init(tcpip_init_done_fn initfunc, void *arg) +{ + lwip_init(); + + tcpip_init_done = initfunc; + tcpip_init_done_arg = arg; + if (sys_mbox_new(&tcpip_mbox, TCPIP_MBOX_SIZE) != ERR_OK) { + LWIP_ASSERT("failed to create tcpip_thread mbox", 0); + } +#if LWIP_TCPIP_CORE_LOCKING + if (sys_mutex_new(&lock_tcpip_core) != ERR_OK) { + LWIP_ASSERT("failed to create lock_tcpip_core", 0); + } +#endif /* LWIP_TCPIP_CORE_LOCKING */ + + sys_thread_new(TCPIP_THREAD_NAME, tcpip_thread, NULL, TCPIP_THREAD_STACKSIZE, TCPIP_THREAD_PRIO); +} + +/** + * Simple callback function used with tcpip_callback to free a pbuf + * (pbuf_free has a wrong signature for tcpip_callback) + * + * @param p The pbuf (chain) to be dereferenced. + */ +static void +pbuf_free_int(void *p) +{ + struct pbuf *q = (struct pbuf *)p; + pbuf_free(q); +} + +/** + * A simple wrapper function that allows you to free a pbuf from interrupt context. + * + * @param p The pbuf (chain) to be dereferenced. + * @return ERR_OK if callback could be enqueued, an err_t if not + */ +err_t +pbuf_free_callback(struct pbuf *p) +{ + return tcpip_try_callback(pbuf_free_int, p); +} + +/** + * A simple wrapper function that allows you to free heap memory from + * interrupt context. + * + * @param m the heap memory to free + * @return ERR_OK if callback could be enqueued, an err_t if not + */ +err_t +mem_free_callback(void *m) +{ + return tcpip_try_callback(mem_free, m); +} + +#endif /* !NO_SYS */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c new file mode 100644 index 0000000..f785ebe --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/apps/mqtt/mqtt.c @@ -0,0 +1,1463 @@ +/** + * @file + * MQTT client + * + * @defgroup mqtt MQTT client + * @ingroup apps + * @verbinclude mqtt_client.txt + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack + * + * Author: Erik Andersson + * + * + * @todo: + * - Handle large outgoing payloads for PUBLISH messages + * - Fix restriction of a single topic in each (UN)SUBSCRIBE message (protocol has support for multiple topics) + * - Add support for legacy MQTT protocol version + * + * Please coordinate changes and requests with Erik Andersson + * Erik Andersson + * + */ +#include "lwip/apps/mqtt.h" +#include "lwip/apps/mqtt_priv.h" +#include "lwip/timeouts.h" +#include "lwip/ip_addr.h" +#include "lwip/mem.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/altcp.h" +#include "lwip/altcp_tcp.h" +#include "lwip/altcp_tls.h" +#include + +#if LWIP_TCP && LWIP_CALLBACK_API + +/** + * MQTT_DEBUG: Default is off. + */ +#if !defined MQTT_DEBUG || defined __DOXYGEN__ +#define MQTT_DEBUG LWIP_DBG_OFF +#endif + +#define MQTT_DEBUG_TRACE (MQTT_DEBUG | LWIP_DBG_TRACE) +#define MQTT_DEBUG_STATE (MQTT_DEBUG | LWIP_DBG_STATE) +#define MQTT_DEBUG_WARN (MQTT_DEBUG | LWIP_DBG_LEVEL_WARNING) +#define MQTT_DEBUG_WARN_STATE (MQTT_DEBUG | LWIP_DBG_LEVEL_WARNING | LWIP_DBG_STATE) +#define MQTT_DEBUG_SERIOUS (MQTT_DEBUG | LWIP_DBG_LEVEL_SERIOUS) + + + +/** + * MQTT client connection states + */ +enum { + TCP_DISCONNECTED, + TCP_CONNECTING, + MQTT_CONNECTING, + MQTT_CONNECTED +}; + +/** + * MQTT control message types + */ +enum mqtt_message_type { + MQTT_MSG_TYPE_CONNECT = 1, + MQTT_MSG_TYPE_CONNACK = 2, + MQTT_MSG_TYPE_PUBLISH = 3, + MQTT_MSG_TYPE_PUBACK = 4, + MQTT_MSG_TYPE_PUBREC = 5, + MQTT_MSG_TYPE_PUBREL = 6, + MQTT_MSG_TYPE_PUBCOMP = 7, + MQTT_MSG_TYPE_SUBSCRIBE = 8, + MQTT_MSG_TYPE_SUBACK = 9, + MQTT_MSG_TYPE_UNSUBSCRIBE = 10, + MQTT_MSG_TYPE_UNSUBACK = 11, + MQTT_MSG_TYPE_PINGREQ = 12, + MQTT_MSG_TYPE_PINGRESP = 13, + MQTT_MSG_TYPE_DISCONNECT = 14 +}; + +/** Helpers to extract control packet type and qos from first byte in fixed header */ +#define MQTT_CTL_PACKET_TYPE(fixed_hdr_byte0) ((fixed_hdr_byte0 & 0xf0) >> 4) +#define MQTT_CTL_PACKET_QOS(fixed_hdr_byte0) ((fixed_hdr_byte0 & 0x6) >> 1) + +/** + * MQTT connect flags, only used in CONNECT message + */ +enum mqtt_connect_flag { + MQTT_CONNECT_FLAG_USERNAME = 1 << 7, + MQTT_CONNECT_FLAG_PASSWORD = 1 << 6, + MQTT_CONNECT_FLAG_WILL_RETAIN = 1 << 5, + MQTT_CONNECT_FLAG_WILL = 1 << 2, + MQTT_CONNECT_FLAG_CLEAN_SESSION = 1 << 1 +}; + + +static void mqtt_cyclic_timer(void *arg); + +#if defined(LWIP_DEBUG) +static const char *const mqtt_message_type_str[15] = { + "UNDEFINED", + "CONNECT", + "CONNACK", + "PUBLISH", + "PUBACK", + "PUBREC", + "PUBREL", + "PUBCOMP", + "SUBSCRIBE", + "SUBACK", + "UNSUBSCRIBE", + "UNSUBACK", + "PINGREQ", + "PINGRESP", + "DISCONNECT" +}; + +/** + * Message type value to string + * @param msg_type see enum mqtt_message_type + * + * @return 
Control message type text string + */ +static const char * +mqtt_msg_type_to_str(u8_t msg_type) +{ + if (msg_type >= LWIP_ARRAYSIZE(mqtt_message_type_str)) { + msg_type = 0; + } + return mqtt_message_type_str[msg_type]; +} + +#endif + + +/** + * Generate MQTT packet identifier + * @param client MQTT client + * @return New packet identifier, range 1 to 65535 + */ +static u16_t +msg_generate_packet_id(mqtt_client_t *client) +{ + client->pkt_id_seq++; + if (client->pkt_id_seq == 0) { + client->pkt_id_seq++; + } + return client->pkt_id_seq; +} + +/*--------------------------------------------------------------------------------------------------------------------- */ +/* Output ring buffer */ + +/** Add single item to ring buffer */ +static void +mqtt_ringbuf_put(struct mqtt_ringbuf_t *rb, u8_t item) +{ + rb->buf[rb->put] = item; + rb->put++; + if (rb->put >= MQTT_OUTPUT_RINGBUF_SIZE) { + rb->put = 0; + } +} + +/** Return pointer to ring buffer get position */ +static u8_t * +mqtt_ringbuf_get_ptr(struct mqtt_ringbuf_t *rb) +{ + return &rb->buf[rb->get]; +} + +static void +mqtt_ringbuf_advance_get_idx(struct mqtt_ringbuf_t *rb, u16_t len) +{ + LWIP_ASSERT("mqtt_ringbuf_advance_get_idx: len < MQTT_OUTPUT_RINGBUF_SIZE", len < MQTT_OUTPUT_RINGBUF_SIZE); + + rb->get += len; + if (rb->get >= MQTT_OUTPUT_RINGBUF_SIZE) { + rb->get = rb->get - MQTT_OUTPUT_RINGBUF_SIZE; + } +} + +/** Return number of bytes in ring buffer */ +static u16_t +mqtt_ringbuf_len(struct mqtt_ringbuf_t *rb) +{ + u32_t len = rb->put - rb->get; + if (len > 0xFFFF) { + len += MQTT_OUTPUT_RINGBUF_SIZE; + } + return (u16_t)len; +} + +/** Return number of bytes free in ring buffer */ +#define mqtt_ringbuf_free(rb) (MQTT_OUTPUT_RINGBUF_SIZE - mqtt_ringbuf_len(rb)) + +/** Return number of bytes possible to read without wrapping around */ +#define mqtt_ringbuf_linear_read_length(rb) LWIP_MIN(mqtt_ringbuf_len(rb), (MQTT_OUTPUT_RINGBUF_SIZE - (rb)->get)) + +/** + * Try send as many bytes as possible from output ring buffer + * @param rb Output ring buffer + * @param tpcb TCP connection handle + */ +static void +mqtt_output_send(struct mqtt_ringbuf_t *rb, struct altcp_pcb *tpcb) +{ + err_t err; + u8_t wrap = 0; + u16_t ringbuf_lin_len = mqtt_ringbuf_linear_read_length(rb); + u16_t send_len = altcp_sndbuf(tpcb); + LWIP_ASSERT("mqtt_output_send: tpcb != NULL", tpcb != NULL); + + if (send_len == 0 || ringbuf_lin_len == 0) { + return; + } + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_output_send: tcp_sndbuf: %d bytes, ringbuf_linear_available: %d, get %d, put %d\n", + send_len, ringbuf_lin_len, rb->get, rb->put)); + + if (send_len > ringbuf_lin_len) { + /* Space in TCP output buffer is larger than available in ring buffer linear portion */ + send_len = ringbuf_lin_len; + /* Wrap around if more data in ring buffer after linear portion */ + wrap = (mqtt_ringbuf_len(rb) > ringbuf_lin_len); + } + err = altcp_write(tpcb, mqtt_ringbuf_get_ptr(rb), send_len, TCP_WRITE_FLAG_COPY | (wrap ? 
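/* more queued data follows after the wrap, so hint TCP that another write comes next */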
TCP_WRITE_FLAG_MORE : 0));
+  if ((err == ERR_OK) && wrap) {
+    mqtt_ringbuf_advance_get_idx(rb, send_len);
+    /* Use the lesser one of ring buffer linear length and TCP send buffer size */
+    send_len = LWIP_MIN(altcp_sndbuf(tpcb), mqtt_ringbuf_linear_read_length(rb));
+    err = altcp_write(tpcb, mqtt_ringbuf_get_ptr(rb), send_len, TCP_WRITE_FLAG_COPY);
+  }
+
+  if (err == ERR_OK) {
+    mqtt_ringbuf_advance_get_idx(rb, send_len);
+    /* Flush */
+    altcp_output(tpcb);
+  } else {
+    LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_output_send: Send failed with err %d (\"%s\")\n", err, lwip_strerr(err)));
+  }
+}
+
+/*--------------------------------------------------------------------------------------------------------------------- */
+/* Request queue */
+
+/**
+ * Create request item
+ * @param r_objs Pointer to request objects
+ * @param r_objs_len Number of array entries
+ * @param pkt_id Packet identifier of request
+ * @param cb Packet callback to call when the request's lifetime ends
+ * @param arg User argument passed to the callback
+ * @return Request or NULL if failed to create
+ */
+static struct mqtt_request_t *
+mqtt_create_request(struct mqtt_request_t *r_objs, size_t r_objs_len, u16_t pkt_id, mqtt_request_cb_t cb, void *arg)
+{
+  struct mqtt_request_t *r = NULL;
+  u8_t n;
+  LWIP_ASSERT("mqtt_create_request: r_objs != NULL", r_objs != NULL);
+  for (n = 0; n < r_objs_len; n++) {
+    /* An item points to itself if it is not in use */
+    if (r_objs[n].next == &r_objs[n]) {
+      r = &r_objs[n];
+      r->next = NULL;
+      r->cb = cb;
+      r->arg = arg;
+      r->pkt_id = pkt_id;
+      break;
+    }
+  }
+  return r;
+}
+
+/**
+ * Append request to pending request queue
+ * @param tail Pointer to request queue tail pointer
+ * @param r Request to append
+ */
+static void
+mqtt_append_request(struct mqtt_request_t **tail, struct mqtt_request_t *r)
+{
+  struct mqtt_request_t *head = NULL;
+  s16_t time_before = 0;
+  struct mqtt_request_t *iter;
+
+  LWIP_ASSERT("mqtt_append_request: tail != NULL", tail != NULL);
+
+  /* Iterate through the queue to find the head, and count total timeout time */
+  for (iter = *tail; iter != NULL; iter = iter->next) {
+    time_before += iter->timeout_diff;
+    head = iter;
+  }
+
+  LWIP_ASSERT("mqtt_append_request: time_before <= MQTT_REQ_TIMEOUT", time_before <= MQTT_REQ_TIMEOUT);
+  r->timeout_diff = MQTT_REQ_TIMEOUT - time_before;
+  if (head == NULL) {
+    *tail = r;
+  } else {
+    head->next = r;
+  }
+}
+
+/**
+ * Delete request item
+ * @param r Request item to delete
+ */
+static void
+mqtt_delete_request(struct mqtt_request_t *r)
+{
+  if (r != NULL) {
+    r->next = r;
+  }
+}
+
+/**
+ * Remove a request item with a specific packet identifier from request queue
+ * @param tail Pointer to request queue tail pointer
+ * @param pkt_id Packet identifier of request to take
+ * @return Request item if found, NULL if not
+ */
+static struct mqtt_request_t *
+mqtt_take_request(struct mqtt_request_t **tail, u16_t pkt_id)
+{
+  struct mqtt_request_t *iter = NULL, *prev = NULL;
+  LWIP_ASSERT("mqtt_take_request: tail != NULL", tail != NULL);
+  /* Search all requests for pkt_id */
+  for (iter = *tail; iter != NULL; iter = iter->next) {
+    if (iter->pkt_id == pkt_id) {
+      break;
+    }
+    prev = iter;
+  }
+
+  /* If request was found */
+  if (iter != NULL) {
+    /* unchain */
+    if (prev == NULL) {
+      *tail = iter->next;
+    } else {
+      prev->next = iter->next;
+    }
+    /* If a next item exists, add this request's remaining timeout time to it */
+    if (iter->next != NULL) {
+      iter->next->timeout_diff += iter->timeout_diff;
+    }
+    iter->next = NULL;
+  }
+  return iter;
+}
+
+/**
+ * Handle request timeouts
+ * @param tail Pointer to request queue tail pointer
+ * @param t Time since last call in seconds
+ */
+static void
+mqtt_request_time_elapsed(struct mqtt_request_t **tail, u8_t t)
+{
+  struct mqtt_request_t *r;
+  LWIP_ASSERT("mqtt_request_time_elapsed: tail != NULL", tail != NULL);
+  r = *tail;
+  while (t > 0 && r != NULL) {
+    if (t >= r->timeout_diff) {
+      t -= (u8_t)r->timeout_diff;
+      /* Unchain */
+      *tail = r->next;
+      /* Notify upper layer about timeout */
+      if (r->cb != NULL) {
+        r->cb(r->arg, ERR_TIMEOUT);
+      }
+      mqtt_delete_request(r);
+      /* Tail might be modified in the callback, so re-read it in every iteration */
+      r = *(struct mqtt_request_t *const volatile *)tail;
+    } else {
+      r->timeout_diff -= t;
+      t = 0;
+    }
+  }
+}
+
+/**
+ * Free all request items
+ * @param tail Pointer to request queue tail pointer
+ */
+static void
+mqtt_clear_requests(struct mqtt_request_t **tail)
+{
+  struct mqtt_request_t *iter, *next;
+  LWIP_ASSERT("mqtt_clear_requests: tail != NULL", tail != NULL);
+  for (iter = *tail; iter != NULL; iter = next) {
+    next = iter->next;
+    mqtt_delete_request(iter);
+  }
+  *tail = NULL;
+}
+
+/**
+ * Initialize all request items
+ * @param r_objs Pointer to request objects
+ * @param r_objs_len Number of array entries
+ */
+static void
+mqtt_init_requests(struct mqtt_request_t *r_objs, size_t r_objs_len)
+{
+  u8_t n;
+  LWIP_ASSERT("mqtt_init_requests: r_objs != NULL", r_objs != NULL);
+  for (n = 0; n < r_objs_len; n++) {
+    /* Item pointing to itself indicates unused */
+    r_objs[n].next = &r_objs[n];
+  }
+}
+
+/*--------------------------------------------------------------------------------------------------------------------- */
+/* Output message build helpers */
+
+static void
+mqtt_output_append_u8(struct mqtt_ringbuf_t *rb, u8_t value)
+{
+  mqtt_ringbuf_put(rb, value);
+}
+
+static void
+mqtt_output_append_u16(struct mqtt_ringbuf_t *rb, u16_t value)
+{
+  mqtt_ringbuf_put(rb, value >> 8);
+  mqtt_ringbuf_put(rb, value & 0xff);
+}
+
+static void
+mqtt_output_append_buf(struct mqtt_ringbuf_t *rb, const void *data, u16_t length)
+{
+  u16_t n;
+  for (n = 0; n < length; n++) {
+    mqtt_ringbuf_put(rb, ((const u8_t *)data)[n]);
+  }
+}
+
+static void
+mqtt_output_append_string(struct mqtt_ringbuf_t *rb, const char *str, u16_t length)
+{
+  u16_t n;
+  mqtt_ringbuf_put(rb, length >> 8);
+  mqtt_ringbuf_put(rb, length & 0xff);
+  for (n = 0; n < length; n++) {
+    mqtt_ringbuf_put(rb, str[n]);
+  }
+}
+
+/**
+ * Append fixed header
+ * @param rb Output ring buffer
+ * @param msg_type see enum mqtt_message_type
+ * @param fdup MQTT DUP flag
+ * @param fqos MQTT QoS field
+ * @param fretain MQTT retain flag
+ * @param r_length Remaining length after fixed header
+ */
+static void
+mqtt_output_append_fixed_header(struct mqtt_ringbuf_t *rb, u8_t msg_type, u8_t fdup,
+                                u8_t fqos, u8_t fretain, u16_t r_length)
+{
+  /* Start with control byte */
+  mqtt_output_append_u8(rb, (((msg_type & 0x0f) << 4) | ((fdup & 1) << 3) | ((fqos & 3) << 1) | (fretain & 1)));
+  /* Encode remaining length field */
+  do {
+    mqtt_output_append_u8(rb, (r_length & 0x7f) | (r_length >= 128 ? 
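/* bit 7 is the continuation flag: set while more remaining-length bytes follow */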
0x80 : 0)); + r_length >>= 7; + } while (r_length > 0); +} + + +/** + * Check output buffer space + * @param rb Output ring buffer + * @param r_length Remaining length after fixed header + * @return 1 if message will fit, 0 if not enough buffer space + */ +static u8_t +mqtt_output_check_space(struct mqtt_ringbuf_t *rb, u16_t r_length) +{ + /* Start with length of type byte + remaining length */ + u16_t total_len = 1 + r_length; + + LWIP_ASSERT("mqtt_output_check_space: rb != NULL", rb != NULL); + + /* Calculate number of required bytes to contain the remaining bytes field and add to total*/ + do { + total_len++; + r_length >>= 7; + } while (r_length > 0); + + return (total_len <= mqtt_ringbuf_free(rb)); +} + + +/** + * Close connection to server + * @param client MQTT client + * @param reason Reason for disconnection + */ +static void +mqtt_close(mqtt_client_t *client, mqtt_connection_status_t reason) +{ + LWIP_ASSERT("mqtt_close: client != NULL", client != NULL); + + /* Bring down TCP connection if not already done */ + if (client->conn != NULL) { + err_t res; + altcp_recv(client->conn, NULL); + altcp_err(client->conn, NULL); + altcp_sent(client->conn, NULL); + res = altcp_close(client->conn); + if (res != ERR_OK) { + altcp_abort(client->conn); + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_close: Close err=%s\n", lwip_strerr(res))); + } + client->conn = NULL; + } + + /* Remove all pending requests */ + mqtt_clear_requests(&client->pend_req_queue); + /* Stop cyclic timer */ + sys_untimeout(mqtt_cyclic_timer, client); + + /* Notify upper layer of disconnection if changed state */ + if (client->conn_state != TCP_DISCONNECTED) { + + client->conn_state = TCP_DISCONNECTED; + if (client->connect_cb != NULL) { + client->connect_cb(client, client->connect_arg, reason); + } + } +} + + +/** + * Interval timer, called every MQTT_CYCLIC_TIMER_INTERVAL seconds in MQTT_CONNECTING and MQTT_CONNECTED states + * @param arg MQTT client + */ +static void +mqtt_cyclic_timer(void *arg) +{ + u8_t restart_timer = 1; + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_ASSERT("mqtt_cyclic_timer: client != NULL", client != NULL); + + if (client->conn_state == MQTT_CONNECTING) { + client->cyclic_tick++; + if ((client->cyclic_tick * MQTT_CYCLIC_TIMER_INTERVAL) >= MQTT_CONNECT_TIMOUT) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_cyclic_timer: CONNECT attempt to server timed out\n")); + /* Disconnect TCP */ + mqtt_close(client, MQTT_CONNECT_TIMEOUT); + restart_timer = 0; + } + } else if (client->conn_state == MQTT_CONNECTED) { + /* Handle timeout for pending requests */ + mqtt_request_time_elapsed(&client->pend_req_queue, MQTT_CYCLIC_TIMER_INTERVAL); + + /* keep_alive > 0 means keep alive functionality shall be used */ + if (client->keep_alive > 0) { + + client->server_watchdog++; + /* If reception from server has been idle for 1.5*keep_alive time, server is considered unresponsive */ + if ((client->server_watchdog * MQTT_CYCLIC_TIMER_INTERVAL) > (client->keep_alive + client->keep_alive / 2)) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_cyclic_timer: Server incoming keep-alive timeout\n")); + mqtt_close(client, MQTT_CONNECT_TIMEOUT); + restart_timer = 0; + } + + /* If time for a keep alive message to be sent, transmission has been idle for keep_alive time */ + if ((client->cyclic_tick * MQTT_CYCLIC_TIMER_INTERVAL) >= client->keep_alive) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_cyclic_timer: Sending keep-alive message to server\n")); + if (mqtt_output_check_space(&client->output, 0) != 0) { + 
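/* a PINGREQ is only a fixed header: packet type 12 with remaining length 0, no variable header or payload */
+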
mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_PINGREQ, 0, 0, 0, 0); + client->cyclic_tick = 0; + } + } else { + client->cyclic_tick++; + } + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_cyclic_timer: Timer should not be running in state %d\n", client->conn_state)); + restart_timer = 0; + } + if (restart_timer) { + sys_timeout(MQTT_CYCLIC_TIMER_INTERVAL * 1000, mqtt_cyclic_timer, arg); + } +} + + +/** + * Send PUBACK, PUBREC or PUBREL response message + * @param client MQTT client + * @param msg PUBACK, PUBREC or PUBREL + * @param pkt_id Packet identifier + * @param qos QoS value + * @return ERR_OK if successful, ERR_MEM if out of memory + */ +static err_t +pub_ack_rec_rel_response(mqtt_client_t *client, u8_t msg, u16_t pkt_id, u8_t qos) +{ + err_t err = ERR_OK; + if (mqtt_output_check_space(&client->output, 2)) { + mqtt_output_append_fixed_header(&client->output, msg, 0, qos, 0, 2); + mqtt_output_append_u16(&client->output, pkt_id); + mqtt_output_send(&client->output, client->conn); + } else { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("pub_ack_rec_rel_response: OOM creating response: %s with pkt_id: %d\n", + mqtt_msg_type_to_str(msg), pkt_id)); + err = ERR_MEM; + } + return err; +} + +/** + * Subscribe response from server + * @param r Matching request + * @param result Result code from server + */ +static void +mqtt_incomming_suback(struct mqtt_request_t *r, u8_t result) +{ + if (r->cb != NULL) { + r->cb(r->arg, result < 3 ? ERR_OK : ERR_ABRT); + } +} + + +/** + * Complete MQTT message received or buffer full + * @param client MQTT client + * @param fixed_hdr_idx header index + * @param length length received part + * @param remaining_length Remaining length of complete message + */ +static mqtt_connection_status_t +mqtt_message_received(mqtt_client_t *client, u8_t fixed_hdr_idx, u16_t length, u32_t remaining_length) +{ + mqtt_connection_status_t res = MQTT_CONNECT_ACCEPTED; + + u8_t *var_hdr_payload = client->rx_buffer + fixed_hdr_idx; + size_t var_hdr_payload_bufsize = sizeof(client->rx_buffer) - fixed_hdr_idx; + + /* Control packet type */ + u8_t pkt_type = MQTT_CTL_PACKET_TYPE(client->rx_buffer[0]); + u16_t pkt_id = 0; + + LWIP_ASSERT("client->msg_idx < MQTT_VAR_HEADER_BUFFER_LEN", client->msg_idx < MQTT_VAR_HEADER_BUFFER_LEN); + LWIP_ASSERT("fixed_hdr_idx <= client->msg_idx", fixed_hdr_idx <= client->msg_idx); + LWIP_ERROR("buffer length mismatch", fixed_hdr_idx + length <= MQTT_VAR_HEADER_BUFFER_LEN, + return MQTT_CONNECT_DISCONNECTED); + + if (pkt_type == MQTT_MSG_TYPE_CONNACK) { + if (client->conn_state == MQTT_CONNECTING) { + if (length < 2) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short CONNACK message\n")); + goto out_disconnect; + } + /* Get result code from CONNACK */ + res = (mqtt_connection_status_t)var_hdr_payload[1]; + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: Connect response code %d\n", res)); + if (res == MQTT_CONNECT_ACCEPTED) { + /* Reset cyclic_tick when changing to connected state */ + client->cyclic_tick = 0; + client->conn_state = MQTT_CONNECTED; + /* Notify upper layer */ + if (client->connect_cb != 0) { + client->connect_cb(client, client->connect_arg, res); + } + } + } else { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Received CONNACK in connected state\n")); + } + } else if (pkt_type == MQTT_MSG_TYPE_PINGRESP) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ( "mqtt_message_received: Received PINGRESP from server\n")); + + } else if (pkt_type == MQTT_MSG_TYPE_PUBLISH) { + u16_t payload_offset = 0; + u16_t 
payload_length = length; + u8_t qos = MQTT_CTL_PACKET_QOS(client->rx_buffer[0]); + + if (client->msg_idx <= MQTT_VAR_HEADER_BUFFER_LEN) { + /* Should have topic and pkt id*/ + u8_t *topic; + u16_t after_topic; + u8_t bkp; + u16_t topic_len; + u16_t qos_len = (qos ? 2U : 0U); + if (length < 2 + qos_len) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet\n")); + goto out_disconnect; + } + topic_len = var_hdr_payload[0]; + topic_len = (topic_len << 8) + (u16_t)(var_hdr_payload[1]); + if ((topic_len > length - (2 + qos_len)) || + (topic_len > var_hdr_payload_bufsize - (2 + qos_len))) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet (topic)\n")); + goto out_disconnect; + } + + topic = var_hdr_payload + 2; + after_topic = 2 + topic_len; + /* Check buffer length, add one byte even for QoS 0 so that zero termination will fit */ + if ((after_topic + (qos ? 2U : 1U)) > var_hdr_payload_bufsize) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Receive buffer can not fit topic + pkt_id\n")); + goto out_disconnect; + } + + /* id for QoS 1 and 2 */ + if (qos > 0) { + if (length < after_topic + 2U) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short PUBLISH packet (after_topic)\n")); + goto out_disconnect; + } + client->inpub_pkt_id = ((u16_t)var_hdr_payload[after_topic] << 8) + (u16_t)var_hdr_payload[after_topic + 1]; + after_topic += 2; + } else { + client->inpub_pkt_id = 0; + } + /* Take backup of byte after topic */ + bkp = topic[topic_len]; + /* Zero terminate string */ + topic[topic_len] = 0; + /* Payload data remaining in receive buffer */ + payload_length = length - after_topic; + payload_offset = after_topic; + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_incomming_publish: Received message with QoS %d at topic: %s, payload length %"U32_F"\n", + qos, topic, remaining_length + payload_length)); + if (client->pub_cb != NULL) { + client->pub_cb(client->inpub_arg, (const char *)topic, remaining_length + payload_length); + } + /* Restore byte after topic */ + topic[topic_len] = bkp; + } + if (payload_length > 0 || remaining_length == 0) { + if (length < (size_t)(payload_offset + payload_length)) { + LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received short packet (payload)\n")); + goto out_disconnect; + } + client->data_cb(client->inpub_arg, var_hdr_payload + payload_offset, payload_length, remaining_length == 0 ? MQTT_DATA_FLAG_LAST : 0); + /* Reply if QoS > 0 */ + if (remaining_length == 0 && qos > 0) { + /* Send PUBACK for QoS 1 or PUBREC for QoS 2 */ + u8_t resp_msg = (qos == 1) ? 
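/* QoS 1 is answered with PUBACK; QoS 2 answers with PUBREC, starting the PUBREC/PUBREL/PUBCOMP handshake */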
MQTT_MSG_TYPE_PUBACK : MQTT_MSG_TYPE_PUBREC;
+          LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_incomming_publish: Sending publish response: %s with pkt_id: %d\n",
+                                         mqtt_msg_type_to_str(resp_msg), client->inpub_pkt_id));
+          pub_ack_rec_rel_response(client, resp_msg, client->inpub_pkt_id, 0);
+        }
+      }
+    } else {
+      /* Get packet identifier */
+      pkt_id = (u16_t)var_hdr_payload[0] << 8;
+      pkt_id |= (u16_t)var_hdr_payload[1];
+      if (pkt_id == 0) {
+        LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Got message with illegal packet identifier: 0\n"));
+        goto out_disconnect;
+      }
+      if (pkt_type == MQTT_MSG_TYPE_PUBREC) {
+        LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: PUBREC, sending PUBREL with pkt_id: %d\n", pkt_id));
+        pub_ack_rec_rel_response(client, MQTT_MSG_TYPE_PUBREL, pkt_id, 1);
+
+      } else if (pkt_type == MQTT_MSG_TYPE_PUBREL) {
+        LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: PUBREL, sending PUBCOMP response with pkt_id: %d\n", pkt_id));
+        pub_ack_rec_rel_response(client, MQTT_MSG_TYPE_PUBCOMP, pkt_id, 0);
+
+      } else if (pkt_type == MQTT_MSG_TYPE_SUBACK || pkt_type == MQTT_MSG_TYPE_UNSUBACK ||
+                 pkt_type == MQTT_MSG_TYPE_PUBCOMP || pkt_type == MQTT_MSG_TYPE_PUBACK) {
+        struct mqtt_request_t *r = mqtt_take_request(&client->pend_req_queue, pkt_id);
+        if (r != NULL) {
+          LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_message_received: %s response with id %d\n", mqtt_msg_type_to_str(pkt_type), pkt_id));
+          if (pkt_type == MQTT_MSG_TYPE_SUBACK) {
+            if (length < 3) {
+              LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Too small SUBACK packet\n"));
+              goto out_disconnect;
+            } else {
+              mqtt_incomming_suback(r, var_hdr_payload[2]);
+            }
+          } else if (r->cb != NULL) {
+            r->cb(r->arg, ERR_OK);
+          }
+          mqtt_delete_request(r);
+        } else {
+          LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Received %s reply, with wrong pkt_id: %d\n", mqtt_msg_type_to_str(pkt_type), pkt_id));
+        }
+      } else {
+        LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_message_received: Received unknown message type: %d\n", pkt_type));
+        goto out_disconnect;
+      }
+    }
+  return res;
+out_disconnect:
+  return MQTT_CONNECT_DISCONNECTED;
+}
+
+
+/**
+ * MQTT incoming message parser
+ * @param client MQTT client
+ * @param p PBUF chain of received data
+ * @return Connection status
+ */
+static mqtt_connection_status_t
+mqtt_parse_incoming(mqtt_client_t *client, struct pbuf *p)
+{
+  u16_t in_offset = 0;
+  u32_t msg_rem_len = 0;
+  u8_t fixed_hdr_idx = 0;
+  u8_t b = 0;
+
+  while (p->tot_len > in_offset) {
+    /* We ALWAYS parse the header here first. Even if the header was not
+       included in this segment, we re-parse it here by buffering it in
+       client->rx_buffer. client->msg_idx keeps track of this. */
+    if ((fixed_hdr_idx < 2) || ((b & 0x80) != 0)) {
+
+      if (fixed_hdr_idx < client->msg_idx) {
+        /* parse header from old pbuf (buffered in client->rx_buffer) */
+        b = client->rx_buffer[fixed_hdr_idx];
+      } else {
+        /* parse header from this pbuf and save it in client->rx_buffer in case
+           it comes in segmented */
+        b = pbuf_get_at(p, in_offset++);
+        client->rx_buffer[client->msg_idx++] = b;
+      }
+      fixed_hdr_idx++;
+
+      if (fixed_hdr_idx >= 2) {
+        /* fixed header contains at least 2 bytes but can contain more, depending on
+           'remaining length'. All bytes but the last of this have 0x80 set to
+           indicate more bytes are coming. 
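For example, a remaining length of 321 is encoded in two bytes as 0xC1 0x02 (321 = 65 + 2 * 128, and 65 | 0x80 = 0xC1).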
*/
+        msg_rem_len |= (u32_t)(b & 0x7f) << ((fixed_hdr_idx - 2) * 7);
+        if ((b & 0x80) == 0) {
+          /* fixed header is done */
+          LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_parse_incoming: Remaining length after fixed header: %"U32_F"\n", msg_rem_len));
+          if (msg_rem_len == 0) {
+            /* Complete message with no extra headers or payload received */
+            mqtt_message_received(client, fixed_hdr_idx, 0, 0);
+            client->msg_idx = 0;
+            fixed_hdr_idx = 0;
+          } else {
+            /* Bytes remaining in message (changes remaining length if this is
+               not the first segment of this message) */
+            msg_rem_len = (msg_rem_len + fixed_hdr_idx) - client->msg_idx;
+          }
+        }
+      }
+    } else {
+      /* Fixed header has been parsed, parse variable header */
+      u16_t cpy_len, cpy_start, buffer_space;
+
+      cpy_start = (client->msg_idx - fixed_hdr_idx) % (MQTT_VAR_HEADER_BUFFER_LEN - fixed_hdr_idx) + fixed_hdr_idx;
+
+      /* Allow to copy the lesser one of available length in input data or bytes remaining in message */
+      cpy_len = (u16_t)LWIP_MIN((u16_t)(p->tot_len - in_offset), msg_rem_len);
+
+      /* Limit to available space in buffer */
+      buffer_space = MQTT_VAR_HEADER_BUFFER_LEN - cpy_start;
+      if (cpy_len > buffer_space) {
+        cpy_len = buffer_space;
+      }
+      pbuf_copy_partial(p, client->rx_buffer + cpy_start, cpy_len, in_offset);
+
+      /* Advance get and put indexes */
+      client->msg_idx += cpy_len;
+      in_offset += cpy_len;
+      msg_rem_len -= cpy_len;
+
+      LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_parse_incoming: msg_idx: %"U32_F", cpy_len: %"U16_F", remaining %"U32_F"\n", client->msg_idx, cpy_len, msg_rem_len));
+      if ((msg_rem_len == 0) || (cpy_len == buffer_space)) {
+        /* Whole message received or buffer is full */
+        mqtt_connection_status_t res = mqtt_message_received(client, fixed_hdr_idx, (cpy_start + cpy_len) - fixed_hdr_idx, msg_rem_len);
+        if (res != MQTT_CONNECT_ACCEPTED) {
+          return res;
+        }
+        if (msg_rem_len == 0) {
+          /* Reset parser state */
+          client->msg_idx = 0;
+          /* msg_tot_len = 0; */
+          fixed_hdr_idx = 0;
+        }
+      }
+    }
+  }
+  return MQTT_CONNECT_ACCEPTED;
+}
+
+
+/**
+ * TCP received callback function. @see tcp_recv_fn
+ * @param arg MQTT client
+ * @param p PBUF chain of received data
+ * @param err Passed as return value if not ERR_OK
+ * @return ERR_OK or err passed into callback
+ */
+static err_t
+mqtt_tcp_recv_cb(void *arg, struct altcp_pcb *pcb, struct pbuf *p, err_t err)
+{
+  mqtt_client_t *client = (mqtt_client_t *)arg;
+  LWIP_ASSERT("mqtt_tcp_recv_cb: client != NULL", client != NULL);
+  LWIP_ASSERT("mqtt_tcp_recv_cb: client->conn == pcb", client->conn == pcb);
+
+  if (p == NULL) {
+    LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_recv_cb: Recv pbuf=NULL, remote has closed connection\n"));
+    mqtt_close(client, MQTT_CONNECT_DISCONNECTED);
+  } else {
+    mqtt_connection_status_t res;
+    if (err != ERR_OK) {
+      LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_tcp_recv_cb: Recv err=%d\n", err));
+      pbuf_free(p);
+      return err;
+    }
+
+    /* Tell remote that data has been received */
+    altcp_recved(pcb, p->tot_len);
+    res = mqtt_parse_incoming(client, p);
+    pbuf_free(p);
+
+    if (res != MQTT_CONNECT_ACCEPTED) {
+      mqtt_close(client, res);
+    }
+    /* If keep alive functionality is used */
+    if (client->keep_alive != 0) {
+      /* Reset server alive watchdog */
+      client->server_watchdog = 0;
+    }
+
+  }
+  return ERR_OK;
+}
+
+
+/**
+ * TCP data sent callback function. 
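Invoked when the remote side acknowledges sent data; used here to reset the keep-alive timers, to complete QoS 0 publish requests (which get no response message from the server) and to push further buffered output. 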
@see tcp_sent_fn + * @param arg MQTT client + * @param tpcb TCP connection handle + * @param len Number of bytes sent + * @return ERR_OK + */ +static err_t +mqtt_tcp_sent_cb(void *arg, struct altcp_pcb *tpcb, u16_t len) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + + LWIP_UNUSED_ARG(tpcb); + LWIP_UNUSED_ARG(len); + + if (client->conn_state == MQTT_CONNECTED) { + struct mqtt_request_t *r; + + /* Reset keep-alive send timer and server watchdog */ + client->cyclic_tick = 0; + client->server_watchdog = 0; + /* QoS 0 publish has no response from server, so call its callbacks here */ + while ((r = mqtt_take_request(&client->pend_req_queue, 0)) != NULL) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_sent_cb: Calling QoS 0 publish complete callback\n")); + if (r->cb != NULL) { + r->cb(r->arg, ERR_OK); + } + mqtt_delete_request(r); + } + /* Try send any remaining buffers from output queue */ + mqtt_output_send(&client->output, client->conn); + } + return ERR_OK; +} + +/** + * TCP error callback function. @see tcp_err_fn + * @param arg MQTT client + * @param err Error encountered + */ +static void +mqtt_tcp_err_cb(void *arg, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + LWIP_UNUSED_ARG(err); /* only used for debug output */ + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_err_cb: TCP error callback: error %d, arg: %p\n", err, arg)); + LWIP_ASSERT("mqtt_tcp_err_cb: client != NULL", client != NULL); + /* Set conn to null before calling close as pcb is already deallocated*/ + client->conn = 0; + mqtt_close(client, MQTT_CONNECT_DISCONNECTED); +} + +/** + * TCP poll callback function. @see tcp_poll_fn + * @param arg MQTT client + * @param tpcb TCP connection handle + * @return err ERR_OK + */ +static err_t +mqtt_tcp_poll_cb(void *arg, struct altcp_pcb *tpcb) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + if (client->conn_state == MQTT_CONNECTED) { + /* Try send any remaining buffers from output queue */ + mqtt_output_send(&client->output, tpcb); + } + return ERR_OK; +} + +/** + * TCP connect callback function. @see tcp_connected_fn + * @param arg MQTT client + * @param err Always ERR_OK, mqtt_tcp_err_cb is called in case of error + * @return ERR_OK + */ +static err_t +mqtt_tcp_connect_cb(void *arg, struct altcp_pcb *tpcb, err_t err) +{ + mqtt_client_t *client = (mqtt_client_t *)arg; + + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_tcp_connect_cb: TCP connect error %d\n", err)); + return err; + } + + /* Initiate receiver state */ + client->msg_idx = 0; + + /* Setup TCP callbacks */ + altcp_recv(tpcb, mqtt_tcp_recv_cb); + altcp_sent(tpcb, mqtt_tcp_sent_cb); + altcp_poll(tpcb, mqtt_tcp_poll_cb, 2); + + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_tcp_connect_cb: TCP connection established to server\n")); + /* Enter MQTT connect state */ + client->conn_state = MQTT_CONNECTING; + + /* Start cyclic timer */ + sys_timeout(MQTT_CYCLIC_TIMER_INTERVAL * 1000, mqtt_cyclic_timer, client); + client->cyclic_tick = 0; + + /* Start transmission from output queue, connect message is the first one out*/ + mqtt_output_send(&client->output, client->conn); + + return ERR_OK; +} + + + +/*---------------------------------------------------------------------------------------------------- */ +/* Public API */ + + +/** + * @ingroup mqtt + * MQTT publish function. 
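Example usage (a sketch; pub_done_cb is an illustrative name, not part of the API, and on an OS build the caller is assumed to hold the core lock, as LWIP_ASSERT_CORE_LOCKED below requires):
+ *
+ *   static void pub_done_cb(void *arg, err_t err)
+ *   {
+ *     if (err != ERR_OK) {
+ *       LWIP_PLATFORM_DIAG(("publish failed or timed out: %d\n", (int)err));
+ *     }
+ *   }
+ *
+ *   mqtt_publish(client, "sensors/temp", "21.5", 4, 1, 0, pub_done_cb, NULL);
+ *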
+ * @param client MQTT client
+ * @param topic Publish topic string
+ * @param payload Data to publish (NULL is allowed)
+ * @param payload_length Length of payload (0 is allowed)
+ * @param qos Quality of service, 0, 1 or 2
+ * @param retain MQTT retain flag
+ * @param cb Callback to call when publish is complete or has timed out
+ * @param arg User supplied argument to publish callback
+ * @return ERR_OK if successful
+ *         ERR_CONN if client is disconnected
+ *         ERR_MEM if short on memory
+ */
+err_t
+mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain,
+             mqtt_request_cb_t cb, void *arg)
+{
+  struct mqtt_request_t *r;
+  u16_t pkt_id;
+  size_t topic_strlen;
+  size_t total_len;
+  u16_t topic_len;
+  u16_t remaining_length;
+
+  LWIP_ASSERT_CORE_LOCKED();
+  LWIP_ASSERT("mqtt_publish: client != NULL", client);
+  LWIP_ASSERT("mqtt_publish: topic != NULL", topic);
+  LWIP_ERROR("mqtt_publish: TCP disconnected", (client->conn_state != TCP_DISCONNECTED), return ERR_CONN);
+
+  topic_strlen = strlen(topic);
+  LWIP_ERROR("mqtt_publish: topic length overflow", (topic_strlen <= (0xFFFF - 2)), return ERR_ARG);
+  topic_len = (u16_t)topic_strlen;
+  total_len = 2 + topic_len + payload_length;
+
+  if (qos > 0) {
+    total_len += 2;
+    /* Generate packet id for QoS 1 and 2 */
+    pkt_id = msg_generate_packet_id(client);
+  } else {
+    /* Use reserved value pkt_id 0 for QoS 0 in request handle */
+    pkt_id = 0;
+  }
+  LWIP_ERROR("mqtt_publish: total length overflow", (total_len <= 0xFFFF), return ERR_ARG);
+  remaining_length = (u16_t)total_len;
+
+  LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_publish: Publish with payload length %d to topic \"%s\"\n", payload_length, topic));
+
+  r = mqtt_create_request(client->req_list, LWIP_ARRAYSIZE(client->req_list), pkt_id, cb, arg);
+  if (r == NULL) {
+    return ERR_MEM;
+  }
+
+  if (mqtt_output_check_space(&client->output, remaining_length) == 0) {
+    mqtt_delete_request(r);
+    return ERR_MEM;
+  }
+  /* Append fixed header */
+  mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_PUBLISH, 0, qos, retain, remaining_length);
+
+  /* Append Topic */
+  mqtt_output_append_string(&client->output, topic, topic_len);
+
+  /* Append packet id for QoS 1 and 2 */
+  if (qos > 0) {
+    mqtt_output_append_u16(&client->output, pkt_id);
+  }
+
+  /* Append optional publish payload */
+  if ((payload != NULL) && (payload_length > 0)) {
+    mqtt_output_append_buf(&client->output, payload, payload_length);
+  }
+
+  mqtt_append_request(&client->pend_req_queue, r);
+  mqtt_output_send(&client->output, client->conn);
+  return ERR_OK;
+}
+
+
+/**
+ * @ingroup mqtt
+ * MQTT subscribe/unsubscribe function. 
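Example usage (a sketch; sub_done_cb is an illustrative name -- the convenience macros mqtt_subscribe()/mqtt_unsubscribe() in mqtt.h wrap this call):
+ *
+ *   mqtt_sub_unsub(client, "sensors/#", 1, sub_done_cb, NULL, 1);  (subscribe at QoS 1)
+ *   mqtt_sub_unsub(client, "sensors/#", 0, sub_done_cb, NULL, 0);  (unsubscribe)
+ *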
+ * @param client MQTT client
+ * @param topic topic to subscribe to
+ * @param qos Quality of service, 0, 1 or 2 (only used for subscribe)
+ * @param cb Callback to call when subscribe/unsubscribe response is received
+ * @param arg User supplied argument to the callback
+ * @param sub 1 for subscribe, 0 for unsubscribe
+ * @return ERR_OK if successful, @see err_t enum for other results
+ */
+err_t
+mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub)
+{
+  size_t topic_strlen;
+  size_t total_len;
+  u16_t topic_len;
+  u16_t remaining_length;
+  u16_t pkt_id;
+  struct mqtt_request_t *r;
+
+  LWIP_ASSERT_CORE_LOCKED();
+  LWIP_ASSERT("mqtt_sub_unsub: client != NULL", client);
+  LWIP_ASSERT("mqtt_sub_unsub: topic != NULL", topic);
+
+  topic_strlen = strlen(topic);
+  LWIP_ERROR("mqtt_sub_unsub: topic length overflow", (topic_strlen <= (0xFFFF - 2)), return ERR_ARG);
+  topic_len = (u16_t)topic_strlen;
+  /* Topic string, pkt_id, qos for subscribe */
+  total_len = topic_len + 2 + 2 + (sub != 0);
+  LWIP_ERROR("mqtt_sub_unsub: total length overflow", (total_len <= 0xFFFF), return ERR_ARG);
+  remaining_length = (u16_t)total_len;
+
+  LWIP_ASSERT("mqtt_sub_unsub: qos < 3", qos < 3);
+  if (client->conn_state == TCP_DISCONNECTED) {
+    LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_sub_unsub: Can not (un)subscribe in disconnected state\n"));
+    return ERR_CONN;
+  }
+
+  pkt_id = msg_generate_packet_id(client);
+  r = mqtt_create_request(client->req_list, LWIP_ARRAYSIZE(client->req_list), pkt_id, cb, arg);
+  if (r == NULL) {
+    return ERR_MEM;
+  }
+
+  if (mqtt_output_check_space(&client->output, remaining_length) == 0) {
+    mqtt_delete_request(r);
+    return ERR_MEM;
+  }
+
+  LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_sub_unsub: Client (un)subscribe to topic \"%s\", id: %d\n", topic, pkt_id));
+
+  mqtt_output_append_fixed_header(&client->output, sub ? 
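/* the protocol requires (UN)SUBSCRIBE packets to be sent with the fixed-header QoS bits equal to 1 */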
MQTT_MSG_TYPE_SUBSCRIBE : MQTT_MSG_TYPE_UNSUBSCRIBE, 0, 1, 0, remaining_length); + /* Packet id */ + mqtt_output_append_u16(&client->output, pkt_id); + /* Topic */ + mqtt_output_append_string(&client->output, topic, topic_len); + /* QoS */ + if (sub != 0) { + mqtt_output_append_u8(&client->output, LWIP_MIN(qos, 2)); + } + + mqtt_append_request(&client->pend_req_queue, r); + mqtt_output_send(&client->output, client->conn); + return ERR_OK; +} + + +/** + * @ingroup mqtt + * Set callback to handle incoming publish requests from server + * @param client MQTT client + * @param pub_cb Callback invoked when publish starts, contain topic and total length of payload + * @param data_cb Callback for each fragment of payload that arrives + * @param arg User supplied argument to both callbacks + */ +void +mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t pub_cb, + mqtt_incoming_data_cb_t data_cb, void *arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_set_inpub_callback: client != NULL", client != NULL); + client->data_cb = data_cb; + client->pub_cb = pub_cb; + client->inpub_arg = arg; +} + +/** + * @ingroup mqtt + * Create a new MQTT client instance + * @return Pointer to instance on success, NULL otherwise + */ +mqtt_client_t * +mqtt_client_new(void) +{ + LWIP_ASSERT_CORE_LOCKED(); + return (mqtt_client_t *)mem_calloc(1, sizeof(mqtt_client_t)); +} + +/** + * @ingroup mqtt + * Free MQTT client instance + * @param client Pointer to instance to be freed + */ +void +mqtt_client_free(mqtt_client_t *client) +{ + mem_free(client); +} + +/** + * @ingroup mqtt + * Connect to MQTT server + * @param client MQTT client + * @param ip_addr Server IP + * @param port Server port + * @param cb Connection state change callback + * @param arg User supplied argument to connection callback + * @param client_info Client identification and connection options + * @return ERR_OK if successful, @see err_t enum for other results + */ +err_t +mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ip_addr, u16_t port, mqtt_connection_cb_t cb, void *arg, + const struct mqtt_connect_client_info_t *client_info) +{ + err_t err; + size_t len; + u16_t client_id_length; + /* Length is the sum of 2+"MQTT", protocol level, flags and keep alive */ + u16_t remaining_length = 2 + 4 + 1 + 1 + 2; + u8_t flags = 0, will_topic_len = 0, will_msg_len = 0; + u16_t client_user_len = 0, client_pass_len = 0; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("mqtt_client_connect: client != NULL", client != NULL); + LWIP_ASSERT("mqtt_client_connect: ip_addr != NULL", ip_addr != NULL); + LWIP_ASSERT("mqtt_client_connect: client_info != NULL", client_info != NULL); + LWIP_ASSERT("mqtt_client_connect: client_info->client_id != NULL", client_info->client_id != NULL); + + if (client->conn_state != TCP_DISCONNECTED) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_client_connect: Already connected\n")); + return ERR_ISCONN; + } + + /* Wipe clean */ + memset(client, 0, sizeof(mqtt_client_t)); + client->connect_arg = arg; + client->connect_cb = cb; + client->keep_alive = client_info->keep_alive; + mqtt_init_requests(client->req_list, LWIP_ARRAYSIZE(client->req_list)); + + /* Build connect message */ + if (client_info->will_topic != NULL && client_info->will_msg != NULL) { + flags |= MQTT_CONNECT_FLAG_WILL; + flags |= (client_info->will_qos & 3) << 3; + if (client_info->will_retain) { + flags |= MQTT_CONNECT_FLAG_WILL_RETAIN; + } + len = strlen(client_info->will_topic); + LWIP_ERROR("mqtt_client_connect: client_info->will_topic length 
overflow", len <= 0xFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->will_topic length must be > 0", len > 0, return ERR_VAL); + will_topic_len = (u8_t)len; + len = strlen(client_info->will_msg); + LWIP_ERROR("mqtt_client_connect: client_info->will_msg length overflow", len <= 0xFF, return ERR_VAL); + will_msg_len = (u8_t)len; + len = remaining_length + 2 + will_topic_len + 2 + will_msg_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + if (client_info->client_user != NULL) { + flags |= MQTT_CONNECT_FLAG_USERNAME; + len = strlen(client_info->client_user); + LWIP_ERROR("mqtt_client_connect: client_info->client_user length overflow", len <= 0xFFFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->client_user length must be > 0", len > 0, return ERR_VAL); + client_user_len = (u16_t)len; + len = remaining_length + 2 + client_user_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + if (client_info->client_pass != NULL) { + flags |= MQTT_CONNECT_FLAG_PASSWORD; + len = strlen(client_info->client_pass); + LWIP_ERROR("mqtt_client_connect: client_info->client_pass length overflow", len <= 0xFFFF, return ERR_VAL); + LWIP_ERROR("mqtt_client_connect: client_info->client_pass length must be > 0", len > 0, return ERR_VAL); + client_pass_len = (u16_t)len; + len = remaining_length + 2 + client_pass_len; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + } + + /* Don't complicate things, always connect using clean session */ + flags |= MQTT_CONNECT_FLAG_CLEAN_SESSION; + + len = strlen(client_info->client_id); + LWIP_ERROR("mqtt_client_connect: client_info->client_id length overflow", len <= 0xFFFF, return ERR_VAL); + client_id_length = (u16_t)len; + len = remaining_length + 2 + client_id_length; + LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); + remaining_length = (u16_t)len; + + if (mqtt_output_check_space(&client->output, remaining_length) == 0) { + return ERR_MEM; + } + +#if LWIP_ALTCP && LWIP_ALTCP_TLS + if (client_info->tls_config) { + client->conn = altcp_tls_new(client_info->tls_config, IP_GET_TYPE(ip_addr)); + } else +#endif + { + client->conn = altcp_tcp_new_ip_type(IP_GET_TYPE(ip_addr)); + } + if (client->conn == NULL) { + return ERR_MEM; + } + + /* Set arg pointer for callbacks */ + altcp_arg(client->conn, client); + /* Any local address, pick random local port number */ + err = altcp_bind(client->conn, IP_ADDR_ANY, 0); + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_client_connect: Error binding to local ip/port, %d\n", err)); + goto tcp_fail; + } + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_client_connect: Connecting to host: %s at port:%"U16_F"\n", ipaddr_ntoa(ip_addr), port)); + + /* Connect to server */ + err = altcp_connect(client->conn, ip_addr, port, mqtt_tcp_connect_cb); + if (err != ERR_OK) { + LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("mqtt_client_connect: Error connecting to remote ip/port, %d\n", err)); + goto tcp_fail; + } + /* Set error callback */ + altcp_err(client->conn, mqtt_tcp_err_cb); + client->conn_state = TCP_CONNECTING; + + /* Append fixed header */ + mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_CONNECT, 0, 0, 0, remaining_length); + /* Append Protocol string */ + mqtt_output_append_string(&client->output, "MQTT", 4); + /* 
Append Protocol level */
+  mqtt_output_append_u8(&client->output, 4);
+  /* Append connect flags */
+  mqtt_output_append_u8(&client->output, flags);
+  /* Append keep-alive */
+  mqtt_output_append_u16(&client->output, client_info->keep_alive);
+  /* Append client id */
+  mqtt_output_append_string(&client->output, client_info->client_id, client_id_length);
+  /* Append will message if used */
+  if ((flags & MQTT_CONNECT_FLAG_WILL) != 0) {
+    mqtt_output_append_string(&client->output, client_info->will_topic, will_topic_len);
+    mqtt_output_append_string(&client->output, client_info->will_msg, will_msg_len);
+  }
+  /* Append user name if given */
+  if ((flags & MQTT_CONNECT_FLAG_USERNAME) != 0) {
+    mqtt_output_append_string(&client->output, client_info->client_user, client_user_len);
+  }
+  /* Append password if given */
+  if ((flags & MQTT_CONNECT_FLAG_PASSWORD) != 0) {
+    mqtt_output_append_string(&client->output, client_info->client_pass, client_pass_len);
+  }
+  return ERR_OK;
+
+tcp_fail:
+  altcp_abort(client->conn);
+  client->conn = NULL;
+  return err;
+}
+
+
+/**
+ * @ingroup mqtt
+ * Disconnect from MQTT server
+ * @param client MQTT client
+ */
+void
+mqtt_disconnect(mqtt_client_t *client)
+{
+  LWIP_ASSERT_CORE_LOCKED();
+  LWIP_ASSERT("mqtt_disconnect: client != NULL", client);
+  /* If connection is not already closed */
+  if (client->conn_state != TCP_DISCONNECTED) {
+    /* Set conn_state before calling mqtt_close to prevent callback from being called */
+    client->conn_state = TCP_DISCONNECTED;
+    mqtt_close(client, (mqtt_connection_status_t)0);
+  }
+}
+
+/**
+ * @ingroup mqtt
+ * Check connection with server
+ * @param client MQTT client
+ * @return 1 if connected to server, 0 otherwise
+ */
+u8_t
+mqtt_client_is_connected(mqtt_client_t *client)
+{
+  LWIP_ASSERT_CORE_LOCKED();
+  LWIP_ASSERT("mqtt_client_is_connected: client != NULL", client);
+  return client->conn_state == MQTT_CONNECTED;
+}
+
+#endif /* LWIP_TCP && LWIP_CALLBACK_API */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp.c
new file mode 100644
index 0000000..d42b727
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp.c
@@ -0,0 +1,681 @@
+/**
+ * @file
+ * @defgroup altcp Application layered TCP Functions
+ * @ingroup altcp_api
+ *
+ * This file contains the common functions for altcp to work.
+ * For more details see @ref altcp_api.
+ */
+
+/**
+ * @defgroup altcp_api Application layered TCP Introduction
+ * @ingroup callbackstyle_api
+ *
+ * Overview
+ * --------
+ * altcp (application layered TCP connection API; to be used from TCPIP thread)
+ * is an abstraction layer that prevents applications from linking hard against the
+ * @ref tcp.h functions while providing the same functionality. It is used to
+ * e.g. add SSL/TLS (see LWIP_ALTCP_TLS) or proxy-connect support to an application
+ * written for the tcp callback API without that application knowing the
+ * protocol details.
+ *
+ * * This interface mimics the tcp callback API to the application while preventing
+ *   direct linking (much like virtual functions).
+ * * This way, an application can make use of other application layer protocols
+ *   on top of TCP without knowing the details (e.g. TLS, proxy connection). 
+ * * This is achieved by simply including "lwip/altcp.h" instead of "lwip/tcp.h",
+ *   replacing "struct tcp_pcb" with "struct altcp_pcb" and prefixing all functions
+ *   with "altcp_" instead of "tcp_".
+ *
+ * With altcp support disabled (LWIP_ALTCP==0), applications written against the
+ * altcp API can still be compiled but are directly linked against the tcp.h
+ * callback API and then cannot use layered protocols. To minimize code changes
+ * in this case, the use of altcp_allocators is strongly suggested.
+ *
+ * Usage
+ * -----
+ * To make use of this API from an existing tcp raw API application:
+ * * Include "lwip/altcp.h" instead of "lwip/tcp.h"
+ * * Replace "struct tcp_pcb" with "struct altcp_pcb"
+ * * Prefix all called tcp API functions with "altcp_" instead of "tcp_" to link
+ *   against the altcp functions
+ * * @ref altcp_new (and @ref altcp_new_ip_type/@ref altcp_new_ip6) take
+ *   an @ref altcp_allocator_t as an argument, whereas the original tcp API
+ *   functions take no arguments.
+ * * An @ref altcp_allocator_t allocator is an object that holds a pointer to an
+ *   allocator object and a corresponding state (e.g. for TLS, the corresponding
+ *   state may hold certificates or keys). This way, the application does not
+ *   even need to know if it uses TLS or pure TCP, this is handled at runtime
+ *   by passing a specific allocator.
+ * * An application can alternatively bind hard to the altcp_tls API by calling
+ *   @ref altcp_tls_new or @ref altcp_tls_wrap.
+ * * The TLS layer is not directly implemented by lwIP, but a port to mbedTLS is
+ *   provided.
+ * * Another altcp layer is proxy-connect to use TLS behind an HTTP proxy (see
+ *   @ref altcp_proxyconnect.h)
+ *
+ * altcp_allocator_t
+ * -----------------
+ * An altcp allocator is created by the application by combining an allocator
+ * callback function and a corresponding state, e.g.:\code{.c}
+ * static const unsigned char cert[] = {0x2D, ... (see mbedTLS doc for how to create this)};
+ * struct altcp_tls_config * conf = altcp_tls_create_config_client(cert, sizeof(cert));
+ * altcp_allocator_t tls_allocator = {
+ *   altcp_tls_alloc, conf
+ * };
+ * \endcode
+ *
+ *
+ * struct altcp_tls_config
+ * -----------------------
+ * The struct altcp_tls_config holds state that is needed to create new TLS client
+ * or server connections (e.g. certificates and private keys).
+ *
+ * It is not defined by lwIP itself but by the TLS port (e.g. the altcp_tls to mbedTLS
+ * adaptation). However, the parameters used to create it are defined in @ref
+ * altcp_tls.h (see @ref altcp_tls_create_config_server_privkey_cert for servers
+ * and @ref altcp_tls_create_config_client/@ref altcp_tls_create_config_client_2wayauth
+ * for clients).
+ *
+ * For mbedTLS, ensure that certificates can be parsed by 'mbedtls_x509_crt_parse()' and
+ * private keys can be parsed by 'mbedtls_pk_parse_key()'.
+ */
+
+/*
+ * Copyright (c) 2017 Simon Goldschmidt
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. 
The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Simon Goldschmidt
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/altcp.h"
+#include "lwip/priv/altcp_priv.h"
+#include "lwip/altcp_tcp.h"
+#include "lwip/tcp.h"
+#include "lwip/mem.h"
+
+#include <string.h>
+
+extern const struct altcp_functions altcp_tcp_functions;
+
+/**
+ * For altcp layer implementations only: allocate a new struct altcp_pcb from the pool
+ * and zero the memory
+ */
+struct altcp_pcb *
+altcp_alloc(void)
+{
+  struct altcp_pcb *ret = (struct altcp_pcb *)memp_malloc(MEMP_ALTCP_PCB);
+  if (ret != NULL) {
+    memset(ret, 0, sizeof(struct altcp_pcb));
+  }
+  return ret;
+}
+
+/**
+ * For altcp layer implementations only: return a struct altcp_pcb to the pool
+ */
+void
+altcp_free(struct altcp_pcb *conn)
+{
+  if (conn) {
+    if (conn->fns && conn->fns->dealloc) {
+      conn->fns->dealloc(conn);
+    }
+    memp_free(MEMP_ALTCP_PCB, conn);
+  }
+}
+
+/**
+ * @ingroup altcp
+ * altcp_new_ip6: @ref altcp_new for IPv6
+ */
+struct altcp_pcb *
+altcp_new_ip6(altcp_allocator_t *allocator)
+{
+  return altcp_new_ip_type(allocator, IPADDR_TYPE_V6);
+}
+
+/**
+ * @ingroup altcp
+ * altcp_new: @ref altcp_new for IPv4
+ */
+struct altcp_pcb *
+altcp_new(altcp_allocator_t *allocator)
+{
+  return altcp_new_ip_type(allocator, IPADDR_TYPE_V4);
+}
+
+/**
+ * @ingroup altcp
+ * altcp_new_ip_type: called by applications to allocate a new pcb with the help of an
+ * allocator function. 
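Example usage (a sketch; passing a NULL allocator falls back to a plain TCP pcb, and altcp_tcp_alloc is the stock TCP allocator declared in altcp_tcp.h):
+ *
+ *   altcp_allocator_t allocator = { altcp_tcp_alloc, NULL };
+ *   struct altcp_pcb *pcb = altcp_new_ip_type(&allocator, IPADDR_TYPE_V4);
+ *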
+ * + * @param allocator allocator function and argument + * @param ip_type IP version of the pcb (@ref lwip_ip_addr_type) + * @return a new altcp_pcb or NULL on error + */ +struct altcp_pcb * +altcp_new_ip_type(altcp_allocator_t *allocator, u8_t ip_type) +{ + struct altcp_pcb *conn; + if (allocator == NULL) { + /* no allocator given, create a simple TCP connection */ + return altcp_tcp_new_ip_type(ip_type); + } + if (allocator->alloc == NULL) { + /* illegal allocator */ + return NULL; + } + conn = allocator->alloc(allocator->arg, ip_type); + if (conn == NULL) { + /* allocation failed */ + return NULL; + } + return conn; +} + +/** + * @ingroup altcp + * @see tcp_arg() + */ +void +altcp_arg(struct altcp_pcb *conn, void *arg) +{ + if (conn) { + conn->arg = arg; + } +} + +/** + * @ingroup altcp + * @see tcp_accept() + */ +void +altcp_accept(struct altcp_pcb *conn, altcp_accept_fn accept) +{ + if (conn != NULL) { + conn->accept = accept; + } +} + +/** + * @ingroup altcp + * @see tcp_recv() + */ +void +altcp_recv(struct altcp_pcb *conn, altcp_recv_fn recv) +{ + if (conn) { + conn->recv = recv; + } +} + +/** + * @ingroup altcp + * @see tcp_sent() + */ +void +altcp_sent(struct altcp_pcb *conn, altcp_sent_fn sent) +{ + if (conn) { + conn->sent = sent; + } +} + +/** + * @ingroup altcp + * @see tcp_poll() + */ +void +altcp_poll(struct altcp_pcb *conn, altcp_poll_fn poll, u8_t interval) +{ + if (conn) { + conn->poll = poll; + conn->pollinterval = interval; + if (conn->fns && conn->fns->set_poll) { + conn->fns->set_poll(conn, interval); + } + } +} + +/** + * @ingroup altcp + * @see tcp_err() + */ +void +altcp_err(struct altcp_pcb *conn, altcp_err_fn err) +{ + if (conn) { + conn->err = err; + } +} + +/* Generic functions calling the "virtual" ones */ + +/** + * @ingroup altcp + * @see tcp_recved() + */ +void +altcp_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn && conn->fns && conn->fns->recved) { + conn->fns->recved(conn, len); + } +} + +/** + * @ingroup altcp + * @see tcp_bind() + */ +err_t +altcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + if (conn && conn->fns && conn->fns->bind) { + return conn->fns->bind(conn, ipaddr, port); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_connect() + */ +err_t +altcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected) +{ + if (conn && conn->fns && conn->fns->connect) { + return conn->fns->connect(conn, ipaddr, port, connected); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_listen_with_backlog_and_err() + */ +struct altcp_pcb * +altcp_listen_with_backlog_and_err(struct altcp_pcb *conn, u8_t backlog, err_t *err) +{ + if (conn && conn->fns && conn->fns->listen) { + return conn->fns->listen(conn, backlog, err); + } + return NULL; +} + +/** + * @ingroup altcp + * @see tcp_abort() + */ +void +altcp_abort(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->abort) { + conn->fns->abort(conn); + } +} + +/** + * @ingroup altcp + * @see tcp_close() + */ +err_t +altcp_close(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->close) { + return conn->fns->close(conn); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_shutdown() + */ +err_t +altcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + if (conn && conn->fns && conn->fns->shutdown) { + return conn->fns->shutdown(conn, shut_rx, shut_tx); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_write() + */ +err_t +altcp_write(struct altcp_pcb 
*conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + if (conn && conn->fns && conn->fns->write) { + return conn->fns->write(conn, dataptr, len, apiflags); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_output() + */ +err_t +altcp_output(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->output) { + return conn->fns->output(conn); + } + return ERR_VAL; +} + +/** + * @ingroup altcp + * @see tcp_mss() + */ +u16_t +altcp_mss(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->mss) { + return conn->fns->mss(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_sndbuf() + */ +u16_t +altcp_sndbuf(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->sndbuf) { + return conn->fns->sndbuf(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_sndqueuelen() + */ +u16_t +altcp_sndqueuelen(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->sndqueuelen) { + return conn->fns->sndqueuelen(conn); + } + return 0; +} + +void +altcp_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_disable) { + conn->fns->nagle_disable(conn); + } +} + +void +altcp_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_enable) { + conn->fns->nagle_enable(conn); + } +} + +int +altcp_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->nagle_disabled) { + return conn->fns->nagle_disabled(conn); + } + return 0; +} + +/** + * @ingroup altcp + * @see tcp_setprio() + */ +void +altcp_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn && conn->fns && conn->fns->setprio) { + conn->fns->setprio(conn, prio); + } +} + +err_t +altcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn && conn->fns && conn->fns->addrinfo) { + return conn->fns->addrinfo(conn, local, addr, port); + } + return ERR_VAL; +} + +ip_addr_t * +altcp_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn && conn->fns && conn->fns->getip) { + return conn->fns->getip(conn, local); + } + return NULL; +} + +u16_t +altcp_get_port(struct altcp_pcb *conn, int local) +{ + if (conn && conn->fns && conn->fns->getport) { + return conn->fns->getport(conn, local); + } + return 0; +} + +#ifdef LWIP_DEBUG +enum tcp_state +altcp_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn && conn->fns && conn->fns->dbg_get_tcp_state) { + return conn->fns->dbg_get_tcp_state(conn); + } + return CLOSED; +} +#endif + +/* Default implementations for the "virtual" functions */ + +void +altcp_default_set_poll(struct altcp_pcb *conn, u8_t interval) +{ + if (conn && conn->inner_conn) { + altcp_poll(conn->inner_conn, conn->poll, interval); + } +} + +void +altcp_default_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn && conn->inner_conn) { + altcp_recved(conn->inner_conn, len); + } +} + +err_t +altcp_default_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + if (conn && conn->inner_conn) { + return altcp_bind(conn->inner_conn, ipaddr, port); + } + return ERR_VAL; +} + +err_t +altcp_default_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + if (conn) { + if (shut_rx && shut_tx && conn->fns && conn->fns->close) { + /* default shutdown for both sides is close */ + return conn->fns->close(conn); + } + if (conn->inner_conn) { + return altcp_shutdown(conn->inner_conn, shut_rx, shut_tx); + } + } + return ERR_VAL; +} + +err_t +altcp_default_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + if (conn && 
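/* delegate to the wrapped inner connection (e.g. the plain TCP pcb) */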
conn->inner_conn) { + return altcp_write(conn->inner_conn, dataptr, len, apiflags); + } + return ERR_VAL; +} + +err_t +altcp_default_output(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_output(conn->inner_conn); + } + return ERR_VAL; +} + +u16_t +altcp_default_mss(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_mss(conn->inner_conn); + } + return 0; +} + +u16_t +altcp_default_sndbuf(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_sndbuf(conn->inner_conn); + } + return 0; +} + +u16_t +altcp_default_sndqueuelen(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_sndqueuelen(conn->inner_conn); + } + return 0; +} + +void +altcp_default_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + altcp_nagle_disable(conn->inner_conn); + } +} + +void +altcp_default_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + altcp_nagle_enable(conn->inner_conn); + } +} + +int +altcp_default_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_nagle_disabled(conn->inner_conn); + } + return 0; +} + +void +altcp_default_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn && conn->inner_conn) { + altcp_setprio(conn->inner_conn, prio); + } +} + +void +altcp_default_dealloc(struct altcp_pcb *conn) +{ + LWIP_UNUSED_ARG(conn); + /* nothing to do */ +} + +err_t +altcp_default_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn && conn->inner_conn) { + return altcp_get_tcp_addrinfo(conn->inner_conn, local, addr, port); + } + return ERR_VAL; +} + +ip_addr_t * +altcp_default_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn && conn->inner_conn) { + return altcp_get_ip(conn->inner_conn, local); + } + return NULL; +} + +u16_t +altcp_default_get_port(struct altcp_pcb *conn, int local) +{ + if (conn && conn->inner_conn) { + return altcp_get_port(conn->inner_conn, local); + } + return 0; +} + +#ifdef LWIP_DEBUG +enum tcp_state +altcp_default_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn && conn->inner_conn) { + return altcp_dbg_get_tcp_state(conn->inner_conn); + } + return CLOSED; +} +#endif + + +#endif /* LWIP_ALTCP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c new file mode 100644 index 0000000..8c2390e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c @@ -0,0 +1,87 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * + * This file contains allocation implementation that combine several layers. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c
new file mode 100644
index 0000000..8c2390e
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_alloc.c
@@ -0,0 +1,87 @@
+/**
+ * @file
+ * Application layered TCP connection API (to be used from TCPIP thread)\n
+ * This interface mimics the tcp callback API to the application while preventing
+ * direct linking (much like virtual functions).
+ * This way, an application can make use of other application layer protocols
+ * on top of TCP without knowing the details (e.g. TLS, proxy connection).
+ *
+ * This file contains allocation implementations that combine several layers.
+ */
+
+/*
+ * Copyright (c) 2017 Simon Goldschmidt
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Simon Goldschmidt
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/altcp.h"
+#include "lwip/altcp_tcp.h"
+#include "lwip/altcp_tls.h"
+#include "lwip/priv/altcp_priv.h"
+#include "lwip/mem.h"
+
+#include <string.h>
+
+#if LWIP_ALTCP_TLS
+
+/** This standard allocator function creates an altcp pcb for
+ * TLS over TCP */
+struct altcp_pcb *
+altcp_tls_new(struct altcp_tls_config *config, u8_t ip_type)
+{
+  struct altcp_pcb *inner_conn, *ret;
+  LWIP_UNUSED_ARG(ip_type);
+
+  inner_conn = altcp_tcp_new_ip_type(ip_type);
+  if (inner_conn == NULL) {
+    return NULL;
+  }
+  ret = altcp_tls_wrap(config, inner_conn);
+  if (ret == NULL) {
+    altcp_close(inner_conn);
+  }
+  return ret;
+}
+
+/** This allocator function fits the altcp_allocator_t signature: it creates
+ * an altcp pcb for TLS over TCP, taking the TLS config from the allocator argument */
+struct altcp_pcb *
+altcp_tls_alloc(void *arg, u8_t ip_type)
+{
+  return altcp_tls_new((struct altcp_tls_config *)arg, ip_type);
+}
+
+#endif /* LWIP_ALTCP_TLS */
+
+#endif /* LWIP_ALTCP */
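With this allocator in place, a TLS-over-TCP pcb can be created through the generic altcp_allocator_t interface instead of calling altcp_tls_new() directly. A sketch, assuming the mbedTLS-based altcp_tls port (make_tls_pcb, ca_cert and ca_len are illustrative names, not part of this patch):

/* Sketch: create a TLS pcb via the generic allocator interface. */
static struct altcp_pcb *
make_tls_pcb(const u8_t *ca_cert, size_t ca_len)   /* placeholder CA buffer */
{
  struct altcp_tls_config *cfg = altcp_tls_create_config_client(ca_cert, ca_len);
  altcp_allocator_t tls_allocator = { altcp_tls_alloc, cfg };
  return altcp_new(&tls_allocator);                /* NULL on allocation failure */
}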
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c
new file mode 100644
index 0000000..f5b8e84
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/altcp_tcp.c
@@ -0,0 +1,543 @@
+/**
+ * @file
+ * Application layered TCP connection API (to be used from TCPIP thread)\n
+ * This interface mimics the tcp callback API to the application while preventing
+ * direct linking (much like virtual functions).
+ * This way, an application can make use of other application layer protocols
+ * on top of TCP without knowing the details (e.g. TLS, proxy connection).
+ *
+ * This file contains the base implementation calling into tcp.
+ */
+
+/*
+ * Copyright (c) 2017 Simon Goldschmidt
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Simon Goldschmidt
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/altcp.h"
+#include "lwip/altcp_tcp.h"
+#include "lwip/priv/altcp_priv.h"
+#include "lwip/tcp.h"
+#include "lwip/mem.h"
+
+#include <string.h>
+
+#define ALTCP_TCP_ASSERT_CONN(conn) do { \
+  LWIP_ASSERT("conn->inner_conn == NULL", (conn)->inner_conn == NULL); \
+  LWIP_UNUSED_ARG(conn); /* for LWIP_NOASSERT */ } while(0)
+#define ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb) do { \
+  LWIP_ASSERT("pcb mismatch", (conn)->state == tpcb); \
+  LWIP_UNUSED_ARG(tpcb); /* for LWIP_NOASSERT */ \
+  ALTCP_TCP_ASSERT_CONN(conn); } while(0)
+
+
+/* Forward declaration: the actual definition is at the end of this file
+   since it contains pointers to the static functions declared here */
+extern const struct altcp_functions altcp_tcp_functions;
+
+static void altcp_tcp_setup(struct altcp_pcb *conn, struct tcp_pcb *tpcb);
+
+/* callback functions for TCP */
+static err_t
+altcp_tcp_accept(void *arg, struct tcp_pcb *new_tpcb, err_t err)
+{
+  struct altcp_pcb *listen_conn = (struct altcp_pcb *)arg;
+  if (listen_conn && listen_conn->accept) {
+    /* create a new altcp_conn to pass to the next 'accept' callback */
+    struct altcp_pcb *new_conn = altcp_alloc();
+    if (new_conn == NULL) {
+      return ERR_MEM;
+    }
+    altcp_tcp_setup(new_conn, new_tpcb);
+    return listen_conn->accept(listen_conn->arg, new_conn, err);
+  }
+  return ERR_ARG;
+}
+
+static err_t
+altcp_tcp_connected(void *arg, struct tcp_pcb *tpcb, err_t err)
+{
+  struct altcp_pcb *conn = (struct altcp_pcb *)arg;
+  if (conn) {
+    ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb);
+    if (conn->connected) {
+      return conn->connected(conn->arg, conn, err);
+    }
+  }
+  return ERR_OK;
+}
+
+static err_t
+altcp_tcp_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
+{
+  struct altcp_pcb *conn = (struct altcp_pcb *)arg;
+  if (conn) {
+    ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb);
+    if (conn->recv) {
+      return conn->recv(conn->arg, conn, p, err);
+    }
+  }
+  if (p != NULL) {
+    /* prevent memory leaks */
+    pbuf_free(p);
+  }
+  return ERR_OK;
+}
+
+static err_t
+altcp_tcp_sent(void *arg, struct tcp_pcb *tpcb, u16_t len)
+{
+  struct altcp_pcb *conn = (struct altcp_pcb *)arg;
+  if (conn) {
+    ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb);
+    if (conn->sent) {
+      return conn->sent(conn->arg, conn, len);
+    }
+  }
+  return
ERR_OK; +} + +static err_t +altcp_tcp_poll(void *arg, struct tcp_pcb *tpcb) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + ALTCP_TCP_ASSERT_CONN_PCB(conn, tpcb); + if (conn->poll) { + return conn->poll(conn->arg, conn); + } + } + return ERR_OK; +} + +static void +altcp_tcp_err(void *arg, err_t err) +{ + struct altcp_pcb *conn = (struct altcp_pcb *)arg; + if (conn) { + conn->state = NULL; /* already freed */ + if (conn->err) { + conn->err(conn->arg, err); + } + altcp_free(conn); + } +} + +/* setup functions */ + +static void +altcp_tcp_remove_callbacks(struct tcp_pcb *tpcb) +{ + tcp_arg(tpcb, NULL); + tcp_recv(tpcb, NULL); + tcp_sent(tpcb, NULL); + tcp_err(tpcb, NULL); + tcp_poll(tpcb, NULL, tpcb->pollinterval); +} + +static void +altcp_tcp_setup_callbacks(struct altcp_pcb *conn, struct tcp_pcb *tpcb) +{ + tcp_arg(tpcb, conn); + tcp_recv(tpcb, altcp_tcp_recv); + tcp_sent(tpcb, altcp_tcp_sent); + tcp_err(tpcb, altcp_tcp_err); + /* tcp_poll is set when interval is set by application */ + /* listen is set totally different :-) */ +} + +static void +altcp_tcp_setup(struct altcp_pcb *conn, struct tcp_pcb *tpcb) +{ + altcp_tcp_setup_callbacks(conn, tpcb); + conn->state = tpcb; + conn->fns = &altcp_tcp_functions; +} + +struct altcp_pcb * +altcp_tcp_new_ip_type(u8_t ip_type) +{ + /* Allocate the tcp pcb first to invoke the priority handling code + if we're out of pcbs */ + struct tcp_pcb *tpcb = tcp_new_ip_type(ip_type); + if (tpcb != NULL) { + struct altcp_pcb *ret = altcp_alloc(); + if (ret != NULL) { + altcp_tcp_setup(ret, tpcb); + return ret; + } else { + /* altcp_pcb allocation failed -> free the tcp_pcb too */ + tcp_close(tpcb); + } + } + return NULL; +} + +/** altcp_tcp allocator function fitting to @ref altcp_allocator_t / @ref altcp_new. +* +* arg pointer is not used for TCP. 
+*/ +struct altcp_pcb * +altcp_tcp_alloc(void *arg, u8_t ip_type) +{ + LWIP_UNUSED_ARG(arg); + return altcp_tcp_new_ip_type(ip_type); +} + +struct altcp_pcb * +altcp_tcp_wrap(struct tcp_pcb *tpcb) +{ + if (tpcb != NULL) { + struct altcp_pcb *ret = altcp_alloc(); + if (ret != NULL) { + altcp_tcp_setup(ret, tpcb); + return ret; + } + } + return NULL; +} + + +/* "virtual" functions calling into tcp */ +static void +altcp_tcp_set_poll(struct altcp_pcb *conn, u8_t interval) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_poll(pcb, altcp_tcp_poll, interval); + } +} + +static void +altcp_tcp_recved(struct altcp_pcb *conn, u16_t len) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_recved(pcb, len); + } +} + +static err_t +altcp_tcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_bind(pcb, ipaddr, port); +} + +static err_t +altcp_tcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + conn->connected = connected; + pcb = (struct tcp_pcb *)conn->state; + return tcp_connect(pcb, ipaddr, port, altcp_tcp_connected); +} + +static struct altcp_pcb * +altcp_tcp_listen(struct altcp_pcb *conn, u8_t backlog, err_t *err) +{ + struct tcp_pcb *pcb; + struct tcp_pcb *lpcb; + if (conn == NULL) { + return NULL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + lpcb = tcp_listen_with_backlog_and_err(pcb, backlog, err); + if (lpcb != NULL) { + conn->state = lpcb; + tcp_accept(lpcb, altcp_tcp_accept); + return conn; + } + return NULL; +} + +static void +altcp_tcp_abort(struct altcp_pcb *conn) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + tcp_abort(pcb); + } + } +} + +static err_t +altcp_tcp_close(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + if (pcb) { + err_t err; + tcp_poll_fn oldpoll = pcb->poll; + altcp_tcp_remove_callbacks(pcb); + err = tcp_close(pcb); + if (err != ERR_OK) { + /* not closed, set up all callbacks again */ + altcp_tcp_setup_callbacks(conn, pcb); + /* poll callback is not included in the above */ + tcp_poll(pcb, oldpoll, pcb->pollinterval); + return err; + } + conn->state = NULL; /* unsafe to reference pcb after tcp_close(). 
*/ + } + altcp_free(conn); + return ERR_OK; +} + +static err_t +altcp_tcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_shutdown(pcb, shut_rx, shut_tx); +} + +static err_t +altcp_tcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_write(pcb, dataptr, len, apiflags); +} + +static err_t +altcp_tcp_output(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return ERR_VAL; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_output(pcb); +} + +static u16_t +altcp_tcp_mss(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_mss(pcb); +} + +static u16_t +altcp_tcp_sndbuf(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_sndbuf(pcb); +} + +static u16_t +altcp_tcp_sndqueuelen(struct altcp_pcb *conn) +{ + struct tcp_pcb *pcb; + if (conn == NULL) { + return 0; + } + ALTCP_TCP_ASSERT_CONN(conn); + pcb = (struct tcp_pcb *)conn->state; + return tcp_sndqueuelen(pcb); +} + +static void +altcp_tcp_nagle_disable(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_nagle_disable(pcb); + } +} + +static void +altcp_tcp_nagle_enable(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_nagle_enable(pcb); + } +} + +static int +altcp_tcp_nagle_disabled(struct altcp_pcb *conn) +{ + if (conn && conn->state) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + return tcp_nagle_disabled(pcb); + } + return 0; +} + +static void +altcp_tcp_setprio(struct altcp_pcb *conn, u8_t prio) +{ + if (conn != NULL) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + tcp_setprio(pcb, prio); + } +} + +static void +altcp_tcp_dealloc(struct altcp_pcb *conn) +{ + LWIP_UNUSED_ARG(conn); + ALTCP_TCP_ASSERT_CONN(conn); + /* no private state to clean up */ +} + +static err_t +altcp_tcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + return tcp_tcp_get_tcp_addrinfo(pcb, local, addr, port); + } + return ERR_VAL; +} + +static ip_addr_t * +altcp_tcp_get_ip(struct altcp_pcb *conn, int local) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + if (local) { + return &pcb->local_ip; + } else { + return &pcb->remote_ip; + } + } + } + return NULL; +} + +static u16_t +altcp_tcp_get_port(struct altcp_pcb *conn, int local) +{ + if (conn) { + struct tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + if (local) { + return pcb->local_port; + } else { + return pcb->remote_port; + } + } + } + return 0; +} + +#ifdef LWIP_DEBUG +static enum tcp_state +altcp_tcp_dbg_get_tcp_state(struct altcp_pcb *conn) +{ + if (conn) { + struct 
tcp_pcb *pcb = (struct tcp_pcb *)conn->state; + ALTCP_TCP_ASSERT_CONN(conn); + if (pcb) { + return pcb->state; + } + } + return CLOSED; +} +#endif +const struct altcp_functions altcp_tcp_functions = { + altcp_tcp_set_poll, + altcp_tcp_recved, + altcp_tcp_bind, + altcp_tcp_connect, + altcp_tcp_listen, + altcp_tcp_abort, + altcp_tcp_close, + altcp_tcp_shutdown, + altcp_tcp_write, + altcp_tcp_output, + altcp_tcp_mss, + altcp_tcp_sndbuf, + altcp_tcp_sndqueuelen, + altcp_tcp_nagle_disable, + altcp_tcp_nagle_enable, + altcp_tcp_nagle_disabled, + altcp_tcp_setprio, + altcp_tcp_dealloc, + altcp_tcp_get_tcp_addrinfo, + altcp_tcp_get_ip, + altcp_tcp_get_port +#ifdef LWIP_DEBUG + , altcp_tcp_dbg_get_tcp_state +#endif +}; + +#endif /* LWIP_ALTCP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/def.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/def.c new file mode 100644 index 0000000..004a528 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/def.c @@ -0,0 +1,240 @@ +/** + * @file + * Common functions used throughout the stack. + * + * These are reference implementations of the byte swapping functions. + * Again with the aim of being simple, correct and fully portable. + * Byte swapping is the second thing you would want to optimize. You will + * need to port it to your architecture and in your cc.h: + * + * \#define lwip_htons(x) your_htons + * \#define lwip_htonl(x) your_htonl + * + * Note lwip_ntohs() and lwip_ntohl() are merely references to the htonx counterparts. + * + * If you \#define them to htons() and htonl(), you should + * \#define LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS to prevent lwIP from + * defining htonx/ntohx compatibility macros. + + * @defgroup sys_nonstandard Non-standard functions + * @ingroup sys_layer + * lwIP provides default implementations for non-standard functions. + * These can be mapped to OS functions to reduce code footprint if desired. + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/cc.h! + * These options cannot be \#defined in lwipopts.h since they are not options + * of lwIP itself, but options of the lwIP port to your system. + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Simon Goldschmidt
+ *
+ */
+
+#include "lwip/opt.h"
+#include "lwip/def.h"
+
+#include <string.h>
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+
+#if !defined(lwip_htons)
+/**
+ * Convert a u16_t from host- to network byte order.
+ *
+ * @param n u16_t in host byte order
+ * @return n in network byte order
+ */
+u16_t
+lwip_htons(u16_t n)
+{
+  return PP_HTONS(n);
+}
+#endif /* lwip_htons */
+
+#if !defined(lwip_htonl)
+/**
+ * Convert a u32_t from host- to network byte order.
+ *
+ * @param n u32_t in host byte order
+ * @return n in network byte order
+ */
+u32_t
+lwip_htonl(u32_t n)
+{
+  return PP_HTONL(n);
+}
+#endif /* lwip_htonl */
+
+#endif /* BYTE_ORDER == LITTLE_ENDIAN */
+
+#ifndef lwip_strnstr
+/**
+ * @ingroup sys_nonstandard
+ * lwIP default implementation for strnstr() non-standard function.
+ * This can be \#defined to strnstr() depending on your platform port.
+ */
+char *
+lwip_strnstr(const char *buffer, const char *token, size_t n)
+{
+  const char *p;
+  size_t tokenlen = strlen(token);
+  if (tokenlen == 0) {
+    return LWIP_CONST_CAST(char *, buffer);
+  }
+  for (p = buffer; *p && (p + tokenlen <= buffer + n); p++) {
+    if ((*p == *token) && (strncmp(p, token, tokenlen) == 0)) {
+      return LWIP_CONST_CAST(char *, p);
+    }
+  }
+  return NULL;
+}
+#endif
+
+#ifndef lwip_stricmp
+/**
+ * @ingroup sys_nonstandard
+ * lwIP default implementation for stricmp() non-standard function.
+ * This can be \#defined to stricmp() depending on your platform port.
+ */
+int
+lwip_stricmp(const char *str1, const char *str2)
+{
+  char c1, c2;
+
+  do {
+    c1 = *str1++;
+    c2 = *str2++;
+    if (c1 != c2) {
+      char c1_upc = c1 | 0x20;
+      if ((c1_upc >= 'a') && (c1_upc <= 'z')) {
+        /* characters are not equal and one is in the alphabet range:
+           downcase both chars and check again */
+        char c2_upc = c2 | 0x20;
+        if (c1_upc != c2_upc) {
+          /* still not equal */
+          /* don't care for < or > */
+          return 1;
+        }
+      } else {
+        /* characters are not equal but none is in the alphabet range */
+        return 1;
+      }
+    }
+  } while (c1 != 0);
+  return 0;
+}
+#endif
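On most embedded targets the reference byte-swap functions above are replaced with compiler intrinsics via arch/cc.h, as the file header suggests. A sketch of such an override (only meaningful on little-endian targets; __builtin_bswap16/32 assume GCC or Clang, so verify against your toolchain):

/* arch/cc.h sketch: map lwIP's byte-order helpers to compiler intrinsics. */
#define lwip_htons(x) __builtin_bswap16(x)
#define lwip_htonl(x) __builtin_bswap32(x)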
+
+#ifndef lwip_strnicmp
+/**
+ * @ingroup sys_nonstandard
+ * lwIP default implementation for strnicmp() non-standard function.
+ * This can be \#defined to strnicmp() depending on your platform port.
+ */
+int
+lwip_strnicmp(const char *str1, const char *str2, size_t len)
+{
+  char c1, c2;
+
+  do {
+    c1 = *str1++;
+    c2 = *str2++;
+    if (c1 != c2) {
+      char c1_upc = c1 | 0x20;
+      if ((c1_upc >= 'a') && (c1_upc <= 'z')) {
+        /* characters are not equal and one is in the alphabet range:
+           downcase both chars and check again */
+        char c2_upc = c2 | 0x20;
+        if (c1_upc != c2_upc) {
+          /* still not equal */
+          /* don't care for < or > */
+          return 1;
+        }
+      } else {
+        /* characters are not equal but none is in the alphabet range */
+        return 1;
+      }
+    }
+    len--;
+  } while ((len != 0) && (c1 != 0));
+  return 0;
+}
+#endif
+
+#ifndef lwip_itoa
+/**
+ * @ingroup sys_nonstandard
+ * lwIP default implementation for itoa() non-standard function.
+ * This can be \#defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform port.
+ */
+void
+lwip_itoa(char *result, size_t bufsize, int number)
+{
+  char *res = result;
+  char *tmp = result + bufsize - 1;
+  int n = (number >= 0) ? number : -number;
+
+  /* handle invalid bufsize */
+  if (bufsize < 2) {
+    if (bufsize == 1) {
+      *result = 0;
+    }
+    return;
+  }
+
+  /* First, add sign */
+  if (number < 0) {
+    *res++ = '-';
+  }
+  /* Then create the string from the end and stop if buffer full,
+     and ensure output string is zero terminated */
+  *tmp = 0;
+  while ((n != 0) && (tmp > res)) {
+    char val = (char)('0' + (n % 10));
+    tmp--;
+    *tmp = val;
+    n = n / 10;
+  }
+  if (n) {
+    /* buffer is too small */
+    *result = 0;
+    return;
+  }
+  if (*tmp == 0) {
+    /* Nothing added? */
+    *res++ = '0';
+    *res++ = 0;
+    return;
+  }
+  /* move from temporary buffer to output buffer (sign is not moved) */
+  memmove(res, tmp, (size_t)((result + bufsize) - tmp));
+}
+#endif
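lwip_itoa() builds the digits from the end of the buffer and stores an empty string when the value does not fit, so callers can detect truncation by checking result[0]. A quick illustration (itoa_demo and buf are placeholder names):

/* Illustration of lwip_itoa() behavior. */
static void
itoa_demo(void)
{
  char buf[12];
  lwip_itoa(buf, sizeof(buf), -42);  /* buf now contains "-42" */
  lwip_itoa(buf, 3, 12345);          /* buffer too small: buf[0] == '\0' signals truncation */
}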
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/dns.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/dns.c
new file mode 100644
index 0000000..737b339
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/dns.c
@@ -0,0 +1,1631 @@
+/**
+ * @file
+ * DNS - host name to IP address resolver.
+ *
+ * @defgroup dns DNS
+ * @ingroup callbackstyle_api
+ *
+ * Implements a DNS host name to IP address resolver.
+ *
+ * The lwIP DNS resolver functions are used to lookup a host name and
+ * map it to a numerical IP address. It maintains a list of resolved
+ * hostnames that can be queried with the dns_lookup() function.
+ * New hostnames can be resolved using the dns_query() function.
+ *
+ * The lwIP version of the resolver also adds a non-blocking version of
+ * gethostbyname() that will work with a raw API application. This function
+ * checks for an IP address string first and converts it if it is valid.
+ * gethostbyname() then does a dns_lookup() to see if the name is
+ * already in the table. If so, the IP is returned. If not, a query is
+ * issued and the function returns with an ERR_INPROGRESS status. The app
+ * using the dns client must then go into a waiting state.
+ *
+ * Once a hostname has been resolved (or found to be non-existent),
+ * the resolver code calls a specified callback function (which
+ * must be implemented by the module that uses the resolver).
+ *
+ * Multicast DNS queries are supported for names ending in ".local".
+ * However, only "One-Shot Multicast DNS Queries" are supported (RFC 6762
+ * chapter 5.1); this is not a fully compliant implementation of continuous
+ * mDNS querying!
+ *
+ * All functions must be called from TCPIP thread.
+ *
+ * @see DNS_MAX_SERVERS
+ * @see LWIP_DHCP_MAX_DNS_SERVERS
+ * @see @ref netconn_common for thread-safe access.
+ */
+
+/*
+ * Port to lwIP from uIP
+ * by Jim Pettinato April 2007
+ *
+ * security fixes and more by Simon Goldschmidt
+ *
+ * uIP version Copyright (c) 2002-2003, Adam Dunkels.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-----------------------------------------------------------------------------
+ * RFC 1035 - Domain names - implementation and specification
+ * RFC 2181 - Clarifications to the DNS Specification
+ *----------------------------------------------------------------------------*/
+
+/** @todo: define good default values (rfc compliance) */
+/** @todo: improve answer parsing, more checks... */
+/** @todo: check RFC1035 - 7.3. Processing responses */
+/** @todo: one-shot mDNS: dual-stack fallback to another IP version */
+
+/*-----------------------------------------------------------------------------
+ * Includes
+ *----------------------------------------------------------------------------*/
+
+#include "lwip/opt.h"
+
+#if LWIP_DNS /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/def.h"
+#include "lwip/udp.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/dns.h"
+#include "lwip/prot/dns.h"
+
+#include <string.h>
+
+/** Random generator function to create random TXIDs and source ports for queries */
+#ifndef DNS_RAND_TXID
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_XID) != 0)
+#define DNS_RAND_TXID LWIP_RAND
+#else
+static u16_t dns_txid;
+#define DNS_RAND_TXID() (++dns_txid)
+#endif
+#endif
+
+/** Limits the source port to be >= 1024 by default */
+#ifndef DNS_PORT_ALLOWED
+#define DNS_PORT_ALLOWED(port) ((port) >= 1024)
+#endif
+
+/** DNS resource record max. TTL (one week as default) */
+#ifndef DNS_MAX_TTL
+#define DNS_MAX_TTL 604800
+#elif DNS_MAX_TTL > 0x7FFFFFFF
+#error DNS_MAX_TTL must be a positive 32-bit value
+#endif
+
+#if DNS_TABLE_SIZE > 255
+#error DNS_TABLE_SIZE must fit into an u8_t
+#endif
+#if DNS_MAX_SERVERS > 255
+#error DNS_MAX_SERVERS must fit into an u8_t
+#endif
+
+/* The number of parallel requests (i.e. calls to dns_gethostbyname
+ * that cannot be answered from the DNS table).
+ * This is set to the table size by default.
+ */
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0)
+#ifndef DNS_MAX_REQUESTS
+#define DNS_MAX_REQUESTS DNS_TABLE_SIZE
+#else
+#if DNS_MAX_REQUESTS > 255
+#error DNS_MAX_REQUESTS must fit into an u8_t
+#endif
+#endif
+#else
+/* In this configuration, both arrays have to have the same size and are used
+ * like one entry (used/free) */
+#define DNS_MAX_REQUESTS DNS_TABLE_SIZE
+#endif
+
+/* The number of UDP source ports used in parallel */
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0)
+#ifndef DNS_MAX_SOURCE_PORTS
+#define DNS_MAX_SOURCE_PORTS DNS_MAX_REQUESTS
+#else
+#if DNS_MAX_SOURCE_PORTS > 255
+#error DNS_MAX_SOURCE_PORTS must fit into an u8_t
+#endif
+#endif
+#else
+#ifdef DNS_MAX_SOURCE_PORTS
+#undef DNS_MAX_SOURCE_PORTS
+#endif
+#define DNS_MAX_SOURCE_PORTS 1
+#endif
+
+#if LWIP_IPV4 && LWIP_IPV6
+#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) (((t) == LWIP_DNS_ADDRTYPE_IPV6_IPV4) || ((t) == LWIP_DNS_ADDRTYPE_IPV6))
+#define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) (IP_IS_V6_VAL(ip) ? LWIP_DNS_ADDRTYPE_IS_IPV6(t) : (!LWIP_DNS_ADDRTYPE_IS_IPV6(t)))
+#define LWIP_DNS_ADDRTYPE_ARG(x) , x
+#define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) x
+#define LWIP_DNS_SET_ADDRTYPE(x, y) do { x = y; } while(0)
+#else
+#if LWIP_IPV6
+#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 1
+#else
+#define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 0
+#endif
+#define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) 1
+#define LWIP_DNS_ADDRTYPE_ARG(x)
+#define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) 0
+#define LWIP_DNS_SET_ADDRTYPE(x, y)
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+#define LWIP_DNS_ISMDNS_ARG(x) , x
+#else
+#define LWIP_DNS_ISMDNS_ARG(x)
+#endif
+
+/** DNS query message structure.
+    No packing needed: only used locally on the stack. */
+struct dns_query {
+  /* DNS query record starts with either a domain name or a pointer
+     to a name already present somewhere in the packet. */
+  u16_t type;
+  u16_t cls;
+};
+#define SIZEOF_DNS_QUERY 4
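Immediately before these fixed type/class fields, the query carries the hostname encoded as length-prefixed labels (RFC 1035), which is what dns_send() below produces and dns_compare_name() walks. For illustration, "www.example.com" becomes:

/* RFC 1035 label encoding of "www.example.com" (illustration only):
   each label is prefixed by its length; a zero byte terminates the name. */
static const u8_t encoded_name[] = {
  3, 'w', 'w', 'w',
  7, 'e', 'x', 'a', 'm', 'p', 'l', 'e',
  3, 'c', 'o', 'm',
  0
};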
+
+/** DNS answer message structure.
+    No packing needed: only used locally on the stack. */
+struct dns_answer {
+  /* DNS answer record starts with either a domain name or a pointer
+     to a name already present somewhere in the packet. */
+  u16_t type;
+  u16_t cls;
+  u32_t ttl;
+  u16_t len;
+};
+#define SIZEOF_DNS_ANSWER 10
+/* maximum allowed size for the struct, since it is not packed */
+#define SIZEOF_DNS_ANSWER_ASSERT 12
+
+/* DNS table entry states */
+typedef enum {
+  DNS_STATE_UNUSED = 0,
+  DNS_STATE_NEW = 1,
+  DNS_STATE_ASKING = 2,
+  DNS_STATE_DONE = 3
+} dns_state_enum_t;
+
+/** DNS table entry */
+struct dns_table_entry {
+  u32_t ttl;
+  ip_addr_t ipaddr;
+  u16_t txid;
+  u8_t state;
+  u8_t server_idx;
+  u8_t tmr;
+  u8_t retries;
+  u8_t seqno;
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0)
+  u8_t pcb_idx;
+#endif
+  char name[DNS_MAX_NAME_LENGTH];
+#if LWIP_IPV4 && LWIP_IPV6
+  u8_t reqaddrtype;
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+  u8_t is_mdns;
+#endif
+};
+
+/** DNS request table entry: used when dns_gethostbyname cannot answer the
+ * request from the DNS table */
+struct dns_req_entry {
+  /* pointer to callback on DNS query done */
+  dns_found_callback found;
+  /* argument passed to the callback function */
+  void *arg;
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0)
+  u8_t dns_table_idx;
+#endif
+#if LWIP_IPV4 && LWIP_IPV6
+  u8_t reqaddrtype;
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+};
+
+#if DNS_LOCAL_HOSTLIST
+
+#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC
+/** Local host-list. For hostnames in this list, no
+ *  external name resolution is performed */
+static struct local_hostlist_entry *local_hostlist_dynamic;
+#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */
+
+/** Defining this allows the local_hostlist_static to be placed in a different
+ *  linker section (e.g. FLASH) */
+#ifndef DNS_LOCAL_HOSTLIST_STORAGE_PRE
+#define DNS_LOCAL_HOSTLIST_STORAGE_PRE static
+#endif /* DNS_LOCAL_HOSTLIST_STORAGE_PRE */
+/** Defining this allows the local_hostlist_static to be placed in a different
+ *  linker section (e.g. FLASH) */
+#ifndef DNS_LOCAL_HOSTLIST_STORAGE_POST
+#define DNS_LOCAL_HOSTLIST_STORAGE_POST
+#endif /* DNS_LOCAL_HOSTLIST_STORAGE_POST */
+DNS_LOCAL_HOSTLIST_STORAGE_PRE struct local_hostlist_entry local_hostlist_static[]
+  DNS_LOCAL_HOSTLIST_STORAGE_POST = DNS_LOCAL_HOSTLIST_INIT;
+
+#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */
+
+static void dns_init_local(void);
+static err_t dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype));
+#endif /* DNS_LOCAL_HOSTLIST */
+
+
+/* forward declarations */
+static void dns_recv(void *s, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port);
+static void dns_check_entries(void);
+static void dns_call_found(u8_t idx, ip_addr_t *addr);
+
+/*-----------------------------------------------------------------------------
+ * Globals
+ *----------------------------------------------------------------------------*/
+
+/* DNS variables */
+static struct udp_pcb *dns_pcbs[DNS_MAX_SOURCE_PORTS];
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0)
+static u8_t dns_last_pcb_idx;
+#endif
+static u8_t dns_seqno;
+static struct dns_table_entry dns_table[DNS_TABLE_SIZE];
+static struct dns_req_entry dns_requests[DNS_MAX_REQUESTS];
+static ip_addr_t dns_servers[DNS_MAX_SERVERS];
+
+#if LWIP_IPV4
+const ip_addr_t dns_mquery_v4group = DNS_MQUERY_IPV4_GROUP_INIT;
+#endif /* LWIP_IPV4 */
+#if LWIP_IPV6
+const ip_addr_t dns_mquery_v6group = DNS_MQUERY_IPV6_GROUP_INIT;
+#endif /* LWIP_IPV6 */
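Resolution through these tables is asynchronous: dns_gethostbyname() (defined later in this file) either answers immediately from the cache or returns ERR_INPROGRESS and later invokes a dns_found_callback. A minimal raw-API sketch of that pattern (my_dns_found and start_lookup are illustrative names):

/* Sketch of the asynchronous lookup pattern. */
static void
my_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg)
{
  LWIP_UNUSED_ARG(name);
  LWIP_UNUSED_ARG(arg);
  if (ipaddr != NULL) {
    /* resolved: connect, send, ... */
  } else {
    /* resolution failed or timed out */
  }
}

static void
start_lookup(void)
{
  ip_addr_t addr;
  err_t err = dns_gethostbyname("example.org", &addr, my_dns_found, NULL);
  if (err == ERR_OK) {
    /* cache hit: addr is already valid, the callback will NOT be called */
  } else if (err != ERR_INPROGRESS) {
    /* lookup could not be started */
  }
}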
+
+/**
+ * Initialize the resolver: set up the UDP pcb and configure the default server
+ * (if DNS_SERVER_ADDRESS is set).
+ */
+void
+dns_init(void)
+{
+#ifdef DNS_SERVER_ADDRESS
+  /* initialize default DNS server address */
+  ip_addr_t dnsserver;
+  DNS_SERVER_ADDRESS(&dnsserver);
+  dns_setserver(0, &dnsserver);
+#endif /* DNS_SERVER_ADDRESS */
+
+  LWIP_ASSERT("sanity check SIZEOF_DNS_QUERY",
+              sizeof(struct dns_query) == SIZEOF_DNS_QUERY);
+  LWIP_ASSERT("sanity check SIZEOF_DNS_ANSWER",
+              sizeof(struct dns_answer) <= SIZEOF_DNS_ANSWER_ASSERT);
+
+  LWIP_DEBUGF(DNS_DEBUG, ("dns_init: initializing\n"));
+
+  /* if dns client not yet initialized... */
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0)
+  if (dns_pcbs[0] == NULL) {
+    dns_pcbs[0] = udp_new_ip_type(IPADDR_TYPE_ANY);
+    LWIP_ASSERT("dns_pcbs[0] != NULL", dns_pcbs[0] != NULL);
+
+    /* initialize DNS table not needed (initialized to zero since it is a
+     * global variable) */
+    LWIP_ASSERT("For implicit initialization to work, DNS_STATE_UNUSED needs to be 0",
+                DNS_STATE_UNUSED == 0);
+
+    /* initialize DNS client */
+    udp_bind(dns_pcbs[0], IP_ANY_TYPE, 0);
+    udp_recv(dns_pcbs[0], dns_recv, NULL);
+  }
+#endif
+
+#if DNS_LOCAL_HOSTLIST
+  dns_init_local();
+#endif
+}
+
+/**
+ * @ingroup dns
+ * Initialize one of the DNS servers.
+ *
+ * @param numdns the index of the DNS server to set; must be < DNS_MAX_SERVERS
+ * @param dnsserver IP address of the DNS server to set
+ */
+void
+dns_setserver(u8_t numdns, const ip_addr_t *dnsserver)
+{
+  if (numdns < DNS_MAX_SERVERS) {
+    if (dnsserver != NULL) {
+      dns_servers[numdns] = (*dnsserver);
+    } else {
+      dns_servers[numdns] = *IP_ADDR_ANY;
+    }
+  }
+}
+
+/**
+ * @ingroup dns
+ * Obtain one of the currently configured DNS servers.
+ *
+ * @param numdns the index of the DNS server
+ * @return IP address of the indexed DNS server or "ip_addr_any" if the DNS
+ *         server has not been configured.
+ */
+const ip_addr_t *
+dns_getserver(u8_t numdns)
+{
+  if (numdns < DNS_MAX_SERVERS) {
+    return &dns_servers[numdns];
+  } else {
+    return IP_ADDR_ANY;
+  }
+}
+
+/**
+ * The DNS resolver client timer - handles retries and timeouts; should
+ * be called every DNS_TMR_INTERVAL milliseconds (every second by default).
+ */
+void
+dns_tmr(void)
+{
+  LWIP_DEBUGF(DNS_DEBUG, ("dns_tmr: dns_check_entries\n"));
+  dns_check_entries();
+}
+
+#if DNS_LOCAL_HOSTLIST
+static void
+dns_init_local(void)
+{
+#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT)
+  size_t i;
+  struct local_hostlist_entry *entry;
+  /* Dynamic: copy entries from DNS_LOCAL_HOSTLIST_INIT to list */
+  struct local_hostlist_entry local_hostlist_init[] = DNS_LOCAL_HOSTLIST_INIT;
+  size_t namelen;
+  for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_init); i++) {
+    struct local_hostlist_entry *init_entry = &local_hostlist_init[i];
+    LWIP_ASSERT("invalid host name (NULL)", init_entry->name != NULL);
+    namelen = strlen(init_entry->name);
+    LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN);
+    entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST);
+    LWIP_ASSERT("mem-error in dns_init_local", entry != NULL);
+    if (entry != NULL) {
+      char *entry_name = (char *)entry + sizeof(struct local_hostlist_entry);
+      MEMCPY(entry_name, init_entry->name, namelen);
+      entry_name[namelen] = 0;
+      entry->name = entry_name;
+      entry->addr = init_entry->addr;
+      entry->next = local_hostlist_dynamic;
+      local_hostlist_dynamic = entry;
+    }
+  }
+#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) */
+}
+
+/**
+ * @ingroup dns
+ * Iterate the local host-list for a hostname.
+ *
+ * @param iterator_fn a function that is called for every entry in the local host-list
+ * @param iterator_arg 3rd argument passed to iterator_fn
+ * @return the number of entries in the local host-list
+ */
+size_t
+dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg)
+{
+  size_t i;
+#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC
+  struct local_hostlist_entry *entry = local_hostlist_dynamic;
+  i = 0;
+  while (entry != NULL) {
+    if (iterator_fn != NULL) {
+      iterator_fn(entry->name, &entry->addr, iterator_arg);
+    }
+    i++;
+    entry = entry->next;
+  }
+#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */
+  for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) {
+    if (iterator_fn != NULL) {
+      iterator_fn(local_hostlist_static[i].name, &local_hostlist_static[i].addr, iterator_arg);
+    }
+  }
+#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */
+  return i;
+}
+
+/**
+ * @ingroup dns
+ * Scans the local host-list for a hostname.
+ *
+ * @param hostname Hostname to look for in the local host-list
+ * @param addr on success, filled with the first IP address found for the
+ *        hostname in the local host-list
+ * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 (ATTENTION: no fallback here!)
+ *                     - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 (ATTENTION: no fallback here!)
+ *                     - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only
+ *                     - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only
+ * @return ERR_OK if found, ERR_ARG if not found
+ */
+err_t
+dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype)
+{
+  LWIP_UNUSED_ARG(dns_addrtype);
+  return dns_lookup_local(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype));
+}
+
+/* Internal implementation for dns_local_lookup and dns_lookup */
+static err_t
+dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype))
+{
+#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC
+  struct local_hostlist_entry *entry = local_hostlist_dynamic;
+  while (entry != NULL) {
+    if ((lwip_stricmp(entry->name, hostname) == 0) &&
+        LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, entry->addr)) {
+      if (addr) {
+        ip_addr_copy(*addr, entry->addr);
+      }
+      return ERR_OK;
+    }
+    entry = entry->next;
+  }
+#else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */
+  size_t i;
+  for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) {
+    if ((lwip_stricmp(local_hostlist_static[i].name, hostname) == 0) &&
+        LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, local_hostlist_static[i].addr)) {
+      if (addr) {
+        ip_addr_copy(*addr, local_hostlist_static[i].addr);
+      }
+      return ERR_OK;
+    }
+  }
+#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */
+  return ERR_ARG;
+}
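When DNS_LOCAL_HOSTLIST_IS_DYNAMIC is enabled, entries can also be pinned at runtime with dns_local_addhost()/dns_local_removehost(), defined just below, so no query is ever sent for those names. A sketch (add_local_entry, the name and the address are illustrative):

/* Sketch: pin a hostname locally so it never triggers a DNS query. */
static void
add_local_entry(void)
{
  ip_addr_t addr;
  ipaddr_aton("192.0.2.10", &addr);        /* illustrative address */
  dns_local_addhost("myhost.example", &addr);
}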
+
+#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC
+/**
+ * @ingroup dns
+ * Remove all entries from the local host-list for a specific hostname
+ * and/or IP address
+ *
+ * @param hostname hostname for which entries shall be removed from the local
+ *                 host-list
+ * @param addr address for which entries shall be removed from the local host-list
+ * @return the number of removed entries
+ */
+int
+dns_local_removehost(const char *hostname, const ip_addr_t *addr)
+{
+  int removed = 0;
+  struct local_hostlist_entry *entry = local_hostlist_dynamic;
+  struct local_hostlist_entry *last_entry = NULL;
+  while (entry != NULL) {
+    if (((hostname == NULL) || !lwip_stricmp(entry->name, hostname)) &&
+        ((addr == NULL) || ip_addr_cmp(&entry->addr, addr))) {
+      struct local_hostlist_entry *free_entry;
+      if (last_entry != NULL) {
+        last_entry->next = entry->next;
+      } else {
+        local_hostlist_dynamic = entry->next;
+      }
+      free_entry = entry;
+      entry = entry->next;
+      memp_free(MEMP_LOCALHOSTLIST, free_entry);
+      removed++;
+    } else {
+      last_entry = entry;
+      entry = entry->next;
+    }
+  }
+  return removed;
+}
+
+/**
+ * @ingroup dns
+ * Add a hostname/IP address pair to the local host-list.
+ * Duplicates are not checked.
+ *
+ * @param hostname hostname of the new entry
+ * @param addr IP address of the new entry
+ * @return ERR_OK if succeeded or ERR_MEM on memory error
+ */
+err_t
+dns_local_addhost(const char *hostname, const ip_addr_t *addr)
+{
+  struct local_hostlist_entry *entry;
+  size_t namelen;
+  char *entry_name;
+  LWIP_ASSERT("invalid host name (NULL)", hostname != NULL);
+  namelen = strlen(hostname);
+  LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN);
+  entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST);
+  if (entry == NULL) {
+    return ERR_MEM;
+  }
+  entry_name = (char *)entry + sizeof(struct local_hostlist_entry);
+  MEMCPY(entry_name, hostname, namelen);
+  entry_name[namelen] = 0;
+  entry->name = entry_name;
+  ip_addr_copy(entry->addr, *addr);
+  entry->next = local_hostlist_dynamic;
+  local_hostlist_dynamic = entry;
+  return ERR_OK;
+}
+#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC*/
+#endif /* DNS_LOCAL_HOSTLIST */
+
+/**
+ * @ingroup dns
+ * Look up a hostname in the array of known hostnames.
+ *
+ * @note This function only looks in the internal array of known
+ * hostnames; it does not send out a query for the hostname if none
+ * was found. The function dns_enqueue() can be used to send a query
+ * for a hostname.
+ *
+ * @param name the hostname to look up
+ * @param addr if the hostname is found in the cached dns_table, its IP
+ *        address is copied here
+ * @return ERR_OK if found, ERR_ARG if not found
+ */
+static err_t
+dns_lookup(const char *name, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype))
+{
+  u8_t i;
+#if DNS_LOCAL_HOSTLIST
+  if (dns_lookup_local(name, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) {
+    return ERR_OK;
+  }
+#endif /* DNS_LOCAL_HOSTLIST */
+#ifdef DNS_LOOKUP_LOCAL_EXTERN
+  if (DNS_LOOKUP_LOCAL_EXTERN(name, addr, LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(dns_addrtype)) == ERR_OK) {
+    return ERR_OK;
+  }
+#endif /* DNS_LOOKUP_LOCAL_EXTERN */
+
+  /* Walk through name list, return entry if found. If not, return NULL. */
+  for (i = 0; i < DNS_TABLE_SIZE; ++i) {
+    if ((dns_table[i].state == DNS_STATE_DONE) &&
+        (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0) &&
+        LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, dns_table[i].ipaddr)) {
+      LWIP_DEBUGF(DNS_DEBUG, ("dns_lookup: \"%s\": found = ", name));
+      ip_addr_debug_print_val(DNS_DEBUG, dns_table[i].ipaddr);
+      LWIP_DEBUGF(DNS_DEBUG, ("\n"));
+      if (addr) {
+        ip_addr_copy(*addr, dns_table[i].ipaddr);
+      }
+      return ERR_OK;
+    }
+  }
+
+  return ERR_ARG;
+}
+
+/**
+ * Compare the "dotted" name "query" with the encoded name "response"
+ * to make sure an answer from the DNS server matches the current dns_table
+ * entry (otherwise, answers might arrive late for hostnames no longer on
+ * the list).
+ *
+ * For now, this function compares case-insensitively to cope with all kinds of
+ * servers. This also means that "dns 0x20 bit encoding" must be checked
+ * externally, if we want to implement it.
+ * Currently, the request is sent exactly as passed in by the user.
+ * + * @param query hostname (not encoded) from the dns_table + * @param p pbuf containing the encoded hostname in the DNS response + * @param start_offset offset into p where the name starts + * @return 0xFFFF: names differ, other: names equal -> offset behind name + */ +static u16_t +dns_compare_name(const char *query, struct pbuf *p, u16_t start_offset) +{ + int n; + u16_t response_offset = start_offset; + + do { + n = pbuf_try_get_at(p, response_offset); + if ((n < 0) || (response_offset == 0xFFFF)) { + /* error or overflow */ + return 0xFFFF; + } + response_offset++; + /** @see RFC 1035 - 4.1.4. Message compression */ + if ((n & 0xc0) == 0xc0) { + /* Compressed name: cannot be equal since we don't send them */ + return 0xFFFF; + } else { + /* Not compressed name */ + while (n > 0) { + int c = pbuf_try_get_at(p, response_offset); + if (c < 0) { + return 0xFFFF; + } + if (lwip_tolower((*query)) != lwip_tolower((u8_t)c)) { + return 0xFFFF; + } + if (response_offset == 0xFFFF) { + /* would overflow */ + return 0xFFFF; + } + response_offset++; + ++query; + --n; + } + ++query; + } + n = pbuf_try_get_at(p, response_offset); + if (n < 0) { + return 0xFFFF; + } + } while (n != 0); + + if (response_offset == 0xFFFF) { + /* would overflow */ + return 0xFFFF; + } + return (u16_t)(response_offset + 1); +} + +/** + * Walk through a compact encoded DNS name and return the end of the name. + * + * @param p pbuf containing the name + * @param query_idx start index into p pointing to encoded DNS name in the DNS server response + * @return index to end of the name + */ +static u16_t +dns_skip_name(struct pbuf *p, u16_t query_idx) +{ + int n; + u16_t offset = query_idx; + + do { + n = pbuf_try_get_at(p, offset++); + if ((n < 0) || (offset == 0)) { + return 0xFFFF; + } + /** @see RFC 1035 - 4.1.4. Message compression */ + if ((n & 0xc0) == 0xc0) { + /* Compressed name: since we only want to skip it (not check it), stop here */ + break; + } else { + /* Not compressed name */ + if (offset + n >= p->tot_len) { + return 0xFFFF; + } + offset = (u16_t)(offset + n); + } + n = pbuf_try_get_at(p, offset); + if (n < 0) { + return 0xFFFF; + } + } while (n != 0); + + if (offset == 0xFFFF) { + return 0xFFFF; + } + return (u16_t)(offset + 1); +} + +/** + * Send a DNS query packet. + * + * @param idx the DNS table entry index for which to send a request + * @return ERR_OK if packet is sent; an err_t indicating the problem otherwise + */ +static err_t +dns_send(u8_t idx) +{ + err_t err; + struct dns_hdr hdr; + struct dns_query qry; + struct pbuf *p; + u16_t query_idx, copy_len; + const char *hostname, *hostname_part; + u8_t n; + u8_t pcb_idx; + struct dns_table_entry *entry = &dns_table[idx]; + + LWIP_DEBUGF(DNS_DEBUG, ("dns_send: dns_servers[%"U16_F"] \"%s\": request\n", + (u16_t)(entry->server_idx), entry->name)); + LWIP_ASSERT("dns server out of array", entry->server_idx < DNS_MAX_SERVERS); + if (ip_addr_isany_val(dns_servers[entry->server_idx]) +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + && !entry->is_mdns +#endif + ) { + /* DNS server not valid anymore, e.g. 
PPP netif has been shut down */ + /* call specified callback function if provided */ + dns_call_found(idx, NULL); + /* flush this entry */ + entry->state = DNS_STATE_UNUSED; + return ERR_OK; + } + + /* if here, we have either a new query or a retry on a previous query to process */ + p = pbuf_alloc(PBUF_TRANSPORT, (u16_t)(SIZEOF_DNS_HDR + strlen(entry->name) + 2 + + SIZEOF_DNS_QUERY), PBUF_RAM); + if (p != NULL) { + const ip_addr_t *dst; + u16_t dst_port; + /* fill dns header */ + memset(&hdr, 0, SIZEOF_DNS_HDR); + hdr.id = lwip_htons(entry->txid); + hdr.flags1 = DNS_FLAG1_RD; + hdr.numquestions = PP_HTONS(1); + pbuf_take(p, &hdr, SIZEOF_DNS_HDR); + hostname = entry->name; + --hostname; + + /* convert hostname into suitable query format. */ + query_idx = SIZEOF_DNS_HDR; + do { + ++hostname; + hostname_part = hostname; + for (n = 0; *hostname != '.' && *hostname != 0; ++hostname) { + ++n; + } + copy_len = (u16_t)(hostname - hostname_part); + if (query_idx + n + 1 > 0xFFFF) { + /* u16_t overflow */ + goto overflow_return; + } + pbuf_put_at(p, query_idx, n); + pbuf_take_at(p, hostname_part, copy_len, (u16_t)(query_idx + 1)); + query_idx = (u16_t)(query_idx + n + 1); + } while (*hostname != 0); + pbuf_put_at(p, query_idx, 0); + query_idx++; + + /* fill dns query */ + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) { + qry.type = PP_HTONS(DNS_RRTYPE_AAAA); + } else { + qry.type = PP_HTONS(DNS_RRTYPE_A); + } + qry.cls = PP_HTONS(DNS_RRCLASS_IN); + pbuf_take_at(p, &qry, SIZEOF_DNS_QUERY, query_idx); + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + pcb_idx = entry->pcb_idx; +#else + pcb_idx = 0; +#endif + /* send dns packet */ + LWIP_DEBUGF(DNS_DEBUG, ("sending DNS request ID %d for name \"%s\" to server %d\r\n", + entry->txid, entry->name, entry->server_idx)); +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (entry->is_mdns) { + dst_port = DNS_MQUERY_PORT; +#if LWIP_IPV6 + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) { + dst = &dns_mquery_v6group; + } +#endif +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif +#if LWIP_IPV4 + { + dst = &dns_mquery_v4group; + } +#endif + } else +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + dst_port = DNS_SERVER_PORT; + dst = &dns_servers[entry->server_idx]; + } + err = udp_sendto(dns_pcbs[pcb_idx], p, dst, dst_port); + + /* free pbuf */ + pbuf_free(p); + } else { + err = ERR_MEM; + } + + return err; +overflow_return: + pbuf_free(p); + return ERR_VAL; +} + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) +static struct udp_pcb * +dns_alloc_random_port(void) +{ + err_t err; + struct udp_pcb *pcb; + + pcb = udp_new_ip_type(IPADDR_TYPE_ANY); + if (pcb == NULL) { + /* out of memory, have to reuse an existing pcb */ + return NULL; + } + do { + u16_t port = (u16_t)DNS_RAND_TXID(); + if (DNS_PORT_ALLOWED(port)) { + err = udp_bind(pcb, IP_ANY_TYPE, port); + } else { + /* this port is not allowed, try again */ + err = ERR_USE; + } + } while (err == ERR_USE); + if (err != ERR_OK) { + udp_remove(pcb); + return NULL; + } + udp_recv(pcb, dns_recv, NULL); + return pcb; +} + +/** + * dns_alloc_pcb() - allocates a new pcb (or reuses an existing one) to be used + * for sending a request + * + * @return an index into dns_pcbs + */ +static u8_t +dns_alloc_pcb(void) +{ + u8_t i; + u8_t idx; + + for (i = 0; i < DNS_MAX_SOURCE_PORTS; i++) { + if (dns_pcbs[i] == NULL) { + break; + } + } + if (i < DNS_MAX_SOURCE_PORTS) { + dns_pcbs[i] = dns_alloc_random_port(); + if (dns_pcbs[i] != NULL) { + /* succeeded */ + dns_last_pcb_idx = i; + return i; + } + } + /* if we 
come here, creating a new UDP pcb failed, so we have to use
+     an already existing one (so overflow is no issue) */
+  for (i = 0, idx = (u8_t)(dns_last_pcb_idx + 1); i < DNS_MAX_SOURCE_PORTS; i++, idx++) {
+    if (idx >= DNS_MAX_SOURCE_PORTS) {
+      idx = 0;
+    }
+    if (dns_pcbs[idx] != NULL) {
+      dns_last_pcb_idx = idx;
+      return idx;
+    }
+  }
+  return DNS_MAX_SOURCE_PORTS;
+}
+#endif /* ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) */
+
+/**
+ * dns_call_found() - call the found callback and check if there are duplicate
+ * entries for the given hostname. If there are any, their found callback will
+ * be called and they will be removed.
+ *
+ * @param idx dns table index of the entry that is resolved or removed
+ * @param addr IP address for the hostname (or NULL on error or memory shortage)
+ */
+static void
+dns_call_found(u8_t idx, ip_addr_t *addr)
+{
+#if ((LWIP_DNS_SECURE & (LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT)) != 0)
+  u8_t i;
+#endif
+
+#if LWIP_IPV4 && LWIP_IPV6
+  if (addr != NULL) {
+    /* check that address type matches the request and adapt the table entry */
+    if (IP_IS_V6_VAL(*addr)) {
+      LWIP_ASSERT("invalid response", LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype));
+      dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6;
+    } else {
+      LWIP_ASSERT("invalid response", !LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype));
+      dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4;
+    }
+  }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0)
+  for (i = 0; i < DNS_MAX_REQUESTS; i++) {
+    if (dns_requests[i].found && (dns_requests[i].dns_table_idx == idx)) {
+      (*dns_requests[i].found)(dns_table[idx].name, addr, dns_requests[i].arg);
+      /* flush this entry */
+      dns_requests[i].found = NULL;
+    }
+  }
+#else
+  if (dns_requests[idx].found) {
+    (*dns_requests[idx].found)(dns_table[idx].name, addr, dns_requests[idx].arg);
+  }
+  dns_requests[idx].found = NULL;
+#endif
+#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0)
+  /* close the pcb used unless other requests are still using it */
+  for (i = 0; i < DNS_MAX_REQUESTS; i++) {
+    if (i == idx) {
+      continue; /* only check other requests */
+    }
+    if (dns_table[i].state == DNS_STATE_ASKING) {
+      if (dns_table[i].pcb_idx == dns_table[idx].pcb_idx) {
+        /* another request is still using the same pcb */
+        dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS;
+        break;
+      }
+    }
+  }
+  if (dns_table[idx].pcb_idx < DNS_MAX_SOURCE_PORTS) {
+    /* if we come here, the pcb is not used any more and can be removed */
+    udp_remove(dns_pcbs[dns_table[idx].pcb_idx]);
+    dns_pcbs[dns_table[idx].pcb_idx] = NULL;
+    dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS;
+  }
+#endif
+}
+
+/* Create a query transmission ID that is unique for all outstanding queries */
+static u16_t
+dns_create_txid(void)
+{
+  u16_t txid;
+  u8_t i;
+
+again:
+  txid = (u16_t)DNS_RAND_TXID();
+
+  /* check whether the ID is unique */
+  for (i = 0; i < DNS_TABLE_SIZE; i++) {
+    if ((dns_table[i].state == DNS_STATE_ASKING) &&
+        (dns_table[i].txid == txid)) {
+      /* ID already used by another pending query */
+      goto again;
+    }
+  }
+
+  return txid;
+}
+
+/**
+ * Check whether there are other backup DNS servers available to try
+ */
+static u8_t
+dns_backupserver_available(struct dns_table_entry *pentry)
+{
+  u8_t ret = 0;
+
+  if (pentry) {
+    if ((pentry->server_idx + 1 < DNS_MAX_SERVERS) && !ip_addr_isany_val(dns_servers[pentry->server_idx + 1])) {
+      ret = 1;
+    }
+  }
+
+  return ret;
+}
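dns_backupserver_available() only helps if more than one resolver was configured, so a typical setup registers a secondary server up front. A sketch (configure_dns_servers is an illustrative name; the addresses are documentation-range placeholders):

/* Sketch: configure a primary and a backup DNS server. */
static void
configure_dns_servers(void)
{
  ip_addr_t dns1, dns2;
  ipaddr_aton("192.0.2.53", &dns1);      /* primary (placeholder address) */
  ipaddr_aton("198.51.100.53", &dns2);   /* backup, tried after DNS_MAX_RETRIES failures */
  dns_setserver(0, &dns1);
  dns_setserver(1, &dns2);
}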
+
+/**
+ * dns_check_entry() - see if an entry has not yet been queried and, if so, send out a query.
+ * Check an entry in the dns_table:
+ * - send out query for new entries
+ * - retry old pending entries on timeout (also with different servers)
+ * - remove completed entries from the table if their TTL has expired
+ *
+ * @param i index of the dns_table entry to check
+ */
+static void
+dns_check_entry(u8_t i)
+{
+  err_t err;
+  struct dns_table_entry *entry = &dns_table[i];
+
+  LWIP_ASSERT("array index out of bounds", i < DNS_TABLE_SIZE);
+
+  switch (entry->state) {
+    case DNS_STATE_NEW:
+      /* initialize new entry */
+      entry->txid = dns_create_txid();
+      entry->state = DNS_STATE_ASKING;
+      entry->server_idx = 0;
+      entry->tmr = 1;
+      entry->retries = 0;
+
+      /* send DNS packet for this entry */
+      err = dns_send(i);
+      if (err != ERR_OK) {
+        LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING,
+                    ("dns_send returned error: %s\n", lwip_strerr(err)));
+      }
+      break;
+    case DNS_STATE_ASKING:
+      if (--entry->tmr == 0) {
+        if (++entry->retries == DNS_MAX_RETRIES) {
+          if (dns_backupserver_available(entry)
+#if LWIP_DNS_SUPPORT_MDNS_QUERIES
+              && !entry->is_mdns
+#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */
+             ) {
+            /* change of server */
+            entry->server_idx++;
+            entry->tmr = 1;
+            entry->retries = 0;
+          } else {
+            LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": timeout\n", entry->name));
+            /* call specified callback function if provided */
+            dns_call_found(i, NULL);
+            /* flush this entry */
+            entry->state = DNS_STATE_UNUSED;
+            break;
+          }
+        } else {
+          /* wait longer for the next retry */
+          entry->tmr = entry->retries;
+        }
+
+        /* send DNS packet for this entry */
+        err = dns_send(i);
+        if (err != ERR_OK) {
+          LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING,
+                      ("dns_send returned error: %s\n", lwip_strerr(err)));
+        }
+      }
+      break;
+    case DNS_STATE_DONE:
+      /* if the time to live has run out */
+      if ((entry->ttl == 0) || (--entry->ttl == 0)) {
+        LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": flush\n", entry->name));
+        /* flush this entry, there cannot be any related pending entries in this state */
+        entry->state = DNS_STATE_UNUSED;
+      }
+      break;
+    case DNS_STATE_UNUSED:
+      /* nothing to do */
+      break;
+    default:
+      LWIP_ASSERT("unknown dns_table entry state:", 0);
+      break;
+  }
+}
+
+/**
+ * Call dns_check_entry for each entry in dns_table - check all entries.
+ */
+static void
+dns_check_entries(void)
+{
+  u8_t i;
+
+  for (i = 0; i < DNS_TABLE_SIZE; ++i) {
+    dns_check_entry(i);
+  }
+}
+
+/**
+ * Save TTL and call dns_call_found for correct response.
+ */
+static void
+dns_correct_response(u8_t idx, u32_t ttl)
+{
+  struct dns_table_entry *entry = &dns_table[idx];
+
+  entry->state = DNS_STATE_DONE;
+
+  LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response = ", entry->name));
+  ip_addr_debug_print_val(DNS_DEBUG, entry->ipaddr);
+  LWIP_DEBUGF(DNS_DEBUG, ("\n"));
+
+  /* read the answer resource record's TTL, and cap it if needed */
+  entry->ttl = ttl;
+  if (entry->ttl > DNS_MAX_TTL) {
+    entry->ttl = DNS_MAX_TTL;
+  }
+  dns_call_found(idx, &entry->ipaddr);
+
+  if (entry->ttl == 0) {
+    /* RFC 883, page 29: "Zero values are
+       interpreted to mean that the RR can only be used for the
+       transaction in progress, and should not be cached."
+       -> flush this entry now */
+    /* entry reused during callback? */
+    if (entry->state == DNS_STATE_DONE) {
+      entry->state = DNS_STATE_UNUSED;
+    }
+  }
+}
+ */ +static void +dns_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + u8_t i; + u16_t txid; + u16_t res_idx; + struct dns_hdr hdr; + struct dns_answer ans; + struct dns_query qry; + u16_t nquestions, nanswers; + + LWIP_UNUSED_ARG(arg); + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(port); + + /* is the dns message big enough? */ + if (p->tot_len < (SIZEOF_DNS_HDR + SIZEOF_DNS_QUERY)) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: pbuf too small\n")); + /* free pbuf and return */ + goto ignore_packet; + } + + /* copy dns payload inside static buffer for processing */ + if (pbuf_copy_partial(p, &hdr, SIZEOF_DNS_HDR, 0) == SIZEOF_DNS_HDR) { + /* Match the ID in the DNS header against the name table. */ + txid = lwip_htons(hdr.id); + for (i = 0; i < DNS_TABLE_SIZE; i++) { + struct dns_table_entry *entry = &dns_table[i]; + if ((entry->state == DNS_STATE_ASKING) && + (entry->txid == txid)) { + + /* We only care about the question(s) and the answers. The authrr + and the extrarr are simply discarded. */ + nquestions = lwip_htons(hdr.numquestions); + nanswers = lwip_htons(hdr.numanswers); + + /* Check for correct response. */ + if ((hdr.flags1 & DNS_FLAG1_RESPONSE) == 0) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": not a response\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + if (nquestions != 1) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response does not match the query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (!entry->is_mdns) +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + /* Check whether response comes from the same network address to which the + question was sent. (RFC 5452) */ + if (!ip_addr_cmp(addr, &dns_servers[entry->server_idx])) { + goto ignore_packet; /* ignore this packet */ + } + } + + /* Check if the name in the "question" part matches the name in the entry and + skip over it if equal. */ + res_idx = dns_compare_name(entry->name, p, SIZEOF_DNS_HDR); + if (res_idx == 0xFFFF) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response does not match the query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + + /* check if "question" part matches the request */ + if (pbuf_copy_partial(p, &qry, SIZEOF_DNS_QUERY, res_idx) != SIZEOF_DNS_QUERY) { + goto ignore_packet; /* ignore this packet */ + } + if ((qry.cls != PP_HTONS(DNS_RRCLASS_IN)) || + (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_AAAA))) || + (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_A)))) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response does not match the query\n", entry->name)); + goto ignore_packet; /* ignore this packet */ + } + /* skip the rest of the "question" part */ + if (res_idx + SIZEOF_DNS_QUERY > 0xFFFF) { + goto ignore_packet; + } + res_idx = (u16_t)(res_idx + SIZEOF_DNS_QUERY); + + /* Check for error. If so, call callback to inform. 
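+ A non-zero RCODE in flags2 (e.g. name error or server failure) ends up here; if a backup + DNS server is configured, the query is retried against it before the failure is reported. 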
*/ + if (hdr.flags2 & DNS_FLAG2_ERR_MASK) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in flags\n", entry->name)); + + /* if there is another backup DNS server to try + * then don't stop the DNS request + */ + if (dns_backupserver_available(entry)) { + /* avoid retrying the same server */ + entry->retries = DNS_MAX_RETRIES-1; + entry->tmr = 1; + + /* contact next available server for this entry */ + dns_check_entry(i); + + goto ignore_packet; + } + } else { + while ((nanswers > 0) && (res_idx < p->tot_len)) { + /* skip answer resource record's host name */ + res_idx = dns_skip_name(p, res_idx); + if (res_idx == 0xFFFF) { + goto ignore_packet; /* ignore this packet */ + } + + /* Check for IP address type and Internet class. Others are discarded. */ + if (pbuf_copy_partial(p, &ans, SIZEOF_DNS_ANSWER, res_idx) != SIZEOF_DNS_ANSWER) { + goto ignore_packet; /* ignore this packet */ + } + if (res_idx + SIZEOF_DNS_ANSWER > 0xFFFF) { + goto ignore_packet; + } + res_idx = (u16_t)(res_idx + SIZEOF_DNS_ANSWER); + + if (ans.cls == PP_HTONS(DNS_RRCLASS_IN)) { +#if LWIP_IPV4 + if ((ans.type == PP_HTONS(DNS_RRTYPE_A)) && (ans.len == PP_HTONS(sizeof(ip4_addr_t)))) { +#if LWIP_IPV4 && LWIP_IPV6 + if (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + ip4_addr_t ip4addr; + /* read the IP address after answer resource record's header */ + if (pbuf_copy_partial(p, &ip4addr, sizeof(ip4_addr_t), res_idx) != sizeof(ip4_addr_t)) { + goto ignore_packet; /* ignore this packet */ + } + ip_addr_copy_from_ip4(dns_table[i].ipaddr, ip4addr); + pbuf_free(p); + /* handle correct response */ + dns_correct_response(i, lwip_ntohl(ans.ttl)); + return; + } + } +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + if ((ans.type == PP_HTONS(DNS_RRTYPE_AAAA)) && (ans.len == PP_HTONS(sizeof(ip6_addr_p_t)))) { +#if LWIP_IPV4 && LWIP_IPV6 + if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + ip6_addr_p_t ip6addr; + /* read the IP address after answer resource record's header */ + if (pbuf_copy_partial(p, &ip6addr, sizeof(ip6_addr_p_t), res_idx) != sizeof(ip6_addr_p_t)) { + goto ignore_packet; /* ignore this packet */ + } + /* @todo: scope ip6addr? Might be required for link-local addresses at least? 
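+ (A link-local fe80::/10 answer would additionally need a zone index to be usable when + several netifs are active; it is stored unscoped here.) 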
*/ + ip_addr_copy_from_ip6_packed(dns_table[i].ipaddr, ip6addr); + pbuf_free(p); + /* handle correct response */ + dns_correct_response(i, lwip_ntohl(ans.ttl)); + return; + } + } +#endif /* LWIP_IPV6 */ + } + /* skip this answer */ + if ((int)(res_idx + lwip_htons(ans.len)) > 0xFFFF) { + goto ignore_packet; /* ignore this packet */ + } + res_idx = (u16_t)(res_idx + lwip_htons(ans.len)); + --nanswers; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) || + (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) { + if (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) { + /* IPv4 failed, try IPv6 */ + dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6; + } else { + /* IPv6 failed, try IPv4 */ + dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4; + } + pbuf_free(p); + dns_table[i].state = DNS_STATE_NEW; + dns_check_entry(i); + return; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in response\n", entry->name)); + } + /* call callback to indicate error, clean up memory and return */ + pbuf_free(p); + dns_call_found(i, NULL); + dns_table[i].state = DNS_STATE_UNUSED; + return; + } + } + } + +ignore_packet: + /* deallocate memory and return */ + pbuf_free(p); + return; +} + +/** + * Queues a new hostname to resolve and sends out a DNS query for that hostname + * + * @param name the hostname that is to be queried + * @param hostnamelen length of the hostname + * @param found a callback function to be called on success, failure or timeout + * @param callback_arg argument to pass to the callback function + * @return err_t return code. + */ +static err_t +dns_enqueue(const char *name, size_t hostnamelen, dns_found_callback found, + void *callback_arg LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype) LWIP_DNS_ISMDNS_ARG(u8_t is_mdns)) +{ + u8_t i; + u8_t lseq, lseqi; + struct dns_table_entry *entry = NULL; + size_t namelen; + struct dns_req_entry *req; + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + u8_t r; + /* check for duplicate entries */ + for (i = 0; i < DNS_TABLE_SIZE; i++) { + if ((dns_table[i].state == DNS_STATE_ASKING) && + (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0)) { +#if LWIP_IPV4 && LWIP_IPV6 + if (dns_table[i].reqaddrtype != dns_addrtype) { + /* requested address types don't match + this can lead to 2 concurrent requests, but mixing the address types + for the same host should not be that common */ + continue; + } +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + /* this is a duplicate entry, find a free request entry */ + for (r = 0; r < DNS_MAX_REQUESTS; r++) { + if (dns_requests[r].found == 0) { + dns_requests[r].found = found; + dns_requests[r].arg = callback_arg; + dns_requests[r].dns_table_idx = i; + LWIP_DNS_SET_ADDRTYPE(dns_requests[r].reqaddrtype, dns_addrtype); + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": duplicate request\n", name)); + return ERR_INPROGRESS; + } + } + } + } + /* no duplicate entries found */ +#endif + + /* search an unused entry, or the oldest one */ + lseq = 0; + lseqi = DNS_TABLE_SIZE; + for (i = 0; i < DNS_TABLE_SIZE; ++i) { + entry = &dns_table[i]; + /* is it an unused entry ? 
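+ (Unused entries are preferred; otherwise the oldest DONE entry, determined by the + seqno age computed below, is evicted.) 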
*/ + if (entry->state == DNS_STATE_UNUSED) { + break; + } + /* check if this is the oldest completed entry */ + if (entry->state == DNS_STATE_DONE) { + u8_t age = (u8_t)(dns_seqno - entry->seqno); + if (age > lseq) { + lseq = age; + lseqi = i; + } + } + } + + /* if we haven't found an unused entry, use the oldest completed one */ + if (i == DNS_TABLE_SIZE) { + if ((lseqi >= DNS_TABLE_SIZE) || (dns_table[lseqi].state != DNS_STATE_DONE)) { + /* no entry can be used now, table is full */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS entries table is full\n", name)); + return ERR_MEM; + } else { + /* use the oldest completed one */ + i = lseqi; + entry = &dns_table[i]; + } + } + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) + /* find a free request entry */ + req = NULL; + for (r = 0; r < DNS_MAX_REQUESTS; r++) { + if (dns_requests[r].found == NULL) { + req = &dns_requests[r]; + break; + } + } + if (req == NULL) { + /* no request entry can be used now, table is full */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS request entries table is full\n", name)); + return ERR_MEM; + } + req->dns_table_idx = i; +#else + /* in this configuration, the entry index is the same as the request index */ + req = &dns_requests[i]; +#endif + + /* use this entry */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS entry %"U16_F"\n", name, (u16_t)(i))); + + /* fill the entry */ + entry->state = DNS_STATE_NEW; + entry->seqno = dns_seqno; + LWIP_DNS_SET_ADDRTYPE(entry->reqaddrtype, dns_addrtype); + LWIP_DNS_SET_ADDRTYPE(req->reqaddrtype, dns_addrtype); + req->found = found; + req->arg = callback_arg; + namelen = LWIP_MIN(hostnamelen, DNS_MAX_NAME_LENGTH - 1); + MEMCPY(entry->name, name, namelen); + entry->name[namelen] = 0; + +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) + entry->pcb_idx = dns_alloc_pcb(); + if (entry->pcb_idx >= DNS_MAX_SOURCE_PORTS) { + /* failed to get a UDP pcb */ + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": failed to allocate a pcb\n", name)); + entry->state = DNS_STATE_UNUSED; + req->found = NULL; + return ERR_MEM; + } + LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS pcb %"U16_F"\n", name, (u16_t)(entry->pcb_idx))); +#endif + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + entry->is_mdns = is_mdns; +#endif + + dns_seqno++; + + /* force sending the query without waiting for the timer */ + dns_check_entry(i); + + /* dns query is enqueued */ + return ERR_INPROGRESS; +} + +/** + * @ingroup dns + * Resolve a hostname (string) into an IP address. + * NON-BLOCKING callback version for use with raw API!!! + * + * Returns immediately with one of the following err_t return codes: + * - ERR_OK if hostname is a valid IP address string or the host + * name is already in the local names table. + * - ERR_INPROGRESS if a request has been enqueued to be sent to the DNS + * server for resolution and no errors occurred. + * - ERR_ARG: dns client not initialized or invalid hostname + * + * @param hostname the hostname that is to be queried + * @param addr pointer to a ip_addr_t where to store the address if it is already + * cached in the dns_table (only valid if ERR_OK is returned!) + * @param found a callback function to be called on success, failure or timeout (only if + * ERR_INPROGRESS is returned!) + * @param callback_arg argument to pass to the callback function + * @return an err_t return code. 
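+ * + * Example usage (a minimal sketch; my_found_cb and the host name are illustrative application names, not lwIP symbols): + * + *  static void my_found_cb(const char *name, const ip_addr_t *ipaddr, void *arg) + *  { + *    if (ipaddr != NULL) { + *      // name was resolved; the address is in *ipaddr + *    } else { + *      // resolution failed or timed out + *    } + *  } + * + *  ip_addr_t addr; + *  err_t err = dns_gethostbyname("example.org", &addr, my_found_cb, NULL); + *  if (err == ERR_OK) { + *    // cached or numeric: addr is valid immediately, my_found_cb is not called + *  } else if (err == ERR_INPROGRESS) { + *    // query enqueued: my_found_cb will deliver the result + *  }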
+ */ +err_t +dns_gethostbyname(const char *hostname, ip_addr_t *addr, dns_found_callback found, + void *callback_arg) +{ + return dns_gethostbyname_addrtype(hostname, addr, found, callback_arg, LWIP_DNS_ADDRTYPE_DEFAULT); +} + +/** + * @ingroup dns + * Like dns_gethostbyname, but returned address type can be controlled: + * @param hostname the hostname that is to be queried + * @param addr pointer to a ip_addr_t where to store the address if it is already + * cached in the dns_table (only valid if ERR_OK is returned!) + * @param found a callback function to be called on success, failure or timeout (only if + * ERR_INPROGRESS is returned!) + * @param callback_arg argument to pass to the callback function + * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 first, try IPv6 if IPv4 fails only + * - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 first, try IPv4 if IPv6 fails only + * - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only + * - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only + */ +err_t +dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, dns_found_callback found, + void *callback_arg, u8_t dns_addrtype) +{ + size_t hostnamelen; +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + u8_t is_mdns; +#endif + /* not initialized or no valid server yet, or invalid addr pointer + * or invalid hostname or invalid hostname length */ + if ((addr == NULL) || + (!hostname) || (!hostname[0])) { + return ERR_ARG; + } +#if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0) + if (dns_pcbs[0] == NULL) { + return ERR_ARG; + } +#endif + hostnamelen = strlen(hostname); + if (hostnamelen >= DNS_MAX_NAME_LENGTH) { + LWIP_DEBUGF(DNS_DEBUG, ("dns_gethostbyname: name too long to resolve")); + return ERR_ARG; + } + + +#if LWIP_HAVE_LOOPIF + if (strcmp(hostname, "localhost") == 0) { + ip_addr_set_loopback(LWIP_DNS_ADDRTYPE_IS_IPV6(dns_addrtype), addr); + return ERR_OK; + } +#endif /* LWIP_HAVE_LOOPIF */ + + /* host name already in octet notation? set ip addr and return ERR_OK */ + if (ipaddr_aton(hostname, addr)) { +#if LWIP_IPV4 && LWIP_IPV6 + if ((IP_IS_V6(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV4)) || + (IP_IS_V4(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV6))) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + { + return ERR_OK; + } + } + /* already have this address cached? 
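+ (When DNS_LOCAL_HOSTLIST is enabled, dns_lookup() consults those static entries + before the dns_table cache.) 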
*/ + if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) { + return ERR_OK; + } +#if LWIP_IPV4 && LWIP_IPV6 + if ((dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) || (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) { + /* fallback to 2nd IP type and try again to lookup */ + u8_t fallback; + if (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) { + fallback = LWIP_DNS_ADDRTYPE_IPV6; + } else { + fallback = LWIP_DNS_ADDRTYPE_IPV4; + } + if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(fallback)) == ERR_OK) { + return ERR_OK; + } + } +#else /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_UNUSED_ARG(dns_addrtype); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#if LWIP_DNS_SUPPORT_MDNS_QUERIES + if (strstr(hostname, ".local") == &hostname[hostnamelen] - 6) { + is_mdns = 1; + } else { + is_mdns = 0; + } + + if (!is_mdns) +#endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ + { + /* prevent calling found callback if no server is set, return error instead */ + if (ip_addr_isany_val(dns_servers[0])) { + return ERR_VAL; + } + } + + /* queue query with specified callback */ + return dns_enqueue(hostname, hostnamelen, found, callback_arg LWIP_DNS_ADDRTYPE_ARG(dns_addrtype) + LWIP_DNS_ISMDNS_ARG(is_mdns)); +} + +#endif /* LWIP_DNS */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c new file mode 100644 index 0000000..25255b9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/inet_chksum.c @@ -0,0 +1,608 @@ +/** + * @file + * Internet checksum functions.\n + * + * These are some reference implementations of the checksum algorithm, with the + * aim of being simple, correct and fully portable. Checksumming is the + * first thing you would want to optimize for your platform. If you create + * your own version, link it in and in your cc.h put: + * + * \#define LWIP_CHKSUM your_checksum_routine + * + * Or you can select from the implementations below by defining + * LWIP_CHKSUM_ALGORITHM to 1, 2 or 3. + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/inet_chksum.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" + +#include <string.h> + +#ifndef LWIP_CHKSUM +# define LWIP_CHKSUM lwip_standard_chksum +# ifndef LWIP_CHKSUM_ALGORITHM +# define LWIP_CHKSUM_ALGORITHM 2 +# endif +u16_t lwip_standard_chksum(const void *dataptr, int len); +#endif +/* If none set: */ +#ifndef LWIP_CHKSUM_ALGORITHM +# define LWIP_CHKSUM_ALGORITHM 0 +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 1) /* Version #1 */ +/** + * lwip checksum + * + * @param dataptr points to start of data to be summed at any boundary + * @param len length of data to be summed + * @return host order (!) lwip checksum (non-inverted Internet sum) + * + * @note accumulator size limits summable length to 64k + * @note host endianness is irrelevant (p3 RFC1071) + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + u32_t acc; + u16_t src; + const u8_t *octetptr; + + acc = 0; + /* dataptr may be at odd or even addresses */ + octetptr = (const u8_t *)dataptr; + while (len > 1) { + /* declare first octet as most significant + thus assume network order, ignoring host order */ + src = (*octetptr) << 8; + octetptr++; + /* declare second octet as least significant */ + src |= (*octetptr); + octetptr++; + acc += src; + len -= 2; + } + if (len > 0) { + /* accumulate remaining octet */ + src = (*octetptr) << 8; + acc += src; + } + /* add deferred carry bits */ + acc = (acc >> 16) + (acc & 0x0000ffffUL); + if ((acc & 0xffff0000UL) != 0) { + acc = (acc >> 16) + (acc & 0x0000ffffUL); + } + /* This may be a little confusing: reorder sum using lwip_htons() + instead of lwip_ntohs() since it has a little less call overhead. + The caller must invert bits for Internet sum ! */ + return lwip_htons((u16_t)acc); +} +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 2) /* Alternative version #2 */ +/* + * Curt McDowell + * Broadcom Corp. + * csm@broadcom.com + * + * IP checksum two bytes at a time with support for + * unaligned buffer. + * Works for len up to and including 0x20000. + * by Curt McDowell, Broadcom Corp. 12/08/2005 + * + * @param dataptr points to start of data to be summed at any boundary + * @param len length of data to be summed + * @return host order (!) lwip checksum (non-inverted Internet sum) + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + const u8_t *pb = (const u8_t *)dataptr; + const u16_t *ps; + u16_t t = 0; + u32_t sum = 0; + int odd = ((mem_ptr_t)pb & 1); + + /* Get aligned to u16_t */ + if (odd && len > 0) { + ((u8_t *)&t)[1] = *pb++; + len--; + } + + /* Add the bulk of the data */ + ps = (const u16_t *)(const void *)pb; + while (len > 1) { + sum += *ps++; + len -= 2; + } + + /* Consume left-over byte, if any */ + if (len > 0) { + ((u8_t *)&t)[0] = *(const u8_t *)ps; + } + + /* Add end bytes */ + sum += t; + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... 
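+ (FOLD_U32T() adds the upper 16 bits of the accumulator into the lower 16; e.g. 0x1FFFE + folds to 0xFFFF. A first fold can itself carry, e.g. 0xFFFF0001 folds to 0x10000, and + only the second fold yields 0x0001, hence the two calls.) 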
*/ + sum = FOLD_U32T(sum); + sum = FOLD_U32T(sum); + + /* Swap if alignment was odd */ + if (odd) { + sum = SWAP_BYTES_IN_WORD(sum); + } + + return (u16_t)sum; +} +#endif + +#if (LWIP_CHKSUM_ALGORITHM == 3) /* Alternative version #3 */ +/** + * An optimized checksum routine. Basically, it uses loop-unrolling on + * the checksum loop, treating the head and tail bytes specially, whereas + * the inner loop acts on 8 bytes at a time. + * + * @param dataptr start of buffer to be checksummed. May be an odd byte address. + * @param len number of bytes in the buffer to be checksummed. + * @return host order (!) lwip checksum (non-inverted Internet sum) + * + * by Curt McDowell, Broadcom Corp. December 8th, 2005 + */ +u16_t +lwip_standard_chksum(const void *dataptr, int len) +{ + const u8_t *pb = (const u8_t *)dataptr; + const u16_t *ps; + u16_t t = 0; + const u32_t *pl; + u32_t sum = 0, tmp; + /* starts at odd byte address? */ + int odd = ((mem_ptr_t)pb & 1); + + if (odd && len > 0) { + ((u8_t *)&t)[1] = *pb++; + len--; + } + + ps = (const u16_t *)(const void *)pb; + + if (((mem_ptr_t)ps & 3) && len > 1) { + sum += *ps++; + len -= 2; + } + + pl = (const u32_t *)(const void *)ps; + + while (len > 7) { + tmp = sum + *pl++; /* ping */ + if (tmp < sum) { + tmp++; /* add back carry */ + } + + sum = tmp + *pl++; /* pong */ + if (sum < tmp) { + sum++; /* add back carry */ + } + + len -= 8; + } + + /* make room in upper bits */ + sum = FOLD_U32T(sum); + + ps = (const u16_t *)pl; + + /* 16-bit aligned word remaining? */ + while (len > 1) { + sum += *ps++; + len -= 2; + } + + /* dangling tail byte remaining? */ + if (len > 0) { /* include odd byte */ + ((u8_t *)&t)[0] = *(const u8_t *)ps; + } + + sum += t; /* add end bytes */ + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + sum = FOLD_U32T(sum); + sum = FOLD_U32T(sum); + + if (odd) { + sum = SWAP_BYTES_IN_WORD(sum); + } + + return (u16_t)sum; +} +#endif + +/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */ +static u16_t +inet_cksum_pseudo_base(struct pbuf *p, u8_t proto, u16_t proto_len, u32_t acc) +{ + struct pbuf *q; + int swapped = 0; + + /* iterate through all pbufs in the chain */ + for (q = p; q != NULL; q = q->next) { + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n", + (void *)q, (void *)q->next)); + acc += LWIP_CHKSUM(q->payload, q->len); + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/ + /* just executing this next line is probably faster than the if statement needed + to check whether we really need to execute it, and does no harm */ + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/ + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + + acc += (u32_t)lwip_htons((u16_t)proto); + acc += (u32_t)lwip_htons(proto_len); + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc)); + return (u16_t)~(acc & 0xffffUL); +} + +#if LWIP_IPV4 +/* inet_chksum_pseudo: + * + * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
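+ * The pseudo header summed in addition to the payload consists of the source address, the destination address, the protocol number and the payload length (RFC 768 / RFC 793); it is checksummed but never transmitted.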
+ * + * @param p chain of pbufs over which a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dest destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip4_addr_t *src, const ip4_addr_t *dest) +{ + u32_t acc; + u32_t addr; + + addr = ip4_addr_get_u32(src); + acc = (addr & 0xffffUL); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = ip4_addr_get_u32(dest); + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_base(p, proto, proto_len, acc); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain. + * IPv6 addresses are expected to be in network byte order. + * + * @param p chain of pbufs over which a checksum should be calculated (ip data part) + * @param proto ipv6 protocol/next header (used for checksum of pseudo header) + * @param proto_len length of the ipv6 payload (used for checksum of pseudo header) + * @param src source ipv6 address (used for checksum of pseudo header) + * @param dest destination ipv6 address (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip6_addr_t *src, const ip6_addr_t *dest) +{ + u32_t acc = 0; + u32_t addr; + u8_t addr_part; + + for (addr_part = 0; addr_part < 4; addr_part++) { + addr = src->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = dest->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + } + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_base(p, proto, proto_len, acc); +} +#endif /* LWIP_IPV6 */ + +/* ip_chksum_pseudo: + * + * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
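+ * The IPv4 or IPv6 variant above is selected at run time from the address type of dest.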
+ * + * @param p chain of pbufs over which a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dest destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip_addr_t *src, const ip_addr_t *dest) +{ +#if LWIP_IPV6 + if (IP_IS_V6(dest)) { + return ip6_chksum_pseudo(p, proto, proto_len, ip_2_ip6(src), ip_2_ip6(dest)); + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + { + return inet_chksum_pseudo(p, proto, proto_len, ip_2_ip4(src), ip_2_ip4(dest)); + } +#endif /* LWIP_IPV4 */ +} + +/** Parts of the pseudo checksum which are common to IPv4 and IPv6 */ +static u16_t +inet_cksum_pseudo_partial_base(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, u32_t acc) +{ + struct pbuf *q; + int swapped = 0; + u16_t chklen; + + /* iterate through all pbufs in the chain */ + for (q = p; (q != NULL) && (chksum_len > 0); q = q->next) { + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n", + (void *)q, (void *)q->next)); + chklen = q->len; + if (chklen > chksum_len) { + chklen = chksum_len; + } + acc += LWIP_CHKSUM(q->payload, chklen); + chksum_len = (u16_t)(chksum_len - chklen); + LWIP_ASSERT("delete me", chksum_len < 0x7fff); + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/ + /* fold the upper bit down */ + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/ + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + + acc += (u32_t)lwip_htons((u16_t)proto); + acc += (u32_t)lwip_htons(proto_len); + + /* Fold 32-bit sum to 16 bits + calling this twice is probably faster than if statements... */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc)); + return (u16_t)~(acc & 0xffffUL); +} + +#if LWIP_IPV4 +/* inet_chksum_pseudo_partial: + * + * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain. + * IP addresses are expected to be in network byte order. 
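+ * Unlike inet_chksum_pseudo(), only the first chksum_len bytes of the payload are summed, as needed e.g. for UDP-Lite's partial checksum coverage.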
+ * + * @param p chain of pbufs over which a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dest destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @param chksum_len number of payload bytes used to compute chksum + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest) +{ + u32_t acc; + u32_t addr; + + addr = ip4_addr_get_u32(src); + acc = (addr & 0xffffUL); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = ip4_addr_get_u32(dest); + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +/** + * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain. + * IPv6 addresses are expected to be in network byte order. Will only compute for a + * portion of the payload. + * + * @param p chain of pbufs over which a checksum should be calculated (ip data part) + * @param proto ipv6 protocol/next header (used for checksum of pseudo header) + * @param proto_len length of the ipv6 payload (used for checksum of pseudo header) + * @param chksum_len number of payload bytes used to compute chksum + * @param src source ipv6 address (used for checksum of pseudo header) + * @param dest destination ipv6 address (used for checksum of pseudo header) + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest) +{ + u32_t acc = 0; + u32_t addr; + u8_t addr_part; + + for (addr_part = 0; addr_part < 4; addr_part++) { + addr = src->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + addr = dest->addr[addr_part]; + acc = (u32_t)(acc + (addr & 0xffffUL)); + acc = (u32_t)(acc + ((addr >> 16) & 0xffffUL)); + } + /* fold down to 16 bits */ + acc = FOLD_U32T(acc); + acc = FOLD_U32T(acc); + + return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc); +} +#endif /* LWIP_IPV6 */ + +/* ip_chksum_pseudo_partial: + * + * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain. 
+ * + * @param p chain of pbufs over which a checksum should be calculated (ip data part) + * @param src source ip address (used for checksum of pseudo header) + * @param dest destination ip address (used for checksum of pseudo header) + * @param proto ip protocol (used for checksum of pseudo header) + * @param proto_len length of the ip data part (used for checksum of pseudo header) + * @param chksum_len number of payload bytes used to compute chksum + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest) +{ +#if LWIP_IPV6 + if (IP_IS_V6(dest)) { + return ip6_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip6(src), ip_2_ip6(dest)); + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + { + return inet_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip4(src), ip_2_ip4(dest)); + } +#endif /* LWIP_IPV4 */ +} + +/* inet_chksum: + * + * Calculates the Internet checksum over a portion of memory. Used primarily for IP + * and ICMP. + * + * @param dataptr start of the buffer to calculate the checksum (no alignment needed) + * @param len length of the buffer to calculate the checksum + * @return checksum (as u16_t) to be saved directly in the protocol header + */ + +u16_t +inet_chksum(const void *dataptr, u16_t len) +{ + return (u16_t)~(unsigned int)LWIP_CHKSUM(dataptr, len); +} + +/** + * Calculate a checksum over a chain of pbufs (without pseudo-header, much like + * inet_chksum only pbufs are used). + * + * @param p pbuf chain over which the checksum should be calculated + * @return checksum (as u16_t) to be saved directly in the protocol header + */ +u16_t +inet_chksum_pbuf(struct pbuf *p) +{ + u32_t acc; + struct pbuf *q; + int swapped = 0; + + acc = 0; + for (q = p; q != NULL; q = q->next) { + acc += LWIP_CHKSUM(q->payload, q->len); + acc = FOLD_U32T(acc); + if (q->len % 2 != 0) { + swapped = !swapped; + acc = SWAP_BYTES_IN_WORD(acc); + } + } + + if (swapped) { + acc = SWAP_BYTES_IN_WORD(acc); + } + return (u16_t)~(acc & 0xffffUL); +} + +/* These are some implementations for LWIP_CHKSUM_COPY, which copies data + * like MEMCPY but generates a checksum at the same time. Since this is a + * performance-sensitive function, you might want to create your own version + * in assembly targeted at your hardware by defining it in lwipopts.h: + * #define LWIP_CHKSUM_COPY(dst, src, len) your_chksum_copy(dst, src, len) + */ + +#if (LWIP_CHKSUM_COPY_ALGORITHM == 1) /* Version #1 */ +/** Safe but slow: first call MEMCPY, then call LWIP_CHKSUM. + * For architectures with big caches, data might still be in cache when + * generating the checksum after copying. + */ +u16_t +lwip_chksum_copy(void *dst, const void *src, u16_t len) +{ + MEMCPY(dst, src, len); + return LWIP_CHKSUM(dst, len); +} +#endif /* (LWIP_CHKSUM_COPY_ALGORITHM == 1) */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/init.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/init.c new file mode 100644 index 0000000..b688536 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/init.c @@ -0,0 +1,380 @@ +/** + * @file + * Modules initialization + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + */ + +#include "lwip/opt.h" + +#include "lwip/init.h" +#include "lwip/stats.h" +#include "lwip/sys.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/sockets.h" +#include "lwip/ip.h" +#include "lwip/raw.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/timeouts.h" +#include "lwip/etharp.h" +#include "lwip/ip6.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/api.h" + +#include "netif/ppp/ppp_opts.h" +#include "netif/ppp/ppp_impl.h" + +#ifndef LWIP_SKIP_PACKING_CHECK + +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct packed_struct_test { + PACK_STRUCT_FLD_8(u8_t dummy1); + PACK_STRUCT_FIELD(u32_t dummy2); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif +#define PACKED_STRUCT_TEST_EXPECTED_SIZE 5 + +#endif + +/* Compile-time sanity checks for configuration errors. + * These can be done independently of LWIP_DEBUG, without penalty. 
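+ * The packed_struct_test above must come out at 1 + 4 = 5 bytes (PACKED_STRUCT_TEST_EXPECTED_SIZE); lwip_init() asserts this to verify that the port's PACK_STRUCT_* macros actually pack.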
+ */ +#ifndef BYTE_ORDER +#error "BYTE_ORDER is not defined, you have to define it in your cc.h" +#endif +#if (!IP_SOF_BROADCAST && IP_SOF_BROADCAST_RECV) +#error "If you want to use broadcast filter per pcb on recv operations, you have to define IP_SOF_BROADCAST=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_UDPLITE) +#error "If you want to use UDP Lite, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_DHCP) +#error "If you want to use DHCP, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && !LWIP_RAW && LWIP_MULTICAST_TX_OPTIONS) +#error "If you want to use LWIP_MULTICAST_TX_OPTIONS, you have to define LWIP_UDP=1 and/or LWIP_RAW=1 in your lwipopts.h" +#endif +#if (!LWIP_UDP && LWIP_DNS) +#error "If you want to use DNS, you have to define LWIP_UDP=1 in your lwipopts.h" +#endif +#if !MEMP_MEM_MALLOC /* MEMP_NUM_* checks are disabled when not using the pool allocator */ +#if (LWIP_ARP && ARP_QUEUEING && (MEMP_NUM_ARP_QUEUE<=0)) +#error "If you want to use ARP Queueing, you have to define MEMP_NUM_ARP_QUEUE>=1 in your lwipopts.h" +#endif +#if (LWIP_RAW && (MEMP_NUM_RAW_PCB<=0)) +#error "If you want to use RAW, you have to define MEMP_NUM_RAW_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_UDP && (MEMP_NUM_UDP_PCB<=0)) +#error "If you want to use UDP, you have to define MEMP_NUM_UDP_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_TCP && (MEMP_NUM_TCP_PCB<=0)) +#error "If you want to use TCP, you have to define MEMP_NUM_TCP_PCB>=1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && (MEMP_NUM_IGMP_GROUP<=1)) +#error "If you want to use IGMP, you have to define MEMP_NUM_IGMP_GROUP>1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && !LWIP_MULTICAST_TX_OPTIONS) +#error "If you want to use IGMP, you have to define LWIP_MULTICAST_TX_OPTIONS==1 in your lwipopts.h" +#endif +#if (LWIP_IGMP && !LWIP_IPV4) +#error "IGMP needs LWIP_IPV4 enabled in your lwipopts.h" +#endif +#if ((LWIP_NETCONN || LWIP_SOCKET) && (MEMP_NUM_TCPIP_MSG_API<=0)) +#error "If you want to use Sequential API, you have to define MEMP_NUM_TCPIP_MSG_API>=1 in your lwipopts.h" +#endif +/* There must be sufficient timeouts, taking into account requirements of the subsystems. */ +#if LWIP_TIMERS && (MEMP_NUM_SYS_TIMEOUT < LWIP_NUM_SYS_TIMEOUT_INTERNAL) +#error "MEMP_NUM_SYS_TIMEOUT is too low to accommodate all required timeouts" +#endif +#if (IP_REASSEMBLY && (MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS)) +#error "MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS doesn't make sense since each struct ip_reassdata must hold 2 pbufs at least!" +#endif +#endif /* !MEMP_MEM_MALLOC */ +#if LWIP_WND_SCALE +#if (LWIP_TCP && (TCP_WND > 0xffffffff)) +#error "If you want to use TCP, TCP_WND must fit in an u32_t, so, you have to reduce it in your lwipopts.h" +#endif +#if (LWIP_TCP && (TCP_RCV_SCALE > 14)) +#error "The maximum valid window scale value is 14!" +#endif +#if (LWIP_TCP && (TCP_WND > (0xFFFFU << TCP_RCV_SCALE))) +#error "TCP_WND is bigger than the configured LWIP_WND_SCALE allows!" +#endif +#if (LWIP_TCP && ((TCP_WND >> TCP_RCV_SCALE) == 0)) +#error "TCP_WND is too small for the configured LWIP_WND_SCALE (results in zero window)!" 
+#endif +#else /* LWIP_WND_SCALE */ +#if (LWIP_TCP && (TCP_WND > 0xffff)) +#error "If you want to use TCP, TCP_WND must fit in an u16_t, so, you have to reduce it in your lwipopts.h (or enable window scaling)" +#endif +#endif /* LWIP_WND_SCALE */ +#if (LWIP_TCP && (TCP_SND_QUEUELEN > 0xffff)) +#error "If you want to use TCP, TCP_SND_QUEUELEN must fit in an u16_t, so, you have to reduce it in your lwipopts.h" +#endif +#if (LWIP_TCP && (TCP_SND_QUEUELEN < 2)) +#error "TCP_SND_QUEUELEN must be at least 2 for no-copy TCP writes to work" +#endif +#if (LWIP_TCP && ((TCP_MAXRTX > 12) || (TCP_SYNMAXRTX > 12))) +#error "If you want to use TCP, TCP_MAXRTX and TCP_SYNMAXRTX must be less than or equal to 12 (due to tcp_backoff table), so, you have to reduce them in your lwipopts.h" +#endif +#if (LWIP_TCP && TCP_LISTEN_BACKLOG && ((TCP_DEFAULT_LISTEN_BACKLOG < 0) || (TCP_DEFAULT_LISTEN_BACKLOG > 0xff))) +#error "If you want to use TCP backlog, TCP_DEFAULT_LISTEN_BACKLOG must fit into an u8_t" +#endif +#if (LWIP_TCP && LWIP_TCP_SACK_OUT && !TCP_QUEUE_OOSEQ) +#error "To use LWIP_TCP_SACK_OUT, TCP_QUEUE_OOSEQ needs to be enabled" +#endif +#if (LWIP_TCP && LWIP_TCP_SACK_OUT && (LWIP_TCP_MAX_SACK_NUM < 1)) +#error "LWIP_TCP_MAX_SACK_NUM must be greater than 0" +#endif +#if (LWIP_NETIF_API && (NO_SYS==1)) +#error "If you want to use NETIF API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if ((LWIP_SOCKET || LWIP_NETCONN) && (NO_SYS==1)) +#error "If you want to use Sequential API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if (LWIP_PPP_API && (NO_SYS==1)) +#error "If you want to use PPP API, you have to define NO_SYS=0 in your lwipopts.h" +#endif +#if (LWIP_PPP_API && (PPP_SUPPORT==0)) +#error "If you want to use PPP API, you have to enable PPP_SUPPORT in your lwipopts.h" +#endif +#if (((!LWIP_DHCP) || (!LWIP_AUTOIP)) && LWIP_DHCP_AUTOIP_COOP) +#error "If you want to use DHCP/AUTOIP cooperation mode, you have to define LWIP_DHCP=1 and LWIP_AUTOIP=1 in your lwipopts.h" +#endif +#if (((!LWIP_DHCP) || (!LWIP_ARP)) && DHCP_DOES_ARP_CHECK) +#error "If you want to use DHCP ARP checking, you have to define LWIP_DHCP=1 and LWIP_ARP=1 in your lwipopts.h" +#endif +#if (!LWIP_ARP && LWIP_AUTOIP) +#error "If you want to use AUTOIP, you have to define LWIP_ARP=1 in your lwipopts.h" +#endif +#if (LWIP_TCP && ((LWIP_EVENT_API && LWIP_CALLBACK_API) || (!LWIP_EVENT_API && !LWIP_CALLBACK_API))) +#error "One and exactly one of LWIP_EVENT_API and LWIP_CALLBACK_API has to be enabled in your lwipopts.h" +#endif +#if (LWIP_ALTCP && LWIP_EVENT_API) +#error "The application layered tcp API does not work with LWIP_EVENT_API" +#endif +#if (MEM_LIBC_MALLOC && MEM_USE_POOLS) +#error "MEM_LIBC_MALLOC and MEM_USE_POOLS may not both be simultaneously enabled in your lwipopts.h" +#endif +#if (MEM_USE_POOLS && !MEMP_USE_CUSTOM_POOLS) +#error "MEM_USE_POOLS requires custom pools (MEMP_USE_CUSTOM_POOLS) to be enabled in your lwipopts.h" +#endif +#if (PBUF_POOL_BUFSIZE <= MEM_ALIGNMENT) +#error "PBUF_POOL_BUFSIZE must be greater than MEM_ALIGNMENT or the offset may take the full first pbuf" +#endif +#if (DNS_LOCAL_HOSTLIST && !DNS_LOCAL_HOSTLIST_IS_DYNAMIC && !(defined(DNS_LOCAL_HOSTLIST_INIT))) +#error "you have to define DNS_LOCAL_HOSTLIST_INIT {{'host1', 0x123}, {'host2', 0x234}} to initialize DNS_LOCAL_HOSTLIST" +#endif +#if PPP_SUPPORT && !PPPOS_SUPPORT && !PPPOE_SUPPORT && !PPPOL2TP_SUPPORT +#error "PPP_SUPPORT needs at least one of PPPOS_SUPPORT, PPPOE_SUPPORT or PPPOL2TP_SUPPORT turned on" +#endif +#if PPP_SUPPORT && !PPP_IPV4_SUPPORT && !PPP_IPV6_SUPPORT +#error "PPP_SUPPORT needs PPP_IPV4_SUPPORT and/or PPP_IPV6_SUPPORT turned on" +#endif +#if PPP_SUPPORT && PPP_IPV4_SUPPORT && !LWIP_IPV4 +#error "PPP_IPV4_SUPPORT needs LWIP_IPV4 turned on" +#endif +#if PPP_SUPPORT && PPP_IPV6_SUPPORT && !LWIP_IPV6 +#error "PPP_IPV6_SUPPORT needs LWIP_IPV6 turned on" +#endif +#if !LWIP_ETHERNET && (LWIP_ARP || PPPOE_SUPPORT) +#error "LWIP_ETHERNET needs to be turned on for LWIP_ARP or PPPOE_SUPPORT" +#endif +#if LWIP_TCPIP_CORE_LOCKING_INPUT && !LWIP_TCPIP_CORE_LOCKING +#error "When using LWIP_TCPIP_CORE_LOCKING_INPUT, LWIP_TCPIP_CORE_LOCKING must be enabled, too" +#endif +#if LWIP_TCP && LWIP_NETIF_TX_SINGLE_PBUF && !TCP_OVERSIZE +#error "LWIP_NETIF_TX_SINGLE_PBUF needs TCP_OVERSIZE enabled to create single-pbuf TCP packets" +#endif +#if LWIP_NETCONN && LWIP_TCP +#if NETCONN_COPY != TCP_WRITE_FLAG_COPY +#error "NETCONN_COPY != TCP_WRITE_FLAG_COPY" +#endif +#if NETCONN_MORE != TCP_WRITE_FLAG_MORE +#error "NETCONN_MORE != TCP_WRITE_FLAG_MORE" +#endif +#endif /* LWIP_NETCONN && LWIP_TCP */ +#if LWIP_SOCKET +#endif /* LWIP_SOCKET */ + + +/* Compile-time checks for deprecated options. + */ +#ifdef MEMP_NUM_TCPIP_MSG +#error "MEMP_NUM_TCPIP_MSG option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef TCP_REXMIT_DEBUG +#error "TCP_REXMIT_DEBUG option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef RAW_STATS +#error "RAW_STATS option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef ETHARP_QUEUE_FIRST +#error "ETHARP_QUEUE_FIRST option is deprecated. Remove it from your lwipopts.h." +#endif +#ifdef ETHARP_ALWAYS_INSERT +#error "ETHARP_ALWAYS_INSERT option is deprecated. Remove it from your lwipopts.h." +#endif +#if !NO_SYS && LWIP_TCPIP_CORE_LOCKING && LWIP_COMPAT_MUTEX && !defined(LWIP_COMPAT_MUTEX_ALLOWED) +#error "LWIP_COMPAT_MUTEX cannot prevent priority inversion. It is recommended to implement priority-aware mutexes. (Define LWIP_COMPAT_MUTEX_ALLOWED to disable this error.)" +#endif + +#ifndef LWIP_DISABLE_TCP_SANITY_CHECKS +#define LWIP_DISABLE_TCP_SANITY_CHECKS 0 +#endif +#ifndef LWIP_DISABLE_MEMP_SANITY_CHECKS +#define LWIP_DISABLE_MEMP_SANITY_CHECKS 0 +#endif + +/* MEMP sanity checks */ +#if MEMP_MEM_MALLOC +#if !LWIP_DISABLE_MEMP_SANITY_CHECKS +#if LWIP_NETCONN || LWIP_SOCKET +#if !MEMP_NUM_NETCONN && LWIP_SOCKET +#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN cannot be 0 when using sockets!" +#endif +#else /* MEMP_MEM_MALLOC */ +#if MEMP_NUM_NETCONN > (MEMP_NUM_TCP_PCB+MEMP_NUM_TCP_PCB_LISTEN+MEMP_NUM_UDP_PCB+MEMP_NUM_RAW_PCB) +#error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN should be less than the sum of MEMP_NUM_{TCP,RAW,UDP}_PCB+MEMP_NUM_TCP_PCB_LISTEN. If you know what you are doing, define LWIP_DISABLE_MEMP_SANITY_CHECKS to 1 to disable this error." +#endif +#endif /* LWIP_NETCONN || LWIP_SOCKET */ +#endif /* !LWIP_DISABLE_MEMP_SANITY_CHECKS */ +#if MEM_USE_POOLS +#error "MEMP_MEM_MALLOC and MEM_USE_POOLS cannot be enabled at the same time" +#endif +#ifdef LWIP_HOOK_MEMP_AVAILABLE +#error "LWIP_HOOK_MEMP_AVAILABLE doesn't make sense with MEMP_MEM_MALLOC" +#endif +#endif /* MEMP_MEM_MALLOC */ + +/* TCP sanity checks */ +#if !LWIP_DISABLE_TCP_SANITY_CHECKS +#if LWIP_TCP +#if !MEMP_MEM_MALLOC && (MEMP_NUM_TCP_SEG < TCP_SND_QUEUELEN) +#error "lwip_sanity_check: WARNING: MEMP_NUM_TCP_SEG should be at least as big as TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." 
+#endif +#if TCP_SND_BUF < (2 * TCP_MSS) +#error "lwip_sanity_check: WARNING: TCP_SND_BUF must be at least as much as (2 * TCP_MSS) for things to work smoothly. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SND_QUEUELEN < (2 * (TCP_SND_BUF / TCP_MSS)) +#error "lwip_sanity_check: WARNING: TCP_SND_QUEUELEN must be at least as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SNDLOWAT >= TCP_SND_BUF +#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must be less than TCP_SND_BUF. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_SNDLOWAT >= (0xFFFF - (4 * TCP_MSS)) +#error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must at least be 4*MSS below u16_t overflow!" +#endif +#if TCP_SNDQUEUELOWAT >= TCP_SND_QUEUELEN +#error "lwip_sanity_check: WARNING: TCP_SNDQUEUELOWAT must be less than TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (PBUF_POOL_BUFSIZE <= (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)) +#error "lwip_sanity_check: WARNING: PBUF_POOL_BUFSIZE does not provide enough space for protocol headers. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (TCP_WND > (PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)))) +#error "lwip_sanity_check: WARNING: TCP_WND is larger than space provided by PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - protocol headers). If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#if TCP_WND < TCP_MSS +#error "lwip_sanity_check: WARNING: TCP_WND is smaller than MSS. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." +#endif +#endif /* LWIP_TCP */ +#endif /* !LWIP_DISABLE_TCP_SANITY_CHECKS */ + +/** + * @ingroup lwip_nosys + * Initialize all modules. + * Use this in NO_SYS mode. Use tcpip_init() otherwise. + */ +void +lwip_init(void) +{ +#ifndef LWIP_SKIP_CONST_CHECK + int a = 0; + LWIP_UNUSED_ARG(a); + LWIP_ASSERT("LWIP_CONST_CAST not implemented correctly. Check your lwIP port.", LWIP_CONST_CAST(void *, &a) == &a); +#endif +#ifndef LWIP_SKIP_PACKING_CHECK + LWIP_ASSERT("Struct packing not implemented correctly. 
Check your lwIP port.", sizeof(struct packed_struct_test) == PACKED_STRUCT_TEST_EXPECTED_SIZE); +#endif + + /* Modules initialization */ + stats_init(); +#if !NO_SYS + sys_init(); +#endif /* !NO_SYS */ + mem_init(); + memp_init(); + pbuf_init(); + netif_init(); +#if LWIP_IPV4 + ip_init(); +#if LWIP_ARP + etharp_init(); +#endif /* LWIP_ARP */ +#endif /* LWIP_IPV4 */ +#if LWIP_RAW + raw_init(); +#endif /* LWIP_RAW */ +#if LWIP_UDP + udp_init(); +#endif /* LWIP_UDP */ +#if LWIP_TCP + tcp_init(); +#endif /* LWIP_TCP */ +#if LWIP_IGMP + igmp_init(); +#endif /* LWIP_IGMP */ +#if LWIP_DNS + dns_init(); +#endif /* LWIP_DNS */ +#if PPP_SUPPORT + ppp_init(); +#endif + +#if LWIP_TIMERS + sys_timeouts_init(); +#endif /* LWIP_TIMERS */ +} diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ip.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ip.c new file mode 100644 index 0000000..d611315 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ip.c @@ -0,0 +1,167 @@ +/** + * @file + * Common IPv4 and IPv6 code + * + * @defgroup ip IP + * @ingroup callbackstyle_api + * + * @defgroup ip4 IPv4 + * @ingroup ip + * + * @defgroup ip6 IPv6 + * @ingroup ip + * + * @defgroup ipaddr IP address handling + * @ingroup infrastructure + * + * @defgroup ip4addr IPv4 only + * @ingroup ipaddr + * + * @defgroup ip6addr IPv6 only + * @ingroup ipaddr + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 || LWIP_IPV6 + +#include "lwip/ip_addr.h" +#include "lwip/ip.h" + +/** Global data for both IPv4 and IPv6 */ +struct ip_globals ip_data; + +#if LWIP_IPV4 && LWIP_IPV6 + +const ip_addr_t ip_addr_any_type = IPADDR_ANY_TYPE_INIT; + +/** + * @ingroup ipaddr + * Convert numeric IP address (both versions) into ASCII representation. + * returns ptr to static buffer; not reentrant! 
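+ * In threaded code, prefer ipaddr_ntoa_r() below, which writes into a caller-supplied buffer.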
+ * + * @param addr ip address in network order to convert + * @return pointer to a global static (!) buffer that holds the ASCII + * representation of addr + */ +char *ipaddr_ntoa(const ip_addr_t *addr) +{ + if (addr == NULL) { + return NULL; + } + if (IP_IS_V6(addr)) { + return ip6addr_ntoa(ip_2_ip6(addr)); + } else { + return ip4addr_ntoa(ip_2_ip4(addr)); + } +} + +/** + * @ingroup ipaddr + * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char *ipaddr_ntoa_r(const ip_addr_t *addr, char *buf, int buflen) +{ + if (addr == NULL) { + return NULL; + } + if (IP_IS_V6(addr)) { + return ip6addr_ntoa_r(ip_2_ip6(addr), buf, buflen); + } else { + return ip4addr_ntoa_r(ip_2_ip4(addr), buf, buflen); + } +} + +/** + * @ingroup ipaddr + * Convert IP address string (both versions) to numeric. + * The version is auto-detected from the string. + * + * @param cp IP address string to convert + * @param addr conversion result is stored here + * @return 1 on success, 0 on error + */ +int +ipaddr_aton(const char *cp, ip_addr_t *addr) +{ + if (cp != NULL) { + const char *c; + for (c = cp; *c != 0; c++) { + if (*c == ':') { + /* contains a colon: IPv6 address */ + if (addr) { + IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V6); + } + return ip6addr_aton(cp, ip_2_ip6(addr)); + } else if (*c == '.') { + /* contains a dot: IPv4 address */ + break; + } + } + /* call ip4addr_aton as fallback or if IPv4 was found */ + if (addr) { + IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V4); + } + return ip4addr_aton(cp, ip_2_ip4(addr)); + } + return 0; +} + +/** + * @ingroup lwip_nosys + * If both IP versions are enabled, this function can dispatch packets to the correct one. + * Don't call directly, pass to netif_add() and call netif->input(). + */ +err_t +ip_input(struct pbuf *p, struct netif *inp) +{ + if (p != NULL) { + if (IP_HDR_GET_VERSION(p->payload) == 6) { + return ip6_input(p, inp); + } + return ip4_input(p, inp); + } + return ERR_VAL; +} + +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + +#endif /* LWIP_IPV4 || LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c new file mode 100644 index 0000000..a454504 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/autoip.c @@ -0,0 +1,527 @@ +/** + * @file + * AutoIP Automatic LinkLocal IP Configuration + * + * This is an AutoIP implementation for the lwIP TCP/IP stack. It aims to conform + * with RFC 3927. + * + * @defgroup autoip AUTOIP + * @ingroup ip4 + * AUTOIP related functions + * USAGE: + * + * define @ref LWIP_AUTOIP 1 in your lwipopts.h + * Options: + * AUTOIP_TMR_INTERVAL msecs, + * I recommend a value of 100. The value should divide 1000 with little or no remainder. + * Possible values are 1000, 500, 333, 250, 200, 166, 142, 125, 111, 100 .... + * + * Without DHCP: + * - Call autoip_start() after netif_add(). + * + * With DHCP: + * - define @ref LWIP_DHCP_AUTOIP_COOP 1 in your lwipopts.h. + * - Configure your DHCP Client. + * + * @see netifapi_autoip + */ + +/* + * + * Copyright (c) 2007 Dominik Spies + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Dominik Spies + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/mem.h" +/* #include "lwip/udp.h" */ +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/autoip.h" +#include "lwip/etharp.h" +#include "lwip/prot/autoip.h" + +#include <string.h> + +/** Pseudo random macro based on netif information. + * You could use "rand()" from the C Library if you define LWIP_AUTOIP_RAND in lwipopts.h */ +#ifndef LWIP_AUTOIP_RAND +#define LWIP_AUTOIP_RAND(netif) ( (((u32_t)((netif->hwaddr[5]) & 0xff) << 24) | \ + ((u32_t)((netif->hwaddr[3]) & 0xff) << 16) | \ + ((u32_t)((netif->hwaddr[2]) & 0xff) << 8) | \ + ((u32_t)((netif->hwaddr[4]) & 0xff))) + \ + (netif_autoip_data(netif)? netif_autoip_data(netif)->tried_llipaddr : 0)) +#endif /* LWIP_AUTOIP_RAND */ + +/** + * Macro that generates the initial IP address to be tried by AUTOIP. + * If you want to override this, define it to something else in lwipopts.h. + */ +#ifndef LWIP_AUTOIP_CREATE_SEED_ADDR +#define LWIP_AUTOIP_CREATE_SEED_ADDR(netif) \ + lwip_htonl(AUTOIP_RANGE_START + ((u32_t)(((u8_t)(netif->hwaddr[4])) | \ + ((u32_t)((u8_t)(netif->hwaddr[5]))) << 8))) +#endif /* LWIP_AUTOIP_CREATE_SEED_ADDR */ + +/* static functions */ +static err_t autoip_arp_announce(struct netif *netif); +static void autoip_start_probing(struct netif *netif); + +/** + * @ingroup autoip + * Set a statically allocated struct autoip to work with. + * Using this prevents autoip_start from allocating it with mem_malloc.
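+ *
+ * Example usage (editor's sketch, not part of the upstream file; the netif
+ * name and the assumption that it is already added and up are hypothetical):
+ *
+ *   static struct netif eth_netif;
+ *   static struct autoip eth_autoip;
+ *
+ *   autoip_set_struct(&eth_netif, &eth_autoip);  // no mem_malloc later
+ *   autoip_start(&eth_netif);  // begin RFC 3927 probing in 169.254/16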
+ * + * @param netif the netif for which to set the struct autoip + * @param autoip (uninitialised) autoip struct allocated by the application + */ +void +autoip_set_struct(struct netif *netif, struct autoip *autoip) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("autoip != NULL", autoip != NULL); + LWIP_ASSERT("netif already has a struct autoip set", + netif_autoip_data(netif) == NULL); + + /* clear data structure */ + memset(autoip, 0, sizeof(struct autoip)); + /* autoip->state = AUTOIP_STATE_OFF; */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip); +} + +/** Restart AutoIP client and check the next address (conflict detected) + * + * @param netif The netif under AutoIP control + */ +static void +autoip_restart(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + autoip->tried_llipaddr++; + autoip_start(netif); +} + +/** + * Handle an IP address conflict after an ARP conflict was detected + */ +static void +autoip_handle_arp_conflict(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + /* RFC3927, 2.5 "Conflict Detection and Defense" allows two options: + a) retreat on the first conflict, or + b) keep an already configured address when there has been only one + conflict within 10 seconds. + We use option b) since it helps to improve the chance that one of the two + conflicting hosts may be able to retain its address. */ + + if (autoip->lastconflict > 0) { + /* retreat, there was a conflicting ARP in the last DEFEND_INTERVAL seconds */ + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_handle_arp_conflict(): we are defending, but in DEFEND_INTERVAL, retreating\n")); + + /* Active TCP sessions are aborted when removing the IP address */ + autoip_restart(netif); + } else { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_handle_arp_conflict(): we are defending, send ARP Announce\n")); + autoip_arp_announce(netif); + autoip->lastconflict = DEFEND_INTERVAL * AUTOIP_TICKS_PER_SECOND; + } +} + +/** + * Create an IP address from the range 169.254.1.0 to 169.254.254.255 + * + * @param netif network interface for which to create the IP address + * @param ipaddr ip address to initialize + */ +static void +autoip_create_addr(struct netif *netif, ip4_addr_t *ipaddr) +{ + struct autoip *autoip = netif_autoip_data(netif); + + /* Here we create an IP address from the range 169.254.1.0 to 169.254.254.255, + * compliant with RFC 3927 Section 2.1 + * We have 254 * 256 possibilities */ + + u32_t addr = lwip_ntohl(LWIP_AUTOIP_CREATE_SEED_ADDR(netif)); + addr += autoip->tried_llipaddr; + addr = AUTOIP_NET | (addr & 0xffff); + /* Now, 169.254.0.0 <= addr <= 169.254.255.255 */ + + if (addr < AUTOIP_RANGE_START) { + addr += AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1; + } + if (addr > AUTOIP_RANGE_END) { + addr -= AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1; + } + LWIP_ASSERT("AUTOIP address not in range", (addr >= AUTOIP_RANGE_START) && + (addr <= AUTOIP_RANGE_END)); + ip4_addr_set_u32(ipaddr, lwip_htonl(addr)); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_create_addr(): tried_llipaddr=%"U16_F", %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + (u16_t)(autoip->tried_llipaddr), ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), + ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr))); +} + +/** + * Sends an ARP probe from a network interface + * + * @param netif network interface used to send the probe + */ +static err_t +autoip_arp_probe(struct netif
*netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + /* this works because netif->ip_addr is ANY */ + return etharp_request(netif, &autoip->llipaddr); +} + +/** + * Sends an ARP announce from a network interface + * + * @param netif network interface used to send the announce + */ +static err_t +autoip_arp_announce(struct netif *netif) +{ + return etharp_gratuitous(netif); +} + +/** + * Configure interface for use with current LL IP-Address + * + * @param netif network interface to configure with current LL IP-Address + */ +static err_t +autoip_bind(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + ip4_addr_t sn_mask, gw_addr; + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_bind(netif=%p) %c%c%"U16_F" %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num, + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + + IP4_ADDR(&sn_mask, 255, 255, 0, 0); + IP4_ADDR(&gw_addr, 0, 0, 0, 0); + + netif_set_addr(netif, &autoip->llipaddr, &sn_mask, &gw_addr); + /* interface is used by routing now that an address is set */ + + return ERR_OK; +} + +/** + * @ingroup autoip + * Start AutoIP client + * + * @param netif network interface on which to start the AutoIP client + */ +err_t +autoip_start(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + err_t result = ERR_OK; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;); + + /* Set IP-Address, Netmask and Gateway to 0 to make sure that + * ARP Packets are formed correctly + */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_start(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], + netif->name[1], (u16_t)netif->num)); + if (autoip == NULL) { + /* no AutoIP client attached yet? */ + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_start(): starting new AUTOIP client\n")); + autoip = (struct autoip *)mem_calloc(1, sizeof(struct autoip)); + if (autoip == NULL) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_start(): could not allocate autoip\n")); + return ERR_MEM; + } + /* store this AutoIP client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_start(): allocated autoip")); + } else { + autoip->state = AUTOIP_STATE_OFF; + autoip->ttw = 0; + autoip->sent_num = 0; + ip4_addr_set_zero(&autoip->llipaddr); + autoip->lastconflict = 0; + } + + autoip_create_addr(netif, &(autoip->llipaddr)); + autoip_start_probing(netif); + + return result; +} + +static void +autoip_start_probing(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + autoip->state = AUTOIP_STATE_PROBING; + autoip->sent_num = 0; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_start_probing(): changing state to PROBING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + + /* time to wait before the first probe, this is randomly + * chosen out of 0 to PROBE_WAIT seconds.
+ * compliant with RFC 3927 Section 2.2.1 + */ + autoip->ttw = (u16_t)(LWIP_AUTOIP_RAND(netif) % (PROBE_WAIT * AUTOIP_TICKS_PER_SECOND)); + + /* + * if we have tried more than MAX_CONFLICTS we must limit our rate for + * acquiring and probing addresses + * compliant with RFC 3927 Section 2.2.1 + */ + if (autoip->tried_llipaddr > MAX_CONFLICTS) { + autoip->ttw = RATE_LIMIT_INTERVAL * AUTOIP_TICKS_PER_SECOND; + } +} + +/** + * Handle a possible change in the network configuration. + * + * If there is an AutoIP address configured, take the interface down + * and begin probing with the same address. + */ +void +autoip_network_changed(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + if (autoip && (autoip->state != AUTOIP_STATE_OFF)) { + autoip_start_probing(netif); + } +} + +/** + * @ingroup autoip + * Stop AutoIP client + * + * @param netif network interface on which to stop the AutoIP client + */ +err_t +autoip_stop(struct netif *netif) +{ + struct autoip *autoip = netif_autoip_data(netif); + + LWIP_ASSERT_CORE_LOCKED(); + if (autoip != NULL) { + autoip->state = AUTOIP_STATE_OFF; + if (ip4_addr_islinklocal(netif_ip4_addr(netif))) { + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + } + } + return ERR_OK; +} + +/** + * Has to be called in a loop every AUTOIP_TMR_INTERVAL milliseconds + */ +void +autoip_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct autoip *autoip = netif_autoip_data(netif); + /* only act on AutoIP configured interfaces */ + if (autoip != NULL) { + if (autoip->lastconflict > 0) { + autoip->lastconflict--; + } + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, + ("autoip_tmr() AutoIP-State: %"U16_F", ttw=%"U16_F"\n", + (u16_t)(autoip->state), autoip->ttw)); + + if (autoip->ttw > 0) { + autoip->ttw--; + } + + switch (autoip->state) { + case AUTOIP_STATE_PROBING: + if (autoip->ttw == 0) { + if (autoip->sent_num >= PROBE_NUM) { + /* Switch to ANNOUNCING: now we can bind to an IP address and use it */ + autoip->state = AUTOIP_STATE_ANNOUNCING; + autoip_bind(netif); + /* autoip_bind() calls netif_set_addr(): this triggers a gratuitous ARP + which counts as an announcement */ + autoip->sent_num = 1; + autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_tmr(): changing state to ANNOUNCING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + } else { + autoip_arp_probe(netif); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() PROBING Sent Probe\n")); + autoip->sent_num++; + if (autoip->sent_num == PROBE_NUM) { + /* calculate time to wait before announcing */ + autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND; + } else { + /* calculate time to wait until the next probe */ + autoip->ttw = (u16_t)((LWIP_AUTOIP_RAND(netif) % + ((PROBE_MAX - PROBE_MIN) * AUTOIP_TICKS_PER_SECOND) ) + + PROBE_MIN * AUTOIP_TICKS_PER_SECOND); + } + } + } + break; + + case AUTOIP_STATE_ANNOUNCING: + if (autoip->ttw == 0) { + autoip_arp_announce(netif); + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() ANNOUNCING Sent Announce\n")); + autoip->ttw = ANNOUNCE_INTERVAL * AUTOIP_TICKS_PER_SECOND; + autoip->sent_num++; + + if (autoip->sent_num >= ANNOUNCE_NUM) { + autoip->state = AUTOIP_STATE_BOUND; + autoip->sent_num = 0; + autoip->ttw = 0; + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("autoip_tmr():
changing state to BOUND: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), + ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); + } + } + break; + + default: + /* nothing to do in other states */ + break; + } + } + } +} + +/** + * Handles every incoming ARP Packet, called by etharp_input(). + * + * @param netif network interface to use for autoip processing + * @param hdr Incoming ARP packet + */ +void +autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr) +{ + struct autoip *autoip = netif_autoip_data(netif); + + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_arp_reply()\n")); + if ((autoip != NULL) && (autoip->state != AUTOIP_STATE_OFF)) { + /* when ip.src == llipaddr && hw.src != netif->hwaddr + * + * when probing ip.dst == llipaddr && hw.src != netif->hwaddr + * we have a conflict and must solve it + */ + ip4_addr_t sipaddr, dipaddr; + struct eth_addr netifaddr; + SMEMCPY(netifaddr.addr, netif->hwaddr, ETH_HWADDR_LEN); + + /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without + * structure packing (not using structure copy which breaks strict-aliasing rules). + */ + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&sipaddr, &hdr->sipaddr); + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&dipaddr, &hdr->dipaddr); + + if (autoip->state == AUTOIP_STATE_PROBING) { + /* RFC 3927 Section 2.2.1: + * from beginning to after ANNOUNCE_WAIT + * seconds we have a conflict if + * ip.src == llipaddr OR + * ip.dst == llipaddr && hw.src != own hwaddr + */ + if ((ip4_addr_cmp(&sipaddr, &autoip->llipaddr)) || + (ip4_addr_isany_val(sipaddr) && + ip4_addr_cmp(&dipaddr, &autoip->llipaddr) && + !eth_addr_cmp(&netifaddr, &hdr->shwaddr))) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("autoip_arp_reply(): Probe Conflict detected\n")); + autoip_restart(netif); + } + } else { + /* RFC 3927 Section 2.5: + * in any state we have a conflict if + * ip.src == llipaddr && hw.src != own hwaddr + */ + if (ip4_addr_cmp(&sipaddr, &autoip->llipaddr) && + !eth_addr_cmp(&netifaddr, &hdr->shwaddr)) { + LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("autoip_arp_reply(): Conflicting ARP-Packet detected\n")); + autoip_handle_arp_conflict(netif); + } + } + } +} + +/** check if AutoIP supplied netif->ip_addr + * + * @param netif the netif to check + * @return 1 if AutoIP supplied netif->ip_addr (state BOUND or ANNOUNCING), + * 0 otherwise + */ +u8_t +autoip_supplied_address(const struct netif *netif) +{ + if ((netif != NULL) && (netif_autoip_data(netif) != NULL)) { + struct autoip *autoip = netif_autoip_data(netif); + return (autoip->state == AUTOIP_STATE_BOUND) || (autoip->state == AUTOIP_STATE_ANNOUNCING); + } + return 0; +} + +u8_t +autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr) +{ + struct autoip *autoip = netif_autoip_data(netif); + return (autoip != NULL) && ip4_addr_cmp(addr, &(autoip->llipaddr)); +} + +#endif /* LWIP_IPV4 && LWIP_AUTOIP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c new file mode 100644 index 0000000..89f9c41 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/dhcp.c @@ -0,0 +1,1990 @@ +/** + * @file + * Dynamic Host Configuration Protocol client + * + * @defgroup dhcp4 
DHCPv4 + * @ingroup ip4 + * DHCP (IPv4) related functions + * This is a DHCP client for the lwIP TCP/IP stack. It aims to conform + * with RFC 2131 and RFC 2132. + * + * @todo: + * - Support for interfaces other than Ethernet (SLIP, PPP, ...) + * + * Options: + * @ref DHCP_COARSE_TIMER_SECS (recommended 60 which is a minute) + * @ref DHCP_FINE_TIMER_MSECS (recommended 500 which equals TCP coarse timer) + * + * dhcp_start() starts a DHCP client instance which + * configures the interface by obtaining an IP address lease and maintaining it. + * + * Use dhcp_release() to end the lease and use dhcp_stop() + * to remove the DHCP client. + * + * @see LWIP_HOOK_DHCP_APPEND_OPTIONS + * @see LWIP_HOOK_DHCP_PARSE_OPTION + * + * @see netifapi_dhcp4 + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. 
+ * + * Author: Leon Woestenberg + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 && LWIP_DHCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/udp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/def.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/dns.h" +#include "lwip/etharp.h" +#include "lwip/prot/dhcp.h" +#include "lwip/prot/iana.h" + +#include <string.h> + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif +#ifndef LWIP_HOOK_DHCP_APPEND_OPTIONS +#define LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, state, msg, msg_type, options_len_ptr) +#endif +#ifndef LWIP_HOOK_DHCP_PARSE_OPTION +#define LWIP_HOOK_DHCP_PARSE_OPTION(netif, dhcp, state, msg, msg_type, option, len, pbuf, offset) do { LWIP_UNUSED_ARG(msg); } while(0) +#endif + +/** DHCP_CREATE_RAND_XID: if this is set to 1, the xid is created using + * LWIP_RAND() (this overrides DHCP_GLOBAL_XID) + */ +#ifndef DHCP_CREATE_RAND_XID +#define DHCP_CREATE_RAND_XID 1 +#endif + +/** Default for DHCP_GLOBAL_XID is 0xABCD0000 + * This can be changed by defining DHCP_GLOBAL_XID and DHCP_GLOBAL_XID_HEADER, e.g. + * \#define DHCP_GLOBAL_XID_HEADER "stdlib.h" + * \#define DHCP_GLOBAL_XID rand() + */ +#ifdef DHCP_GLOBAL_XID_HEADER +#include DHCP_GLOBAL_XID_HEADER /* include optional starting XID generation prototypes */ +#endif + +/** DHCP_OPTION_MAX_MSG_SIZE is set to the MTU. + * The MTU is checked to be big enough in dhcp_start */ +#define DHCP_MAX_MSG_LEN(netif) (netif->mtu) +#define DHCP_MAX_MSG_LEN_MIN_REQUIRED 576 +/** Minimum length for reply before packet is parsed */ +#define DHCP_MIN_REPLY_LEN 44 + +#define REBOOT_TRIES 2 + +#if LWIP_DNS && LWIP_DHCP_MAX_DNS_SERVERS +#if DNS_MAX_SERVERS > LWIP_DHCP_MAX_DNS_SERVERS +#define LWIP_DHCP_PROVIDE_DNS_SERVERS LWIP_DHCP_MAX_DNS_SERVERS +#else +#define LWIP_DHCP_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS +#endif +#else +#define LWIP_DHCP_PROVIDE_DNS_SERVERS 0 +#endif + +/** Option handling: options are parsed in dhcp_parse_reply + * and saved in an array where other functions can load them from. + * This might be moved into the struct dhcp (not necessarily since + * lwIP is single-threaded and the array is only used while in recv + * callback). */ +enum dhcp_option_idx { + DHCP_OPTION_IDX_OVERLOAD = 0, + DHCP_OPTION_IDX_MSG_TYPE, + DHCP_OPTION_IDX_SERVER_ID, + DHCP_OPTION_IDX_LEASE_TIME, + DHCP_OPTION_IDX_T1, + DHCP_OPTION_IDX_T2, + DHCP_OPTION_IDX_SUBNET_MASK, + DHCP_OPTION_IDX_ROUTER, +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + DHCP_OPTION_IDX_DNS_SERVER, + DHCP_OPTION_IDX_DNS_SERVER_LAST = DHCP_OPTION_IDX_DNS_SERVER + LWIP_DHCP_PROVIDE_DNS_SERVERS - 1, +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP_GET_NTP_SRV + DHCP_OPTION_IDX_NTP_SERVER, + DHCP_OPTION_IDX_NTP_SERVER_LAST = DHCP_OPTION_IDX_NTP_SERVER + LWIP_DHCP_MAX_NTP_SERVERS - 1, +#endif /* LWIP_DHCP_GET_NTP_SRV */ + DHCP_OPTION_IDX_MAX +}; + +/** Holds the decoded option values, only valid while in dhcp_recv. + @todo: move this into struct dhcp? */ +u32_t dhcp_rx_options_val[DHCP_OPTION_IDX_MAX]; +/** Holds a flag for each option telling whether it was received and is contained in dhcp_rx_options_val, + only valid while in dhcp_recv. + @todo: move this into struct dhcp?
*/ +u8_t dhcp_rx_options_given[DHCP_OPTION_IDX_MAX]; + +static u8_t dhcp_discover_request_options[] = { + DHCP_OPTION_SUBNET_MASK, + DHCP_OPTION_ROUTER, + DHCP_OPTION_BROADCAST +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + , DHCP_OPTION_DNS_SERVER +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP_GET_NTP_SRV + , DHCP_OPTION_NTP +#endif /* LWIP_DHCP_GET_NTP_SRV */ +}; + +#ifdef DHCP_GLOBAL_XID +static u32_t xid; +static u8_t xid_initialised; +#endif /* DHCP_GLOBAL_XID */ + +#define dhcp_option_given(dhcp, idx) (dhcp_rx_options_given[idx] != 0) +#define dhcp_got_option(dhcp, idx) (dhcp_rx_options_given[idx] = 1) +#define dhcp_clear_option(dhcp, idx) (dhcp_rx_options_given[idx] = 0) +#define dhcp_clear_all_options(dhcp) (memset(dhcp_rx_options_given, 0, sizeof(dhcp_rx_options_given))) +#define dhcp_get_option_value(dhcp, idx) (dhcp_rx_options_val[idx]) +#define dhcp_set_option_value(dhcp, idx, val) (dhcp_rx_options_val[idx] = (val)) + +static struct udp_pcb *dhcp_pcb; +static u8_t dhcp_pcb_refcount; + +/* DHCP client state machine functions */ +static err_t dhcp_discover(struct netif *netif); +static err_t dhcp_select(struct netif *netif); +static void dhcp_bind(struct netif *netif); +#if DHCP_DOES_ARP_CHECK +static err_t dhcp_decline(struct netif *netif); +#endif /* DHCP_DOES_ARP_CHECK */ +static err_t dhcp_rebind(struct netif *netif); +static err_t dhcp_reboot(struct netif *netif); +static void dhcp_set_state(struct dhcp *dhcp, u8_t new_state); + +/* receive, unfold, parse and free incoming messages */ +static void dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); + +/* set the DHCP timers */ +static void dhcp_timeout(struct netif *netif); +static void dhcp_t1_timeout(struct netif *netif); +static void dhcp_t2_timeout(struct netif *netif); + +/* build outgoing messages */ +/* create a DHCP message, fill in common headers */ +static struct pbuf *dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type, u16_t *options_out_len); +/* add a DHCP option (type, then length in bytes) */ +static u16_t dhcp_option(u16_t options_out_len, u8_t *options, u8_t option_type, u8_t option_len); +/* add option values */ +static u16_t dhcp_option_byte(u16_t options_out_len, u8_t *options, u8_t value); +static u16_t dhcp_option_short(u16_t options_out_len, u8_t *options, u16_t value); +static u16_t dhcp_option_long(u16_t options_out_len, u8_t *options, u32_t value); +#if LWIP_NETIF_HOSTNAME +static u16_t dhcp_option_hostname(u16_t options_out_len, u8_t *options, struct netif *netif); +#endif /* LWIP_NETIF_HOSTNAME */ +/* always add the DHCP options trailer to end and pad */ +static void dhcp_option_trailer(u16_t options_out_len, u8_t *options, struct pbuf *p_out); + +/** Ensure DHCP PCB is allocated and bound */ +static err_t +dhcp_inc_pcb_refcount(void) +{ + if (dhcp_pcb_refcount == 0) { + LWIP_ASSERT("dhcp_inc_pcb_refcount(): memory leak", dhcp_pcb == NULL); + + /* allocate UDP PCB */ + dhcp_pcb = udp_new(); + + if (dhcp_pcb == NULL) { + return ERR_MEM; + } + + ip_set_option(dhcp_pcb, SOF_BROADCAST); + + /* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */ + udp_bind(dhcp_pcb, IP4_ADDR_ANY, LWIP_IANA_PORT_DHCP_CLIENT); + udp_connect(dhcp_pcb, IP4_ADDR_ANY, LWIP_IANA_PORT_DHCP_SERVER); + udp_recv(dhcp_pcb, dhcp_recv, NULL); + } + + dhcp_pcb_refcount++; + + return ERR_OK; +} + +/** Free DHCP PCB if the last netif stops using it */ +static void +dhcp_dec_pcb_refcount(void) +{ + LWIP_ASSERT("dhcp_pcb_refcount(): 
refcount error", (dhcp_pcb_refcount > 0)); + dhcp_pcb_refcount--; + + if (dhcp_pcb_refcount == 0) { + udp_remove(dhcp_pcb); + dhcp_pcb = NULL; + } +} + +/** + * Back-off the DHCP client (because of a received NAK response). + * + * Back-off the DHCP client because of a received NAK. Receiving a + * NAK means the client asked for something non-sensible, for + * example when it tries to renew a lease obtained on another network. + * + * We clear any existing set IP address and restart DHCP negotiation + * afresh (as per RFC2131 3.2.3). + * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_nak(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_nak(netif=%p) %c%c%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + /* Change to a defined state - set this before assigning the address + to ensure the callback can use dhcp_supplied_address() */ + dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF); + /* remove IP address from interface (must no longer be used, as per RFC2131) */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + /* We can immediately restart discovery */ + dhcp_discover(netif); +} + +#if DHCP_DOES_ARP_CHECK +/** + * Checks if the offered IP address is already in use. + * + * It does so by sending an ARP request for the offered address and + * entering CHECKING state. If no ARP reply is received within a small + * interval, the address is assumed to be free for use by us. + * + * @param netif the netif under DHCP control + */ +static void +dhcp_check(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_check(netif=%p) %c%c\n", (void *)netif, (s16_t)netif->name[0], + (s16_t)netif->name[1])); + dhcp_set_state(dhcp, DHCP_STATE_CHECKING); + /* create an ARP query for the offered IP address, expecting that no host + responds, as the IP address should not be in use. */ + result = etharp_query(netif, &dhcp->offered_ip_addr, NULL); + if (result != ERR_OK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_check: could not perform ARP query\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = 500; + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_check(): set request timeout %"U16_F" msecs\n", msecs)); +} +#endif /* DHCP_DOES_ARP_CHECK */ + +/** + * Remember the configuration offered by a DHCP server. 
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_offer(struct netif *netif, struct dhcp_msg *msg_in) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_offer(netif=%p) %c%c%"U16_F"\n", + (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + /* obtain the server address */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SERVER_ID)) { + dhcp->request_timeout = 0; /* stop timer */ + + ip_addr_set_ip4_u32(&dhcp->server_ip_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SERVER_ID))); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): server 0x%08"X32_F"\n", + ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr)))); + /* remember offered address */ + ip4_addr_copy(dhcp->offered_ip_addr, msg_in->yiaddr); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): offer for 0x%08"X32_F"\n", + ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + dhcp_select(netif); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_handle_offer(netif=%p) did not get server ID!\n", (void *)netif)); + } +} + +/** + * Select a DHCP server offer out of all offers. + * + * Simply select the first offer received. + * + * @param netif the netif under DHCP control + * @return lwIP specific error (see error.h) + */ +static err_t +dhcp_select(struct netif *netif) +{ + struct dhcp *dhcp; + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ERROR("dhcp_select: netif != NULL", (netif != NULL), return ERR_ARG;); + dhcp = netif_dhcp_data(netif); + LWIP_ERROR("dhcp_select: dhcp != NULL", (dhcp != NULL), return ERR_VAL;); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_select(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + dhcp_set_state(dhcp, DHCP_STATE_REQUESTING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + /* MUST request the offered IP address */ + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_SERVER_ID, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr)))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REQUESTING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* send broadcast to any DHCP server 
*/ + result = udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_select: REQUESTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_select: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)((dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_select(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * The DHCP timer that checks for lease renewal/rebind timeouts. + * Must be called once a minute (see @ref DHCP_COARSE_TIMER_SECS). + */ +void +dhcp_coarse_tmr(void) +{ + struct netif *netif; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_coarse_tmr()\n")); + /* iterate through all network interfaces */ + NETIF_FOREACH(netif) { + /* only act on DHCP configured interfaces */ + struct dhcp *dhcp = netif_dhcp_data(netif); + if ((dhcp != NULL) && (dhcp->state != DHCP_STATE_OFF)) { + /* compare lease time to expire timeout */ + if (dhcp->t0_timeout && (++dhcp->lease_used == dhcp->t0_timeout)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t0 timeout\n")); + /* this client's lease time has expired */ + dhcp_release_and_stop(netif); + dhcp_start(netif); + /* timer is active (non zero), and triggers (zeroes) now? */ + } else if (dhcp->t2_rebind_time && (dhcp->t2_rebind_time-- == 1)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t2 timeout\n")); + /* this client's rebind timeout triggered */ + dhcp_t2_timeout(netif); + /* timer is active (non zero), and triggers (zeroes) now */ + } else if (dhcp->t1_renew_time && (dhcp->t1_renew_time-- == 1)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t1 timeout\n")); + /* this client's renewal timeout triggered */ + dhcp_t1_timeout(netif); + } + } + } +} + +/** + * DHCP transaction timeout handling (this function must be called every 500ms, + * see @ref DHCP_FINE_TIMER_MSECS). + * + * A DHCP server is expected to respond within a short period of time. + * This timer checks whether an outstanding DHCP request has timed out. + */ +void +dhcp_fine_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct dhcp *dhcp = netif_dhcp_data(netif); + /* only act on DHCP configured interfaces */ + if (dhcp != NULL) { + /* timer is active (non zero), and is about to trigger now */ + if (dhcp->request_timeout > 1) { + dhcp->request_timeout--; + } else if (dhcp->request_timeout == 1) { + dhcp->request_timeout--; + /* { dhcp->request_timeout == 0 } */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_fine_tmr(): request timeout\n")); + /* this client's request timeout triggered */ + dhcp_timeout(netif); + } + } + } +} + +/** + * A DHCP negotiation transaction, or ARP request, has timed out. + * + * The timer that was started with the DHCP or ARP request has + * timed out, indicating no response was received in time.
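+ *
+ * (Editor's note: dhcp_fine_tmr() and dhcp_coarse_tmr() above only fire these
+ * timeouts if something actually calls them; with LWIP_TIMERS enabled, lwIP's
+ * sys_timeouts module does this itself. A minimal sketch for a bare-metal
+ * NO_SYS main loop, assuming sys_now() is the millisecond tick source:)
+ *
+ *   u32_t last_fine = 0, last_coarse = 0;
+ *   for (;;) {
+ *     u32_t now = sys_now();
+ *     if ((u32_t)(now - last_fine) >= DHCP_FINE_TIMER_MSECS) {
+ *       last_fine = now;
+ *       dhcp_fine_tmr();    // every 500 ms: request retransmission timeouts
+ *     }
+ *     if ((u32_t)(now - last_coarse) >= DHCP_COARSE_TIMER_SECS * 1000) {
+ *       last_coarse = now;
+ *       dhcp_coarse_tmr();  // every 60 s: lease/T1/T2 bookkeeping
+ *     }
+ *   }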
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout()\n")); + /* back-off period has passed, or server selection timed out */ + if ((dhcp->state == DHCP_STATE_BACKING_OFF) || (dhcp->state == DHCP_STATE_SELECTING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout(): restarting discovery\n")); + dhcp_discover(netif); + /* receiving the requested lease timed out */ + } else if (dhcp->state == DHCP_STATE_REQUESTING) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, DHCP request timed out\n")); + if (dhcp->tries <= 5) { + dhcp_select(netif); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, releasing, restarting\n")); + dhcp_release_and_stop(netif); + dhcp_start(netif); + } +#if DHCP_DOES_ARP_CHECK + /* received no ARP reply for the offered address (which is good) */ + } else if (dhcp->state == DHCP_STATE_CHECKING) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): CHECKING, ARP request timed out\n")); + if (dhcp->tries <= 1) { + dhcp_check(netif); + /* no ARP replies on the offered address, + looks like the IP address is indeed free */ + } else { + /* bind the interface to the offered address */ + dhcp_bind(netif); + } +#endif /* DHCP_DOES_ARP_CHECK */ + } else if (dhcp->state == DHCP_STATE_REBOOTING) { + if (dhcp->tries < REBOOT_TRIES) { + dhcp_reboot(netif); + } else { + dhcp_discover(netif); + } + } +} + +/** + * The renewal period has timed out. + * + * @param netif the netif under DHCP control + */ +static void +dhcp_t1_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_t1_timeout()\n")); + if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) || + (dhcp->state == DHCP_STATE_RENEWING)) { + /* just retry to renew - note that the rebind timer (t2) will + * eventually time-out if renew tries fail. */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("dhcp_t1_timeout(): must renew\n")); + /* This is slightly different from RFC2131: DHCPREQUEST will be sent from state + DHCP_STATE_RENEWING, not DHCP_STATE_BOUND */ + dhcp_renew(netif); + /* Calculate next timeout */ + if (((dhcp->t2_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) { + dhcp->t1_renew_time = (u16_t)((dhcp->t2_timeout - dhcp->lease_used) / 2); + } + } +} + +/** + * The rebind period has timed out.
+ * + * @param netif the netif under DHCP control + */ +static void +dhcp_t2_timeout(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_t2_timeout()\n")); + if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) || + (dhcp->state == DHCP_STATE_RENEWING) || (dhcp->state == DHCP_STATE_REBINDING)) { + /* just retry to rebind */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + ("dhcp_t2_timeout(): must rebind\n")); + /* This is slightly different from RFC2131: DHCPREQUEST will be sent from state + DHCP_STATE_REBINDING, not DHCP_STATE_BOUND */ + dhcp_rebind(netif); + /* Calculate next timeout */ + if (((dhcp->t0_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) { + dhcp->t2_rebind_time = (u16_t)((dhcp->t0_timeout - dhcp->lease_used) / 2); + } + } +} + +/** + * Handle a DHCP ACK packet + * + * @param netif the netif under DHCP control + */ +static void +dhcp_handle_ack(struct netif *netif, struct dhcp_msg *msg_in) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + +#if LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV + u8_t n; +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV */ +#if LWIP_DHCP_GET_NTP_SRV + ip4_addr_t ntp_server_addrs[LWIP_DHCP_MAX_NTP_SERVERS]; +#endif + + /* clear options we might not get from the ACK */ + ip4_addr_set_zero(&dhcp->offered_sn_mask); + ip4_addr_set_zero(&dhcp->offered_gw_addr); +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_set_zero(&dhcp->offered_si_addr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* lease time given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_LEASE_TIME)) { + /* remember offered lease time */ + dhcp->offered_t0_lease = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_LEASE_TIME); + } + /* renewal period given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T1)) { + /* remember given renewal period */ + dhcp->offered_t1_renew = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T1); + } else { + /* calculate safe periods for renewal */ + dhcp->offered_t1_renew = dhcp->offered_t0_lease / 2; + } + + /* rebind period given? */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T2)) { + /* remember given rebind period */ + dhcp->offered_t2_rebind = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T2); + } else { + /* calculate safe periods for rebinding (offered_t0_lease * 0.875 -> 87.5%)*/ + dhcp->offered_t2_rebind = (dhcp->offered_t0_lease * 7U) / 8U; + } + + /* (y)our internet address */ + ip4_addr_copy(dhcp->offered_ip_addr, msg_in->yiaddr); + +#if LWIP_DHCP_BOOTP_FILE + /* copy boot server address, + boot file name copied in dhcp_parse_reply if not overloaded */ + ip4_addr_copy(dhcp->offered_si_addr, msg_in->siaddr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* subnet mask given?
*/ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SUBNET_MASK)) { + /* remember given subnet mask */ + ip4_addr_set_u32(&dhcp->offered_sn_mask, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SUBNET_MASK))); + dhcp->subnet_mask_given = 1; + } else { + dhcp->subnet_mask_given = 0; + } + + /* gateway router */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_ROUTER)) { + ip4_addr_set_u32(&dhcp->offered_gw_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_ROUTER))); + } + +#if LWIP_DHCP_GET_NTP_SRV + /* NTP servers */ + for (n = 0; (n < LWIP_DHCP_MAX_NTP_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n); n++) { + ip4_addr_set_u32(&ntp_server_addrs[n], lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n))); + } + dhcp_set_ntp_servers(n, ntp_server_addrs); +#endif /* LWIP_DHCP_GET_NTP_SRV */ + +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + /* DNS servers */ + for (n = 0; (n < LWIP_DHCP_PROVIDE_DNS_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n); n++) { + ip_addr_t dns_addr; + ip_addr_set_ip4_u32_val(dns_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n))); + dns_setserver(n, &dns_addr); + } +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +} + +/** + * @ingroup dhcp4 + * Set a statically allocated struct dhcp to work with. + * Using this prevents dhcp_start to allocate it using mem_malloc. + * + * @param netif the netif for which to set the struct dhcp + * @param dhcp (uninitialised) dhcp struct allocated by the application + */ +void +dhcp_set_struct(struct netif *netif, struct dhcp *dhcp) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("dhcp != NULL", dhcp != NULL); + LWIP_ASSERT("netif already has a struct dhcp set", netif_dhcp_data(netif) == NULL); + + /* clear data structure */ + memset(dhcp, 0, sizeof(struct dhcp)); + /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp); +} + +/** + * @ingroup dhcp4 + * Removes a struct dhcp from a netif. + * + * ATTENTION: Only use this when not using dhcp_set_struct() to allocate the + * struct dhcp since the memory is passed back to the heap. + * + * @param netif the netif from which to remove the struct dhcp + */ +void dhcp_cleanup(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("netif != NULL", netif != NULL); + + if (netif_dhcp_data(netif) != NULL) { + mem_free(netif_dhcp_data(netif)); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL); + } +} + +/** + * @ingroup dhcp4 + * Start DHCP negotiation for a network interface. + * + * If no DHCP client instance was attached to this interface, + * a new client is created first. If a DHCP client instance + * was already present, it restarts negotiation. 
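+ *
+ * Example usage (editor's sketch; `netif0` and `ethernetif_init` are
+ * assumptions taken from the usual lwIP port skeleton):
+ *
+ *   netif_add(&netif0, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4,
+ *             NULL, ethernetif_init, netif_input);
+ *   netif_set_up(&netif0);
+ *   if (dhcp_start(&netif0) == ERR_OK) {
+ *     // wait for dhcp_supplied_address(&netif0) to become true,
+ *     // or register a netif status callback
+ *   }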
+ * + * @param netif The lwIP network interface + * @return lwIP error code + * - ERR_OK - No error + * - ERR_MEM - Out of memory + */ +err_t +dhcp_start(struct netif *netif) +{ + struct dhcp *dhcp; + err_t result; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif != NULL", (netif != NULL), return ERR_ARG;); + LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;); + dhcp = netif_dhcp_data(netif); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + /* check MTU of the netif */ + if (netif->mtu < DHCP_MAX_MSG_LEN_MIN_REQUIRED) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): Cannot use this netif with DHCP: MTU is too small\n")); + return ERR_MEM; + } + + /* no DHCP client attached yet? */ + if (dhcp == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): mallocing new DHCP client\n")); + dhcp = (struct dhcp *)mem_malloc(sizeof(struct dhcp)); + if (dhcp == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): could not allocate dhcp\n")); + return ERR_MEM; + } + + /* store this dhcp client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): allocated dhcp")); + /* already has DHCP client attached */ + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(): restarting DHCP configuration\n")); + + if (dhcp->pcb_allocated != 0) { + dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */ + } + /* dhcp is cleared below, no need to reset flag*/ + } + + /* clear data structure */ + memset(dhcp, 0, sizeof(struct dhcp)); + /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */ + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): starting DHCP configuration\n")); + + if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */ + return ERR_MEM; + } + dhcp->pcb_allocated = 1; + + if (!netif_is_link_up(netif)) { + /* set state INIT and wait for dhcp_network_changed() to call dhcp_discover() */ + dhcp_set_state(dhcp, DHCP_STATE_INIT); + return ERR_OK; + } + + /* (re)start the DHCP negotiation */ + result = dhcp_discover(netif); + if (result != ERR_OK) { + /* free resources allocated above */ + dhcp_release_and_stop(netif); + return ERR_MEM; + } + return result; +} + +/** + * @ingroup dhcp4 + * Inform a DHCP server of our manual configuration. + * + * This informs DHCP servers of our fixed IP address configuration + * by sending an INFORM message. It does not involve DHCP address + * configuration, it is just here to be nice to the network. 
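+ *
+ * Example usage (editor's sketch; the address values are arbitrary):
+ *
+ *   ip4_addr_t ip, mask, gw;
+ *   IP4_ADDR(&ip,   192, 168,   1, 42);
+ *   IP4_ADDR(&mask, 255, 255, 255,  0);
+ *   IP4_ADDR(&gw,   192, 168,   1,  1);
+ *   netif_set_addr(&netif0, &ip, &mask, &gw);  // manual configuration
+ *   dhcp_inform(&netif0);                      // announce it to DHCP servers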
+ * + * @param netif The lwIP network interface + */ +void +dhcp_inform(struct netif *netif) +{ + struct dhcp dhcp; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + + if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */ + return; + } + + memset(&dhcp, 0, sizeof(struct dhcp)); + dhcp_set_state(&dhcp, DHCP_STATE_INFORMING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, &dhcp, DHCP_INFORM, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, &dhcp, DHCP_STATE_INFORMING, msg_out, DHCP_INFORM, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_inform: INFORMING\n")); + + udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + + pbuf_free(p_out); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_inform: could not allocate DHCP request\n")); + } + + dhcp_dec_pcb_refcount(); /* delete DHCP PCB if not needed any more */ +} + +/** Handle a possible change in the network configuration. + * + * This enters the REBOOTING state to verify that the currently bound + * address is still valid. + */ +void +dhcp_network_changed(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + + if (!dhcp) { + return; + } + switch (dhcp->state) { + case DHCP_STATE_REBINDING: + case DHCP_STATE_RENEWING: + case DHCP_STATE_BOUND: + case DHCP_STATE_REBOOTING: + dhcp->tries = 0; + dhcp_reboot(netif); + break; + case DHCP_STATE_OFF: + /* stay off */ + break; + default: + LWIP_ASSERT("invalid dhcp->state", dhcp->state <= DHCP_STATE_BACKING_OFF); + /* INIT/REQUESTING/CHECKING/BACKING_OFF restart with new 'rid' because the + state changes, SELECTING: continue with current 'rid' as we stay in the + same state */ +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + /* ensure we start with short timeouts, even if already discovering */ + dhcp->tries = 0; + dhcp_discover(netif); + break; + } +} + +#if DHCP_DOES_ARP_CHECK +/** + * Match an ARP reply with the offered IP address: + * check whether the offered IP address is not in use using ARP + * + * @param netif the network interface on which the reply was received + * @param addr The IP address we received a reply from + */ +void +dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr) +{ + struct dhcp *dhcp; + + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + dhcp = netif_dhcp_data(netif); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_arp_reply()\n")); + /* is a DHCP client doing an ARP check? */ + if ((dhcp != NULL) && (dhcp->state == DHCP_STATE_CHECKING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_arp_reply(): CHECKING, arp reply for 0x%08"X32_F"\n", + ip4_addr_get_u32(addr))); + /* did a host respond with the address we + were offered by the DHCP server? 
*/ + if (ip4_addr_cmp(addr, &dhcp->offered_ip_addr)) { + /* we will not accept the offered address */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, + ("dhcp_arp_reply(): arp reply matched with offered address, declining\n")); + dhcp_decline(netif); + } + } +} + +/** + * Decline an offered lease. + * + * Tell the DHCP server we do not accept the offered address. + * One reason to decline the lease is when we find out the address + * is already in use by another host (through ARP). + * + * @param netif the netif under DHCP control + */ +static err_t +dhcp_decline(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline()\n")); + dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF); + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_DECLINE, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_BACKING_OFF, msg_out, DHCP_DECLINE, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* per section 4.4.4, broadcast DECLINE messages */ + result = udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_decline: BACKING OFF\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_decline: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = 10 * 1000; + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} +#endif /* DHCP_DOES_ARP_CHECK */ + + +/** + * Start the DHCP process, discover a DHCP server. 
+ * + * @param netif the netif under DHCP control + */ +static err_t +dhcp_discover(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result = ERR_OK; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover()\n")); + + ip4_addr_set_any(&dhcp->offered_ip_addr); + dhcp_set_state(dhcp, DHCP_STATE_SELECTING); + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_DISCOVER, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: making request\n")); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_SELECTING, msg_out, DHCP_DISCOVER, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: sendto(DISCOVER, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER)\n")); + udp_sendto_if_src(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif, IP4_ADDR_ANY); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: deleting()ing\n")); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover: SELECTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_discover: could not allocate DHCP request\n")); + } + if (dhcp->tries < 255) { + dhcp->tries++; + } +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->tries >= LWIP_DHCP_AUTOIP_COOP_TRIES && dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_OFF) { + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_ON; + autoip_start(netif); + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + msecs = (u16_t)((dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + + +/** + * Bind the interface to the offered IP address. 
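+ *
+ * (Editor's note, summarising the fallback logic below: if the ACK carried no
+ * subnet-mask option, a classful default is derived from the first octet of
+ * the bound address, e.g.
+ *
+ *   10.1.2.3   -> 255.0.0.0      (first octet <= 127)
+ *   172.16.0.9 -> 255.255.0.0    (128..191)
+ *   192.0.2.7  -> 255.255.255.0  (first octet >= 192)
+ *
+ * and if no router option was given, the gateway defaults to the first host
+ * address on that network, e.g. 10.0.0.1 for 10.1.2.3/8.)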
+ * + * @param netif network interface to bind to the offered address + */ +static void +dhcp_bind(struct netif *netif) +{ + u32_t timeout; + struct dhcp *dhcp; + ip4_addr_t sn_mask, gw_addr; + LWIP_ERROR("dhcp_bind: netif != NULL", (netif != NULL), return;); + dhcp = netif_dhcp_data(netif); + LWIP_ERROR("dhcp_bind: dhcp != NULL", (dhcp != NULL), return;); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + /* reset time used of lease */ + dhcp->lease_used = 0; + + if (dhcp->offered_t0_lease != 0xffffffffUL) { + /* set renewal period timer */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t0 renewal timer %"U32_F" secs\n", dhcp->offered_t0_lease)); + timeout = (dhcp->offered_t0_lease + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t0_timeout = (u16_t)timeout; + if (dhcp->t0_timeout == 0) { + dhcp->t0_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t0_lease * 1000)); + } + + /* temporary DHCP lease? */ + if (dhcp->offered_t1_renew != 0xffffffffUL) { + /* set renewal period timer */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t1 renewal timer %"U32_F" secs\n", dhcp->offered_t1_renew)); + timeout = (dhcp->offered_t1_renew + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t1_timeout = (u16_t)timeout; + if (dhcp->t1_timeout == 0) { + dhcp->t1_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t1_renew * 1000)); + dhcp->t1_renew_time = dhcp->t1_timeout; + } + /* set renewal period timer */ + if (dhcp->offered_t2_rebind != 0xffffffffUL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t2 rebind timer %"U32_F" secs\n", dhcp->offered_t2_rebind)); + timeout = (dhcp->offered_t2_rebind + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; + if (timeout > 0xffff) { + timeout = 0xffff; + } + dhcp->t2_timeout = (u16_t)timeout; + if (dhcp->t2_timeout == 0) { + dhcp->t2_timeout = 1; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t2_rebind * 1000)); + dhcp->t2_rebind_time = dhcp->t2_timeout; + } + + /* If we have sub 1 minute lease, t2 and t1 will kick in at the same time. */ + if ((dhcp->t1_timeout >= dhcp->t2_timeout) && (dhcp->t2_timeout > 0)) { + dhcp->t1_timeout = 0; + } + + if (dhcp->subnet_mask_given) { + /* copy offered network mask */ + ip4_addr_copy(sn_mask, dhcp->offered_sn_mask); + } else { + /* subnet mask not given, choose a safe subnet mask given the network class */ + u8_t first_octet = ip4_addr1(&dhcp->offered_ip_addr); + if (first_octet <= 127) { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xff000000UL)); + } else if (first_octet >= 192) { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffffff00UL)); + } else { + ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffff0000UL)); + } + } + + ip4_addr_copy(gw_addr, dhcp->offered_gw_addr); + /* gateway address not given? 
*/ + if (ip4_addr_isany_val(gw_addr)) { + /* copy network address */ + ip4_addr_get_network(&gw_addr, &dhcp->offered_ip_addr, &sn_mask); + /* use first host address on network as gateway */ + ip4_addr_set_u32(&gw_addr, ip4_addr_get_u32(&gw_addr) | PP_HTONL(0x00000001UL)); + } + +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_bind(): IP: 0x%08"X32_F" SN: 0x%08"X32_F" GW: 0x%08"X32_F"\n", + ip4_addr_get_u32(&dhcp->offered_ip_addr), ip4_addr_get_u32(&sn_mask), ip4_addr_get_u32(&gw_addr))); + /* netif is now bound to DHCP leased address - set this before assigning the address + to ensure the callback can use dhcp_supplied_address() */ + dhcp_set_state(dhcp, DHCP_STATE_BOUND); + + netif_set_addr(netif, &dhcp->offered_ip_addr, &sn_mask, &gw_addr); + /* interface is used by routing now that an address is set */ +} + +/** + * @ingroup dhcp4 + * Renew an existing DHCP lease at the involved DHCP server. + * + * @param netif network interface which must renew its lease + */ +err_t +dhcp_renew(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_renew()\n")); + dhcp_set_state(dhcp, DHCP_STATE_RENEWING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_RENEWING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + result = udp_sendto_if(dhcp_pcb, p_out, &dhcp->server_ip_addr, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew: RENEWING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_renew: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + /* back-off on retries, but to a maximum of 20 seconds */ + msecs = (u16_t)(dhcp->tries < 10 ? dhcp->tries * 2000 : 20 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * Rebind with a DHCP server for an existing DHCP lease. 
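+ *
+ * Unlike dhcp_renew(), which unicasts the DHCPREQUEST to the server that
+ * granted the lease, rebinding broadcasts the request so that any DHCP
+ * server on the link may extend the lease (see the udp_sendto_if() call
+ * with IP_ADDR_BROADCAST below).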
+ * + * @param netif network interface which must rebind with a DHCP server + */ +static err_t +dhcp_rebind(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind()\n")); + dhcp_set_state(dhcp, DHCP_STATE_REBINDING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN(netif)); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REBINDING, msg_out, DHCP_DISCOVER, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* broadcast to server */ + result = udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind: REBINDING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_rebind: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)(dhcp->tries < 10 ? 
dhcp->tries * 1000 : 10 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * Enter REBOOTING state to verify an existing lease + * + * @param netif network interface which must reboot + */ +static err_t +dhcp_reboot(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + err_t result; + u16_t msecs; + u8_t i; + struct pbuf *p_out; + u16_t options_out_len; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot()\n")); + dhcp_set_state(dhcp, DHCP_STATE_REBOOTING); + + /* create and initialize the DHCP message header */ + p_out = dhcp_create_msg(netif, dhcp, DHCP_REQUEST, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); + options_out_len = dhcp_option_short(options_out_len, msg_out->options, DHCP_MAX_MSG_LEN_MIN_REQUIRED); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_REQUESTED_IP, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); + + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); + for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { + options_out_len = dhcp_option_byte(options_out_len, msg_out->options, dhcp_discover_request_options[i]); + } + +#if LWIP_NETIF_HOSTNAME + options_out_len = dhcp_option_hostname(options_out_len, msg_out->options, netif); +#endif /* LWIP_NETIF_HOSTNAME */ + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, DHCP_STATE_REBOOTING, msg_out, DHCP_REQUEST, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + /* broadcast to server */ + result = udp_sendto_if(dhcp_pcb, p_out, IP_ADDR_BROADCAST, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot: REBOOTING\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_reboot: could not allocate DHCP request\n")); + result = ERR_MEM; + } + if (dhcp->tries < 255) { + dhcp->tries++; + } + msecs = (u16_t)(dhcp->tries < 10 ? dhcp->tries * 1000 : 10 * 1000); + dhcp->request_timeout = (u16_t)((msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot(): set request timeout %"U16_F" msecs\n", msecs)); + return result; +} + +/** + * @ingroup dhcp4 + * Release a DHCP lease and stop DHCP statemachine (and AUTOIP if LWIP_DHCP_AUTOIP_COOP). + * + * @param netif network interface + */ +void +dhcp_release_and_stop(struct netif *netif) +{ + struct dhcp *dhcp = netif_dhcp_data(netif); + ip_addr_t server_ip_addr; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_release_and_stop()\n")); + if (dhcp == NULL) { + return; + } + + /* already off? 
-> nothing to do */ + if (dhcp->state == DHCP_STATE_OFF) { + return; + } + + ip_addr_copy(server_ip_addr, dhcp->server_ip_addr); + + /* clean old DHCP offer */ + ip_addr_set_zero_ip4(&dhcp->server_ip_addr); + ip4_addr_set_zero(&dhcp->offered_ip_addr); + ip4_addr_set_zero(&dhcp->offered_sn_mask); + ip4_addr_set_zero(&dhcp->offered_gw_addr); +#if LWIP_DHCP_BOOTP_FILE + ip4_addr_set_zero(&dhcp->offered_si_addr); +#endif /* LWIP_DHCP_BOOTP_FILE */ + dhcp->offered_t0_lease = dhcp->offered_t1_renew = dhcp->offered_t2_rebind = 0; + dhcp->t1_renew_time = dhcp->t2_rebind_time = dhcp->lease_used = dhcp->t0_timeout = 0; + + /* send release message when current IP was assigned via DHCP */ + if (dhcp_supplied_address(netif)) { + /* create and initialize the DHCP message header */ + struct pbuf *p_out; + u16_t options_out_len; + p_out = dhcp_create_msg(netif, dhcp, DHCP_RELEASE, &options_out_len); + if (p_out != NULL) { + struct dhcp_msg *msg_out = (struct dhcp_msg *)p_out->payload; + options_out_len = dhcp_option(options_out_len, msg_out->options, DHCP_OPTION_SERVER_ID, 4); + options_out_len = dhcp_option_long(options_out_len, msg_out->options, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&server_ip_addr)))); + + LWIP_HOOK_DHCP_APPEND_OPTIONS(netif, dhcp, dhcp->state, msg_out, DHCP_RELEASE, &options_out_len); + dhcp_option_trailer(options_out_len, msg_out->options, p_out); + + udp_sendto_if(dhcp_pcb, p_out, &server_ip_addr, LWIP_IANA_PORT_DHCP_SERVER, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_release: RELEASED, DHCP_STATE_OFF\n")); + } else { + /* sending release failed, but that's not a problem since the correct behaviour of dhcp does not rely on release */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_release: could not allocate DHCP request\n")); + } + } + + /* remove IP address from interface (prevents routing from selecting this interface) */ + netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); + +#if LWIP_DHCP_AUTOIP_COOP + if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { + autoip_stop(netif); + dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; + } +#endif /* LWIP_DHCP_AUTOIP_COOP */ + + dhcp_set_state(dhcp, DHCP_STATE_OFF); + + if (dhcp->pcb_allocated != 0) { + dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */ + dhcp->pcb_allocated = 0; + } +} + +/** + * @ingroup dhcp4 + * This function calls dhcp_release_and_stop() internally. + * @deprecated Use dhcp_release_and_stop() instead. + */ +err_t +dhcp_release(struct netif *netif) +{ + dhcp_release_and_stop(netif); + return ERR_OK; +} + +/** + * @ingroup dhcp4 + * This function calls dhcp_release_and_stop() internally. + * @deprecated Use dhcp_release_and_stop() instead. + */ +void +dhcp_stop(struct netif *netif) +{ + dhcp_release_and_stop(netif); +} + +/* + * Set the DHCP state of a DHCP client. + * + * If the state changed, reset the number of tries. + */ +static void +dhcp_set_state(struct dhcp *dhcp, u8_t new_state) +{ + if (new_state != dhcp->state) { + dhcp->state = new_state; + dhcp->tries = 0; + dhcp->request_timeout = 0; + } +} + +/* + * Concatenate an option type and length field to the outgoing + * DHCP message. 
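+ *
+ * Illustrative sketch (mirrors the callers below, e.g. dhcp_reboot()):
+ * encoding a requested-IP option (code 50, length 4) is a matter of
+ *
+ *   len = dhcp_option(len, opts, DHCP_OPTION_REQUESTED_IP, 4);
+ *   len = dhcp_option_long(len, opts, lwip_ntohl(ip4_addr_get_u32(&ip)));
+ *
+ * which appends the bytes {50, 4, a, b, c, d} to the options buffer
+ * ('len', 'opts' and 'ip' are hypothetical variables).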
+ * + */ +static u16_t +dhcp_option(u16_t options_out_len, u8_t *options, u8_t option_type, u8_t option_len) +{ + LWIP_ASSERT("dhcp_option: options_out_len + 2 + option_len <= DHCP_OPTIONS_LEN", options_out_len + 2U + option_len <= DHCP_OPTIONS_LEN); + options[options_out_len++] = option_type; + options[options_out_len++] = option_len; + return options_out_len; +} +/* + * Concatenate a single byte to the outgoing DHCP message. + * + */ +static u16_t +dhcp_option_byte(u16_t options_out_len, u8_t *options, u8_t value) +{ + LWIP_ASSERT("dhcp_option_byte: options_out_len < DHCP_OPTIONS_LEN", options_out_len < DHCP_OPTIONS_LEN); + options[options_out_len++] = value; + return options_out_len; +} + +static u16_t +dhcp_option_short(u16_t options_out_len, u8_t *options, u16_t value) +{ + LWIP_ASSERT("dhcp_option_short: options_out_len + 2 <= DHCP_OPTIONS_LEN", options_out_len + 2U <= DHCP_OPTIONS_LEN); + options[options_out_len++] = (u8_t)((value & 0xff00U) >> 8); + options[options_out_len++] = (u8_t) (value & 0x00ffU); + return options_out_len; +} + +static u16_t +dhcp_option_long(u16_t options_out_len, u8_t *options, u32_t value) +{ + LWIP_ASSERT("dhcp_option_long: options_out_len + 4 <= DHCP_OPTIONS_LEN", options_out_len + 4U <= DHCP_OPTIONS_LEN); + options[options_out_len++] = (u8_t)((value & 0xff000000UL) >> 24); + options[options_out_len++] = (u8_t)((value & 0x00ff0000UL) >> 16); + options[options_out_len++] = (u8_t)((value & 0x0000ff00UL) >> 8); + options[options_out_len++] = (u8_t)((value & 0x000000ffUL)); + return options_out_len; +} + +#if LWIP_NETIF_HOSTNAME +static u16_t +dhcp_option_hostname(u16_t options_out_len, u8_t *options, struct netif *netif) +{ + if (netif->hostname != NULL) { + size_t namelen = strlen(netif->hostname); + if (namelen > 0) { + size_t len; + const char *p = netif->hostname; + /* Shrink len to available bytes (need 2 bytes for OPTION_HOSTNAME + and 1 byte for trailer) */ + size_t available = DHCP_OPTIONS_LEN - options_out_len - 3; + LWIP_ASSERT("DHCP: hostname is too long!", namelen <= available); + len = LWIP_MIN(namelen, available); + LWIP_ASSERT("DHCP: hostname is too long!", len <= 0xFF); + options_out_len = dhcp_option(options_out_len, options, DHCP_OPTION_HOSTNAME, (u8_t)len); + while (len--) { + options_out_len = dhcp_option_byte(options_out_len, options, *p++); + } + } + } + return options_out_len; +} +#endif /* LWIP_NETIF_HOSTNAME */ + +/** + * Extract the DHCP message and the DHCP options. + * + * Extract the DHCP message and the DHCP options, each into a contiguous + * piece of memory. As a DHCP message is variable sized by its options, + * and also allows overriding some fields for options, the easy approach + * is to first unfold the options into a contiguous piece of memory, and + * use that further on. 
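+ *
+ * Wire layout being parsed (RFC 2131): each option is encoded as
+ * {code, len, value[len]}, terminated by DHCP_OPTION_END (255); PAD (0)
+ * bytes carry no length field. For example, the message-type option of a
+ * DHCPOFFER arrives as the three bytes {53, 1, 2}.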
+ * + */ +static err_t +dhcp_parse_reply(struct pbuf *p, struct dhcp *dhcp) +{ + u8_t *options; + u16_t offset; + u16_t offset_max; + u16_t options_idx; + u16_t options_idx_max; + struct pbuf *q; + int parse_file_as_options = 0; + int parse_sname_as_options = 0; + struct dhcp_msg *msg_in; +#if LWIP_DHCP_BOOTP_FILE + int file_overloaded = 0; +#endif + + LWIP_UNUSED_ARG(dhcp); + + /* clear received options */ + dhcp_clear_all_options(dhcp); + /* check that beginning of dhcp_msg (up to and including chaddr) is in first pbuf */ + if (p->len < DHCP_SNAME_OFS) { + return ERR_BUF; + } + msg_in = (struct dhcp_msg *)p->payload; +#if LWIP_DHCP_BOOTP_FILE + /* clear boot file name */ + dhcp->boot_file_name[0] = 0; +#endif /* LWIP_DHCP_BOOTP_FILE */ + + /* parse options */ + + /* start with options field */ + options_idx = DHCP_OPTIONS_OFS; + /* parse options to the end of the received packet */ + options_idx_max = p->tot_len; +again: + q = p; + while ((q != NULL) && (options_idx >= q->len)) { + options_idx = (u16_t)(options_idx - q->len); + options_idx_max = (u16_t)(options_idx_max - q->len); + q = q->next; + } + if (q == NULL) { + return ERR_BUF; + } + offset = options_idx; + offset_max = options_idx_max; + options = (u8_t *)q->payload; + /* at least 1 byte to read and no end marker, then at least 3 bytes to read? */ + while ((q != NULL) && (offset < offset_max) && (options[offset] != DHCP_OPTION_END)) { + u8_t op = options[offset]; + u8_t len; + u8_t decode_len = 0; + int decode_idx = -1; + u16_t val_offset = (u16_t)(offset + 2); + if (val_offset < offset) { + /* overflow */ + return ERR_BUF; + } + /* len byte might be in the next pbuf */ + if ((offset + 1) < q->len) { + len = options[offset + 1]; + } else { + len = (q->next != NULL ? ((u8_t *)q->next->payload)[0] : 0); + } + /* LWIP_DEBUGF(DHCP_DEBUG, ("msg_offset=%"U16_F", q->len=%"U16_F, msg_offset, q->len)); */ + decode_len = len; + switch (op) { + /* case(DHCP_OPTION_END): handled above */ + case (DHCP_OPTION_PAD): + /* special option: no len encoded */ + decode_len = len = 0; + /* will be increased below */ + break; + case (DHCP_OPTION_SUBNET_MASK): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_SUBNET_MASK; + break; + case (DHCP_OPTION_ROUTER): + decode_len = 4; /* only copy the first given router */ + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_ROUTER; + break; +#if LWIP_DHCP_PROVIDE_DNS_SERVERS + case (DHCP_OPTION_DNS_SERVER): + /* special case: there might be more than one server */ + LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;); + /* limit number of DNS servers */ + decode_len = LWIP_MIN(len, 4 * DNS_MAX_SERVERS); + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_DNS_SERVER; + break; +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ + case (DHCP_OPTION_LEASE_TIME): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_LEASE_TIME; + break; +#if LWIP_DHCP_GET_NTP_SRV + case (DHCP_OPTION_NTP): + /* special case: there might be more than one server */ + LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;); + /* limit number of NTP servers */ + decode_len = LWIP_MIN(len, 4 * LWIP_DHCP_MAX_NTP_SERVERS); + LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_NTP_SERVER; + break; +#endif /* LWIP_DHCP_GET_NTP_SRV*/ + case (DHCP_OPTION_OVERLOAD): + LWIP_ERROR("len == 1", len == 1, return ERR_VAL;); + /* decode 
overload only in options, not in file/sname: invalid packet */ + LWIP_ERROR("overload in file/sname", options_idx == DHCP_OPTIONS_OFS, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_OVERLOAD; + break; + case (DHCP_OPTION_MESSAGE_TYPE): + LWIP_ERROR("len == 1", len == 1, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_MSG_TYPE; + break; + case (DHCP_OPTION_SERVER_ID): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_SERVER_ID; + break; + case (DHCP_OPTION_T1): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_T1; + break; + case (DHCP_OPTION_T2): + LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); + decode_idx = DHCP_OPTION_IDX_T2; + break; + default: + decode_len = 0; + LWIP_DEBUGF(DHCP_DEBUG, ("skipping option %"U16_F" in options\n", (u16_t)op)); + LWIP_HOOK_DHCP_PARSE_OPTION(ip_current_netif(), dhcp, dhcp->state, msg_in, + dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE) ? (u8_t)dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE) : 0, + op, len, q, val_offset); + break; + } + if (op == DHCP_OPTION_PAD) { + offset++; + } else { + if (offset + len + 2 > 0xFFFF) { + /* overflow */ + return ERR_BUF; + } + offset = (u16_t)(offset + len + 2); + if (decode_len > 0) { + u32_t value = 0; + u16_t copy_len; +decode_next: + LWIP_ASSERT("check decode_idx", decode_idx >= 0 && decode_idx < DHCP_OPTION_IDX_MAX); + if (!dhcp_option_given(dhcp, decode_idx)) { + copy_len = LWIP_MIN(decode_len, 4); + if (pbuf_copy_partial(q, &value, copy_len, val_offset) != copy_len) { + return ERR_BUF; + } + if (decode_len > 4) { + /* decode more than one u32_t */ + u16_t next_val_offset; + LWIP_ERROR("decode_len %% 4 == 0", decode_len % 4 == 0, return ERR_VAL;); + dhcp_got_option(dhcp, decode_idx); + dhcp_set_option_value(dhcp, decode_idx, lwip_htonl(value)); + decode_len = (u8_t)(decode_len - 4); + next_val_offset = (u16_t)(val_offset + 4); + if (next_val_offset < val_offset) { + /* overflow */ + return ERR_BUF; + } + val_offset = next_val_offset; + decode_idx++; + goto decode_next; + } else if (decode_len == 4) { + value = lwip_ntohl(value); + } else { + LWIP_ERROR("invalid decode_len", decode_len == 1, return ERR_VAL;); + value = ((u8_t *)&value)[0]; + } + dhcp_got_option(dhcp, decode_idx); + dhcp_set_option_value(dhcp, decode_idx, value); + } + } + } + if (offset >= q->len) { + offset = (u16_t)(offset - q->len); + offset_max = (u16_t)(offset_max - q->len); + if (offset < offset_max) { + q = q->next; + LWIP_ERROR("next pbuf was null", q != NULL, return ERR_VAL;); + options = (u8_t *)q->payload; + } else { + /* We've run out of bytes, probably no end marker. Don't proceed. */ + return ERR_BUF; + } + } + } + /* is this an overloaded message? 
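+     (RFC 2131 option 52: value 1 means the file field also carries
+     options, 2 means the sname field does, 3 means both do)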
*/ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_OVERLOAD)) { + u32_t overload = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_OVERLOAD); + dhcp_clear_option(dhcp, DHCP_OPTION_IDX_OVERLOAD); + if (overload == DHCP_OVERLOAD_FILE) { + parse_file_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded file field\n")); + } else if (overload == DHCP_OVERLOAD_SNAME) { + parse_sname_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname field\n")); + } else if (overload == DHCP_OVERLOAD_SNAME_FILE) { + parse_sname_as_options = 1; + parse_file_as_options = 1; + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname and file field\n")); + } else { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("invalid overload option: %d\n", (int)overload)); + } + } + if (parse_file_as_options) { + /* if both are overloaded, parse file first and then sname (RFC 2131 ch. 4.1) */ + parse_file_as_options = 0; + options_idx = DHCP_FILE_OFS; + options_idx_max = DHCP_FILE_OFS + DHCP_FILE_LEN; +#if LWIP_DHCP_BOOTP_FILE + file_overloaded = 1; +#endif + goto again; + } else if (parse_sname_as_options) { + parse_sname_as_options = 0; + options_idx = DHCP_SNAME_OFS; + options_idx_max = DHCP_SNAME_OFS + DHCP_SNAME_LEN; + goto again; + } +#if LWIP_DHCP_BOOTP_FILE + if (!file_overloaded) { + /* only do this for ACK messages */ + if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE) && + (dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE) == DHCP_ACK)) + /* copy bootp file name, don't care for sname (server hostname) */ + if (pbuf_copy_partial(p, dhcp->boot_file_name, DHCP_FILE_LEN-1, DHCP_FILE_OFS) != (DHCP_FILE_LEN-1)) { + return ERR_BUF; + } + /* make sure the string is really NULL-terminated */ + dhcp->boot_file_name[DHCP_FILE_LEN-1] = 0; + } +#endif /* LWIP_DHCP_BOOTP_FILE */ + return ERR_OK; +} + +/** + * If an incoming DHCP message is in response to us, then trigger the state machine + */ +static void +dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + struct netif *netif = ip_current_input_netif(); + struct dhcp *dhcp = netif_dhcp_data(netif); + struct dhcp_msg *reply_msg = (struct dhcp_msg *)p->payload; + u8_t msg_type; + u8_t i; + struct dhcp_msg *msg_in; + + LWIP_UNUSED_ARG(arg); + + /* Caught DHCP message from netif that does not have DHCP enabled? 
-> not interested */ + if ((dhcp == NULL) || (dhcp->pcb_allocated == 0)) { + goto free_pbuf_and_return; + } + + LWIP_ASSERT("invalid server address type", IP_IS_V4(addr)); + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_recv(pbuf = %p) from DHCP server %"U16_F".%"U16_F".%"U16_F".%"U16_F" port %"U16_F"\n", (void *)p, + ip4_addr1_16(ip_2_ip4(addr)), ip4_addr2_16(ip_2_ip4(addr)), ip4_addr3_16(ip_2_ip4(addr)), ip4_addr4_16(ip_2_ip4(addr)), port)); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len)); + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len)); + /* prevent warnings about unused arguments */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + + if (p->len < DHCP_MIN_REPLY_LEN) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP reply message or pbuf too short\n")); + goto free_pbuf_and_return; + } + + if (reply_msg->op != DHCP_BOOTREPLY) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("not a DHCP reply message, but type %"U16_F"\n", (u16_t)reply_msg->op)); + goto free_pbuf_and_return; + } + /* iterate through hardware address and match against DHCP message */ + for (i = 0; i < netif->hwaddr_len && i < LWIP_MIN(DHCP_CHADDR_LEN, NETIF_MAX_HWADDR_LEN); i++) { + if (netif->hwaddr[i] != reply_msg->chaddr[i]) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("netif->hwaddr[%"U16_F"]==%02"X16_F" != reply_msg->chaddr[%"U16_F"]==%02"X16_F"\n", + (u16_t)i, (u16_t)netif->hwaddr[i], (u16_t)i, (u16_t)reply_msg->chaddr[i])); + goto free_pbuf_and_return; + } + } + /* match transaction ID against what we expected */ + if (lwip_ntohl(reply_msg->xid) != dhcp->xid) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("transaction id mismatch reply_msg->xid(%"X32_F")!=dhcp->xid(%"X32_F")\n", lwip_ntohl(reply_msg->xid), dhcp->xid)); + goto free_pbuf_and_return; + } + /* option fields could be unfold? */ + if (dhcp_parse_reply(p, dhcp) != ERR_OK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("problem unfolding DHCP message - too short on memory?\n")); + goto free_pbuf_and_return; + } + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("searching DHCP_OPTION_MESSAGE_TYPE\n")); + /* obtain pointer to DHCP message type */ + if (!dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP_OPTION_MESSAGE_TYPE option not found\n")); + goto free_pbuf_and_return; + } + + msg_in = (struct dhcp_msg *)p->payload; + /* read DHCP message type */ + msg_type = (u8_t)dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE); + /* message type is DHCP ACK? */ + if (msg_type == DHCP_ACK) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_ACK received\n")); + /* in requesting state? */ + if (dhcp->state == DHCP_STATE_REQUESTING) { + dhcp_handle_ack(netif, msg_in); +#if DHCP_DOES_ARP_CHECK + if ((netif->flags & NETIF_FLAG_ETHARP) != 0) { + /* check if the acknowledged lease address is already in use */ + dhcp_check(netif); + } else { + /* bind interface to the acknowledged lease address */ + dhcp_bind(netif); + } +#else + /* bind interface to the acknowledged lease address */ + dhcp_bind(netif); +#endif + } + /* already bound to the given lease address? 
*/ + else if ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REBINDING) || + (dhcp->state == DHCP_STATE_RENEWING)) { + dhcp_handle_ack(netif, msg_in); + dhcp_bind(netif); + } + } + /* received a DHCP_NAK in appropriate state? */ + else if ((msg_type == DHCP_NAK) && + ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REQUESTING) || + (dhcp->state == DHCP_STATE_REBINDING) || (dhcp->state == DHCP_STATE_RENEWING ))) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_NAK received\n")); + dhcp_handle_nak(netif); + } + /* received a DHCP_OFFER in DHCP_STATE_SELECTING state? */ + else if ((msg_type == DHCP_OFFER) && (dhcp->state == DHCP_STATE_SELECTING)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_OFFER received in DHCP_STATE_SELECTING state\n")); + /* remember offered lease */ + dhcp_handle_offer(netif, msg_in); + } + +free_pbuf_and_return: + pbuf_free(p); +} + +/** + * Create a DHCP request, fill in common headers + * + * @param netif the netif under DHCP control + * @param dhcp dhcp control struct + * @param message_type message type of the request + */ +static struct pbuf * +dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type, u16_t *options_out_len) +{ + u16_t i; + struct pbuf *p_out; + struct dhcp_msg *msg_out; + u16_t options_out_len_loc; + +#ifndef DHCP_GLOBAL_XID + /** default global transaction identifier starting value (easy to match + * with a packet analyser). We simply increment for each new request. + * Predefine DHCP_GLOBAL_XID to a better value or a function call to generate one + * at runtime, any supporting function prototypes can be defined in DHCP_GLOBAL_XID_HEADER */ +#if DHCP_CREATE_RAND_XID && defined(LWIP_RAND) + static u32_t xid; +#else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + static u32_t xid = 0xABCD0000; +#endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ +#else + if (!xid_initialised) { + xid = DHCP_GLOBAL_XID; + xid_initialised = !xid_initialised; + } +#endif + LWIP_ERROR("dhcp_create_msg: netif != NULL", (netif != NULL), return NULL;); + LWIP_ERROR("dhcp_create_msg: dhcp != NULL", (dhcp != NULL), return NULL;); + p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp_msg), PBUF_RAM); + if (p_out == NULL) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp_create_msg(): could not allocate pbuf\n")); + return NULL; + } + LWIP_ASSERT("dhcp_create_msg: check that first pbuf can hold struct dhcp_msg", + (p_out->len >= sizeof(struct dhcp_msg))); + + /* DHCP_REQUEST should reuse 'xid' from DHCPOFFER */ + if ((message_type != DHCP_REQUEST) || (dhcp->state == DHCP_STATE_REBOOTING)) { + /* reuse transaction identifier in retransmissions */ + if (dhcp->tries == 0) { +#if DHCP_CREATE_RAND_XID && defined(LWIP_RAND) + xid = LWIP_RAND(); +#else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + xid++; +#endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ + } + dhcp->xid = xid; + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, + ("transaction id xid(%"X32_F")\n", xid)); + + msg_out = (struct dhcp_msg *)p_out->payload; + memset(msg_out, 0, sizeof(struct dhcp_msg)); + + msg_out->op = DHCP_BOOTREQUEST; + /* @todo: make link layer independent */ + msg_out->htype = LWIP_IANA_HWTYPE_ETHERNET; + msg_out->hlen = netif->hwaddr_len; + msg_out->xid = lwip_htonl(dhcp->xid); + /* we don't need the broadcast flag since we can receive unicast traffic + before being fully configured! 
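+     (accordingly, the BOOTP 'flags' field of the outgoing message is left
+     zeroed by the memset above, i.e. replies are requested as unicast)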
*/ + /* set ciaddr to netif->ip_addr based on message_type and state */ + if ((message_type == DHCP_INFORM) || (message_type == DHCP_DECLINE) || (message_type == DHCP_RELEASE) || + ((message_type == DHCP_REQUEST) && /* DHCP_STATE_BOUND not used for sending! */ + ((dhcp->state == DHCP_STATE_RENEWING) || dhcp->state == DHCP_STATE_REBINDING))) { + ip4_addr_copy(msg_out->ciaddr, *netif_ip4_addr(netif)); + } + for (i = 0; i < LWIP_MIN(DHCP_CHADDR_LEN, NETIF_MAX_HWADDR_LEN); i++) { + /* copy netif hardware address (padded with zeroes through memset already) */ + msg_out->chaddr[i] = netif->hwaddr[i]; + } + msg_out->cookie = PP_HTONL(DHCP_MAGIC_COOKIE); + /* Add option MESSAGE_TYPE */ + options_out_len_loc = dhcp_option(0, msg_out->options, DHCP_OPTION_MESSAGE_TYPE, DHCP_OPTION_MESSAGE_TYPE_LEN); + options_out_len_loc = dhcp_option_byte(options_out_len_loc, msg_out->options, message_type); + if (options_out_len) { + *options_out_len = options_out_len_loc; + } + return p_out; +} + +/** + * Add a DHCP message trailer + * + * Adds the END option to the DHCP message, and if + * necessary, up to three padding bytes. + */ +static void +dhcp_option_trailer(u16_t options_out_len, u8_t *options, struct pbuf *p_out) +{ + options[options_out_len++] = DHCP_OPTION_END; + /* packet is too small, or not 4 byte aligned? */ + while (((options_out_len < DHCP_MIN_OPTIONS_LEN) || (options_out_len & 3)) && + (options_out_len < DHCP_OPTIONS_LEN)) { + /* add a fill/padding byte */ + options[options_out_len++] = 0; + } + /* shrink the pbuf to the actual content length */ + pbuf_realloc(p_out, (u16_t)(sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + options_out_len)); +} + +/** check if DHCP supplied netif->ip_addr + * + * @param netif the netif to check + * @return 1 if DHCP supplied netif->ip_addr (states BOUND or RENEWING), + * 0 otherwise + */ +u8_t +dhcp_supplied_address(const struct netif *netif) +{ + if ((netif != NULL) && (netif_dhcp_data(netif) != NULL)) { + struct dhcp *dhcp = netif_dhcp_data(netif); + return (dhcp->state == DHCP_STATE_BOUND) || (dhcp->state == DHCP_STATE_RENEWING) || + (dhcp->state == DHCP_STATE_REBINDING); + } + return 0; +} + +#endif /* LWIP_IPV4 && LWIP_DHCP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c new file mode 100644 index 0000000..570aaa3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/etharp.c @@ -0,0 +1,1204 @@ +/** + * @file + * Address Resolution Protocol module for IP over Ethernet + * + * Functionally, ARP is divided into two parts. The first maps an IP address + * to a physical address when sending a packet, and the second part answers + * requests from other machines for our physical address. + * + * This implementation complies with RFC 826 (Ethernet ARP). It supports + * Gratuitious ARP from RFC3220 (IP Mobility Support for IPv4) section 4.6 + * if an interface calls etharp_gratuitous(our_netif) upon address change. + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * Copyright (c) 2003-2004 Leon Woestenberg + * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/etharp.h"
+#include "lwip/stats.h"
+#include "lwip/snmp.h"
+#include "lwip/dhcp.h"
+#include "lwip/autoip.h"
+#include "lwip/prot/iana.h"
+#include "netif/ethernet.h"
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+/** Re-request a used ARP entry 1 minute before it would expire to prevent
+ *  breaking a steadily used connection because the ARP entry timed out. */
+#define ARP_AGE_REREQUEST_USED_UNICAST   (ARP_MAXAGE - 30)
+#define ARP_AGE_REREQUEST_USED_BROADCAST (ARP_MAXAGE - 15)
+
+/** the time an ARP entry stays pending after first request,
+ *  for ARP_TMR_INTERVAL = 1000, this is
+ *  5 seconds.
+ *
+ *  @internal Keep this number at least 2, otherwise it might
+ *  run out instantly if the timeout occurs directly after a request.
+ */
+#define ARP_MAXPENDING 5
+
+/** ARP states */
+enum etharp_state {
+  ETHARP_STATE_EMPTY = 0,
+  ETHARP_STATE_PENDING,
+  ETHARP_STATE_STABLE,
+  ETHARP_STATE_STABLE_REREQUESTING_1,
+  ETHARP_STATE_STABLE_REREQUESTING_2
+#if ETHARP_SUPPORT_STATIC_ENTRIES
+  , ETHARP_STATE_STATIC
+#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */
+};
+
+struct etharp_entry {
+#if ARP_QUEUEING
+  /** Pointer to queue of pending outgoing packets on this ARP entry. */
+  struct etharp_q_entry *q;
+#else /* ARP_QUEUEING */
+  /** Pointer to a single pending outgoing packet on this ARP entry. */
+  struct pbuf *q;
+#endif /* ARP_QUEUEING */
+  ip4_addr_t ipaddr;
+  struct netif *netif;
+  struct eth_addr ethaddr;
+  u16_t ctime;
+  u8_t state;
+};
+
+static struct etharp_entry arp_table[ARP_TABLE_SIZE];
+
+#if !LWIP_NETIF_HWADDRHINT
+static netif_addr_idx_t etharp_cached_entry;
+#endif /* !LWIP_NETIF_HWADDRHINT */
+
+/** Try hard to create a new entry - we want the IP address to appear in
+    the cache (even if this means removing an active entry or so).
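+    ETHARP_FLAG_FIND_ONLY, by contrast, only searches the table and never
+    allocates or recycles an entry; see etharp_find_entry() below.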
*/ +#define ETHARP_FLAG_TRY_HARD 1 +#define ETHARP_FLAG_FIND_ONLY 2 +#if ETHARP_SUPPORT_STATIC_ENTRIES +#define ETHARP_FLAG_STATIC_ENTRY 4 +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +#if LWIP_NETIF_HWADDRHINT +#define ETHARP_SET_ADDRHINT(netif, addrhint) do { if (((netif) != NULL) && ((netif)->hints != NULL)) { \ + (netif)->hints->addr_hint = (addrhint); }} while(0) +#else /* LWIP_NETIF_HWADDRHINT */ +#define ETHARP_SET_ADDRHINT(netif, addrhint) (etharp_cached_entry = (addrhint)) +#endif /* LWIP_NETIF_HWADDRHINT */ + + +/* Check for maximum ARP_TABLE_SIZE */ +#if (ARP_TABLE_SIZE > NETIF_ADDR_IDX_MAX) +#error "ARP_TABLE_SIZE must fit in an s16_t, you have to reduce it in your lwipopts.h" +#endif + + +static err_t etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr *hw_dst_addr); +static err_t etharp_raw(struct netif *netif, + const struct eth_addr *ethsrc_addr, const struct eth_addr *ethdst_addr, + const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr, + const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr, + const u16_t opcode); + +#if ARP_QUEUEING +/** + * Free a complete queue of etharp entries + * + * @param q a qeueue of etharp_q_entry's to free + */ +static void +free_etharp_q(struct etharp_q_entry *q) +{ + struct etharp_q_entry *r; + LWIP_ASSERT("q != NULL", q != NULL); + while (q) { + r = q; + q = q->next; + LWIP_ASSERT("r->p != NULL", (r->p != NULL)); + pbuf_free(r->p); + memp_free(MEMP_ARP_QUEUE, r); + } +} +#else /* ARP_QUEUEING */ + +/** Compatibility define: free the queued pbuf */ +#define free_etharp_q(q) pbuf_free(q) + +#endif /* ARP_QUEUEING */ + +/** Clean up ARP table entries */ +static void +etharp_free_entry(int i) +{ + /* remove from SNMP ARP index tree */ + mib2_remove_arp_entry(arp_table[i].netif, &arp_table[i].ipaddr); + /* and empty packet queue */ + if (arp_table[i].q != NULL) { + /* remove all queued packets */ + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_free_entry: freeing entry %"U16_F", packet queue %p.\n", (u16_t)i, (void *)(arp_table[i].q))); + free_etharp_q(arp_table[i].q); + arp_table[i].q = NULL; + } + /* recycle entry for re-use */ + arp_table[i].state = ETHARP_STATE_EMPTY; +#ifdef LWIP_DEBUG + /* for debugging, clean out the complete entry */ + arp_table[i].ctime = 0; + arp_table[i].netif = NULL; + ip4_addr_set_zero(&arp_table[i].ipaddr); + arp_table[i].ethaddr = ethzero; +#endif /* LWIP_DEBUG */ +} + +/** + * Clears expired entries in the ARP table. + * + * This function should be called every ARP_TMR_INTERVAL milliseconds (1 second), + * in order to expire entries in the ARP table. + */ +void +etharp_tmr(void) +{ + int i; + + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer\n")); + /* remove expired entries from the ARP table */ + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + if (state != ETHARP_STATE_EMPTY +#if ETHARP_SUPPORT_STATIC_ENTRIES + && (state != ETHARP_STATE_STATIC) +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + ) { + arp_table[i].ctime++; + if ((arp_table[i].ctime >= ARP_MAXAGE) || + ((arp_table[i].state == ETHARP_STATE_PENDING) && + (arp_table[i].ctime >= ARP_MAXPENDING))) { + /* pending or stable entry has become old! */ + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer: expired %s entry %d.\n", + arp_table[i].state >= ETHARP_STATE_STABLE ? "stable" : "pending", i)); + /* clean up entries that have just been expired */ + etharp_free_entry(i); + } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_1) { + /* Don't send more than one request every 2 seconds. 
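+           The two REREQUESTING states implement this throttle: etharp_tmr()
+           runs once per second and steps _1 -> _2 -> STABLE, so the next
+           re-request can be triggered only after two ticks have elapsed.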
*/ + arp_table[i].state = ETHARP_STATE_STABLE_REREQUESTING_2; + } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_2) { + /* Reset state to stable, so that the next transmitted packet will + re-send an ARP request. */ + arp_table[i].state = ETHARP_STATE_STABLE; + } else if (arp_table[i].state == ETHARP_STATE_PENDING) { + /* still pending, resend an ARP query */ + etharp_request(arp_table[i].netif, &arp_table[i].ipaddr); + } + } + } +} + +/** + * Search the ARP table for a matching or new entry. + * + * If an IP address is given, return a pending or stable ARP entry that matches + * the address. If no match is found, create a new entry with this address set, + * but in state ETHARP_EMPTY. The caller must check and possibly change the + * state of the returned entry. + * + * If ipaddr is NULL, return a initialized new entry in state ETHARP_EMPTY. + * + * In all cases, attempt to create new entries from an empty entry. If no + * empty entries are available and ETHARP_FLAG_TRY_HARD flag is set, recycle + * old entries. Heuristic choose the least important entry for recycling. + * + * @param ipaddr IP address to find in ARP cache, or to add if not found. + * @param flags See @ref etharp_state + * @param netif netif related to this address (used for NETIF_HWADDRHINT) + * + * @return The ARP entry index that matched or is created, ERR_MEM if no + * entry is found or could be recycled. + */ +static s16_t +etharp_find_entry(const ip4_addr_t *ipaddr, u8_t flags, struct netif *netif) +{ + s16_t old_pending = ARP_TABLE_SIZE, old_stable = ARP_TABLE_SIZE; + s16_t empty = ARP_TABLE_SIZE; + s16_t i = 0; + /* oldest entry with packets on queue */ + s16_t old_queue = ARP_TABLE_SIZE; + /* its age */ + u16_t age_queue = 0, age_pending = 0, age_stable = 0; + + LWIP_UNUSED_ARG(netif); + + /** + * a) do a search through the cache, remember candidates + * b) select candidate entry + * c) create new entry + */ + + /* a) in a single search sweep, do all of this + * 1) remember the first empty entry (if any) + * 2) remember the oldest stable entry (if any) + * 3) remember the oldest pending entry without queued packets (if any) + * 4) remember the oldest pending entry with queued packets (if any) + * 5) search for a matching IP entry, either pending or stable + * until 5 matches, or all entries are searched for. + */ + + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + /* no empty entry found yet and now we do find one? */ + if ((empty == ARP_TABLE_SIZE) && (state == ETHARP_STATE_EMPTY)) { + LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_find_entry: found empty entry %d\n", (int)i)); + /* remember first empty entry */ + empty = i; + } else if (state != ETHARP_STATE_EMPTY) { + LWIP_ASSERT("state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE", + state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE); + /* if given, does IP address match IP address in ARP entry? */ + if (ipaddr && ip4_addr_cmp(ipaddr, &arp_table[i].ipaddr) +#if ETHARP_TABLE_MATCH_NETIF + && ((netif == NULL) || (netif == arp_table[i].netif)) +#endif /* ETHARP_TABLE_MATCH_NETIF */ + ) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: found matching entry %d\n", (int)i)); + /* found exact IP address match, simply bail out */ + return i; + } + /* pending entry? */ + if (state == ETHARP_STATE_PENDING) { + /* pending with queued packets? 
*/ + if (arp_table[i].q != NULL) { + if (arp_table[i].ctime >= age_queue) { + old_queue = i; + age_queue = arp_table[i].ctime; + } + } else + /* pending without queued packets? */ + { + if (arp_table[i].ctime >= age_pending) { + old_pending = i; + age_pending = arp_table[i].ctime; + } + } + /* stable entry? */ + } else if (state >= ETHARP_STATE_STABLE) { +#if ETHARP_SUPPORT_STATIC_ENTRIES + /* don't record old_stable for static entries since they never expire */ + if (state < ETHARP_STATE_STATIC) +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + { + /* remember entry with oldest stable entry in oldest, its age in maxtime */ + if (arp_table[i].ctime >= age_stable) { + old_stable = i; + age_stable = arp_table[i].ctime; + } + } + } + } + } + /* { we have no match } => try to create a new entry */ + + /* don't create new entry, only search? */ + if (((flags & ETHARP_FLAG_FIND_ONLY) != 0) || + /* or no empty entry found and not allowed to recycle? */ + ((empty == ARP_TABLE_SIZE) && ((flags & ETHARP_FLAG_TRY_HARD) == 0))) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty entry found and not allowed to recycle\n")); + return (s16_t)ERR_MEM; + } + + /* b) choose the least destructive entry to recycle: + * 1) empty entry + * 2) oldest stable entry + * 3) oldest pending entry without queued packets + * 4) oldest pending entry with queued packets + * + * { ETHARP_FLAG_TRY_HARD is set at this point } + */ + + /* 1) empty entry available? */ + if (empty < ARP_TABLE_SIZE) { + i = empty; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting empty entry %d\n", (int)i)); + } else { + /* 2) found recyclable stable entry? */ + if (old_stable < ARP_TABLE_SIZE) { + /* recycle oldest stable*/ + i = old_stable; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest stable entry %d\n", (int)i)); + /* no queued packets should exist on stable entries */ + LWIP_ASSERT("arp_table[i].q == NULL", arp_table[i].q == NULL); + /* 3) found recyclable pending entry without queued packets? */ + } else if (old_pending < ARP_TABLE_SIZE) { + /* recycle oldest pending */ + i = old_pending; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %d (without queue)\n", (int)i)); + /* 4) found recyclable pending entry with queued packets? */ + } else if (old_queue < ARP_TABLE_SIZE) { + /* recycle oldest pending (queued packets are free in etharp_free_entry) */ + i = old_queue; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %d, freeing packet queue %p\n", (int)i, (void *)(arp_table[i].q))); + /* no empty or recyclable entries found */ + } else { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty or recyclable entries found\n")); + return (s16_t)ERR_MEM; + } + + /* { empty or recyclable entry found } */ + LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE); + etharp_free_entry(i); + } + + LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE); + LWIP_ASSERT("arp_table[i].state == ETHARP_STATE_EMPTY", + arp_table[i].state == ETHARP_STATE_EMPTY); + + /* IP address given? */ + if (ipaddr != NULL) { + /* set IP address */ + ip4_addr_copy(arp_table[i].ipaddr, *ipaddr); + } + arp_table[i].ctime = 0; +#if ETHARP_TABLE_MATCH_NETIF + arp_table[i].netif = netif; +#endif /* ETHARP_TABLE_MATCH_NETIF */ + return (s16_t)i; +} + +/** + * Update (or insert) a IP/MAC address pair in the ARP cache. 
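+ *
+ * Typical call path (taken verbatim from etharp_input() below): the sender
+ * pair of every incoming ARP packet is snooped via
+ *   etharp_update_arp_entry(netif, &sipaddr, &(hdr->shwaddr),
+ *                           for_us ? ETHARP_FLAG_TRY_HARD : ETHARP_FLAG_FIND_ONLY);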
+ * + * If a pending entry is resolved, any queued packets will be sent + * at this point. + * + * @param netif netif related to this entry (used for NETIF_ADDRHINT) + * @param ipaddr IP address of the inserted ARP entry. + * @param ethaddr Ethernet address of the inserted ARP entry. + * @param flags See @ref etharp_state + * + * @return + * - ERR_OK Successfully updated ARP cache. + * - ERR_MEM If we could not add a new ARP entry when ETHARP_FLAG_TRY_HARD was set. + * - ERR_ARG Non-unicast address given, those will not appear in ARP cache. + * + * @see pbuf_free() + */ +static err_t +etharp_update_arp_entry(struct netif *netif, const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, u8_t flags) +{ + s16_t i; + LWIP_ASSERT("netif->hwaddr_len == ETH_HWADDR_LEN", netif->hwaddr_len == ETH_HWADDR_LEN); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr), + (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2], + (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5])); + /* non-unicast address? */ + if (ip4_addr_isany(ipaddr) || + ip4_addr_isbroadcast(ipaddr, netif) || + ip4_addr_ismulticast(ipaddr)) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: will not add non-unicast IP address to ARP cache\n")); + return ERR_ARG; + } + /* find or create ARP entry */ + i = etharp_find_entry(ipaddr, flags, netif); + /* bail out if no entry could be found */ + if (i < 0) { + return (err_t)i; + } + +#if ETHARP_SUPPORT_STATIC_ENTRIES + if (flags & ETHARP_FLAG_STATIC_ENTRY) { + /* record static type */ + arp_table[i].state = ETHARP_STATE_STATIC; + } else if (arp_table[i].state == ETHARP_STATE_STATIC) { + /* found entry is a static type, don't overwrite it */ + return ERR_VAL; + } else +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + { + /* mark it stable */ + arp_table[i].state = ETHARP_STATE_STABLE; + } + + /* record network interface */ + arp_table[i].netif = netif; + /* insert in SNMP ARP index tree */ + mib2_add_arp_entry(netif, &arp_table[i].ipaddr); + + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: updating stable entry %"S16_F"\n", i)); + /* update address */ + SMEMCPY(&arp_table[i].ethaddr, ethaddr, ETH_HWADDR_LEN); + /* reset time stamp */ + arp_table[i].ctime = 0; + /* this is where we will send out queued packets! */ +#if ARP_QUEUEING + while (arp_table[i].q != NULL) { + struct pbuf *p; + /* remember remainder of queue */ + struct etharp_q_entry *q = arp_table[i].q; + /* pop first item off the queue */ + arp_table[i].q = q->next; + /* get the packet pointer */ + p = q->p; + /* now queue entry can be freed */ + memp_free(MEMP_ARP_QUEUE, q); +#else /* ARP_QUEUEING */ + if (arp_table[i].q != NULL) { + struct pbuf *p = arp_table[i].q; + arp_table[i].q = NULL; +#endif /* ARP_QUEUEING */ + /* send the queued IP packet */ + ethernet_output(netif, p, (struct eth_addr *)(netif->hwaddr), ethaddr, ETHTYPE_IP); + /* free the queued IP packet */ + pbuf_free(p); + } + return ERR_OK; +} + +#if ETHARP_SUPPORT_STATIC_ENTRIES +/** Add a new static entry to the ARP table. If an entry exists for the + * specified IP address, this entry is overwritten. + * If packets are queued for the specified IP address, they are sent out. 
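+ *
+ * Example usage (illustrative values only):
+ *   ip4_addr_t ip;
+ *   struct eth_addr mac = {{0x00, 0x23, 0xC1, 0x00, 0x00, 0x01}};
+ *   IP4_ADDR(&ip, 192, 168, 0, 10);
+ *   etharp_add_static_entry(&ip, &mac);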
+ * + * @param ipaddr IP address for the new static entry + * @param ethaddr ethernet address for the new static entry + * @return See return values of etharp_add_static_entry + */ +err_t +etharp_add_static_entry(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr) +{ + struct netif *netif; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_add_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr), + (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2], + (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5])); + + netif = ip4_route(ipaddr); + if (netif == NULL) { + return ERR_RTE; + } + + return etharp_update_arp_entry(netif, ipaddr, ethaddr, ETHARP_FLAG_TRY_HARD | ETHARP_FLAG_STATIC_ENTRY); +} + +/** Remove a static entry from the ARP table previously added with a call to + * etharp_add_static_entry. + * + * @param ipaddr IP address of the static entry to remove + * @return ERR_OK: entry removed + * ERR_MEM: entry wasn't found + * ERR_ARG: entry wasn't a static entry but a dynamic one + */ +err_t +etharp_remove_static_entry(const ip4_addr_t *ipaddr) +{ + s16_t i; + LWIP_ASSERT_CORE_LOCKED(); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_remove_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr))); + + /* find or create ARP entry */ + i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, NULL); + /* bail out if no entry could be found */ + if (i < 0) { + return (err_t)i; + } + + if (arp_table[i].state != ETHARP_STATE_STATIC) { + /* entry wasn't a static entry, cannot remove it */ + return ERR_ARG; + } + /* entry found, free it */ + etharp_free_entry(i); + return ERR_OK; +} +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +/** + * Remove all ARP table entries of the specified netif. + * + * @param netif points to a network interface + */ +void +etharp_cleanup_netif(struct netif *netif) +{ + int i; + + for (i = 0; i < ARP_TABLE_SIZE; ++i) { + u8_t state = arp_table[i].state; + if ((state != ETHARP_STATE_EMPTY) && (arp_table[i].netif == netif)) { + etharp_free_entry(i); + } + } +} + +/** + * Finds (stable) ethernet/IP address pair from ARP table + * using interface and IP address index. + * @note the addresses in the ARP table are in network order! 
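+ *
+ * Illustrative lookup ('dst' is a hypothetical ip4_addr_t):
+ *   struct eth_addr *eth_ret; const ip4_addr_t *ip_ret;
+ *   if (etharp_find_addr(netif, &dst, &eth_ret, &ip_ret) >= 0) {
+ *     ... entry is stable; eth_ret and ip_ret point into the ARP table ...
+ *   }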
+ * + * @param netif points to interface index + * @param ipaddr points to the (network order) IP address index + * @param eth_ret points to return pointer + * @param ip_ret points to return pointer + * @return table index if found, -1 otherwise + */ +ssize_t +etharp_find_addr(struct netif *netif, const ip4_addr_t *ipaddr, + struct eth_addr **eth_ret, const ip4_addr_t **ip_ret) +{ + s16_t i; + + LWIP_ASSERT("eth_ret != NULL && ip_ret != NULL", + eth_ret != NULL && ip_ret != NULL); + + LWIP_UNUSED_ARG(netif); + + i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, netif); + if ((i >= 0) && (arp_table[i].state >= ETHARP_STATE_STABLE)) { + *eth_ret = &arp_table[i].ethaddr; + *ip_ret = &arp_table[i].ipaddr; + return i; + } + return -1; +} + +/** + * Possibility to iterate over stable ARP table entries + * + * @param i entry number, 0 to ARP_TABLE_SIZE + * @param ipaddr return value: IP address + * @param netif return value: points to interface + * @param eth_ret return value: ETH address + * @return 1 on valid index, 0 otherwise + */ +int +etharp_get_entry(size_t i, ip4_addr_t **ipaddr, struct netif **netif, struct eth_addr **eth_ret) +{ + LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL); + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("eth_ret != NULL", eth_ret != NULL); + + if ((i < ARP_TABLE_SIZE) && (arp_table[i].state >= ETHARP_STATE_STABLE)) { + *ipaddr = &arp_table[i].ipaddr; + *netif = arp_table[i].netif; + *eth_ret = &arp_table[i].ethaddr; + return 1; + } else { + return 0; + } +} + +/** + * Responds to ARP requests to us. Upon ARP replies to us, add entry to cache + * send out queued IP packets. Updates cache with snooped address pairs. + * + * Should be called for incoming ARP packets. The pbuf in the argument + * is freed by this function. + * + * @param p The ARP packet that arrived on netif. Is freed by this function. + * @param netif The lwIP network interface on which the ARP packet pbuf arrived. + * + * @see pbuf_free() + */ +void +etharp_input(struct pbuf *p, struct netif *netif) +{ + struct etharp_hdr *hdr; + /* these are aligned properly, whereas the ARP header fields might not be */ + ip4_addr_t sipaddr, dipaddr; + u8_t for_us; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif != NULL", (netif != NULL), return;); + + hdr = (struct etharp_hdr *)p->payload; + + /* RFC 826 "Packet Reception": */ + if ((hdr->hwtype != PP_HTONS(LWIP_IANA_HWTYPE_ETHERNET)) || + (hdr->hwlen != ETH_HWADDR_LEN) || + (hdr->protolen != sizeof(ip4_addr_t)) || + (hdr->proto != PP_HTONS(ETHTYPE_IP))) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("etharp_input: packet dropped, wrong hw type, hwlen, proto, protolen or ethernet type (%"U16_F"/%"U16_F"/%"U16_F"/%"U16_F")\n", + hdr->hwtype, (u16_t)hdr->hwlen, hdr->proto, (u16_t)hdr->protolen)); + ETHARP_STATS_INC(etharp.proterr); + ETHARP_STATS_INC(etharp.drop); + pbuf_free(p); + return; + } + ETHARP_STATS_INC(etharp.recv); + +#if LWIP_AUTOIP + /* We have to check if a host already has configured our random + * created link local address and continuously check if there is + * a host with this IP-address so we can detect collisions */ + autoip_arp_reply(netif, hdr); +#endif /* LWIP_AUTOIP */ + + /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without + * structure packing (not using structure copy which breaks strict-aliasing rules). 
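+   * The two macro invocations below extract the sender and target IPv4
+   * addresses from the (potentially misaligned) ARP header into the
+   * aligned locals sipaddr and dipaddr declared above.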
*/ + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&sipaddr, &hdr->sipaddr); + IPADDR_WORDALIGNED_COPY_TO_IP4_ADDR_T(&dipaddr, &hdr->dipaddr); + + /* this interface is not configured? */ + if (ip4_addr_isany_val(*netif_ip4_addr(netif))) { + for_us = 0; + } else { + /* ARP packet directed to us? */ + for_us = (u8_t)ip4_addr_cmp(&dipaddr, netif_ip4_addr(netif)); + } + + /* ARP message directed to us? + -> add IP address in ARP cache; assume requester wants to talk to us, + can result in directly sending the queued packets for this host. + ARP message not directed to us? + -> update the source IP address in the cache, if present */ + etharp_update_arp_entry(netif, &sipaddr, &(hdr->shwaddr), + for_us ? ETHARP_FLAG_TRY_HARD : ETHARP_FLAG_FIND_ONLY); + + /* now act on the message itself */ + switch (hdr->opcode) { + /* ARP request? */ + case PP_HTONS(ARP_REQUEST): + /* ARP request. If it asked for our address, we send out a + * reply. In any case, we time-stamp any existing ARP entry, + * and possibly send out an IP packet that was queued on it. */ + + LWIP_DEBUGF (ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP request\n")); + /* ARP request for our address? */ + if (for_us) { + /* send ARP response */ + etharp_raw(netif, + (struct eth_addr *)netif->hwaddr, &hdr->shwaddr, + (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), + &hdr->shwaddr, &sipaddr, + ARP_REPLY); + /* we are not configured? */ + } else if (ip4_addr_isany_val(*netif_ip4_addr(netif))) { + /* { for_us == 0 and netif->ip_addr.addr == 0 } */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: we are unconfigured, ARP request ignored.\n")); + /* request was not directed to us */ + } else { + /* { for_us == 0 and netif->ip_addr.addr != 0 } */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP request was not for us.\n")); + } + break; + case PP_HTONS(ARP_REPLY): + /* ARP reply. We already updated the ARP cache earlier. */ + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP reply\n")); +#if (LWIP_DHCP && DHCP_DOES_ARP_CHECK) + /* DHCP wants to know about ARP replies from any host with an + * IP address also offered to us by the DHCP server. We do not + * want to take a duplicate IP address on a single network. + * @todo How should we handle redundant (fail-over) interfaces? */ + dhcp_arp_reply(netif, &sipaddr); +#endif /* (LWIP_DHCP && DHCP_DOES_ARP_CHECK) */ + break; + default: + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP unknown opcode type %"S16_F"\n", lwip_htons(hdr->opcode))); + ETHARP_STATS_INC(etharp.err); + break; + } + /* free ARP packet */ + pbuf_free(p); +} + +/** Just a small helper function that sends a pbuf to an ethernet address + * in the arp_table specified by the index 'arp_idx'. + */ +static err_t +etharp_output_to_arp_index(struct netif *netif, struct pbuf *q, netif_addr_idx_t arp_idx) +{ + LWIP_ASSERT("arp_table[arp_idx].state >= ETHARP_STATE_STABLE", + arp_table[arp_idx].state >= ETHARP_STATE_STABLE); + /* if arp table entry is about to expire: re-request it, + but only if its state is ETHARP_STATE_STABLE to prevent flooding the + network with ARP requests if this address is used frequently. 
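+     Given the ARP_AGE_REREQUEST_USED_* thresholds defined at the top of
+     this file, a frequently used entry is first refreshed by unicast (from
+     age ARP_MAXAGE - 30 on) and then by broadcast (from ARP_MAXAGE - 15 on),
+     e.g. at 270 s and 285 s for the default ARP_MAXAGE of 300.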
+   */
+  if (arp_table[arp_idx].state == ETHARP_STATE_STABLE) {
+    if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_BROADCAST) {
+      /* issue a standard request using broadcast */
+      if (etharp_request(netif, &arp_table[arp_idx].ipaddr) == ERR_OK) {
+        arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1;
+      }
+    } else if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_UNICAST) {
+      /* issue a unicast request (for 15 seconds) to prevent unnecessary broadcast */
+      if (etharp_request_dst(netif, &arp_table[arp_idx].ipaddr, &arp_table[arp_idx].ethaddr) == ERR_OK) {
+        arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1;
+      }
+    }
+  }
+
+  return ethernet_output(netif, q, (struct eth_addr *)(netif->hwaddr), &arp_table[arp_idx].ethaddr, ETHTYPE_IP);
+}
+
+/**
+ * Resolve and fill-in Ethernet address header for outgoing IP packet.
+ *
+ * For IP multicast and broadcast, corresponding Ethernet addresses
+ * are selected and the packet is transmitted on the link.
+ *
+ * For unicast addresses, the packet is submitted to etharp_query(). In
+ * case the IP address is outside the local network, the IP address of
+ * the gateway is used.
+ *
+ * @param netif The lwIP network interface which the IP packet will be sent on.
+ * @param q The pbuf(s) containing the IP packet to be sent.
+ * @param ipaddr The IP address of the packet destination.
+ *
+ * @return
+ * - ERR_RTE No route to destination (no gateway to external networks),
+ * or the return type of either etharp_query() or ethernet_output().
+ */
+err_t
+etharp_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr)
+{
+  const struct eth_addr *dest;
+  struct eth_addr mcastaddr;
+  const ip4_addr_t *dst_addr = ipaddr;
+
+  LWIP_ASSERT_CORE_LOCKED();
+  LWIP_ASSERT("netif != NULL", netif != NULL);
+  LWIP_ASSERT("q != NULL", q != NULL);
+  LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL);
+
+  /* Determine the destination hardware address. Broadcasts and multicasts
+   * are special, other IP addresses are looked up in the ARP table. */
+
+  /* broadcast destination IP address? */
+  if (ip4_addr_isbroadcast(ipaddr, netif)) {
+    /* broadcast on Ethernet also */
+    dest = (const struct eth_addr *)&ethbroadcast;
+    /* multicast destination IP address? */
+  } else if (ip4_addr_ismulticast(ipaddr)) {
+    /* Hash IP multicast address to MAC address (RFC 1112: the low 23 bits of
+       the group address are mapped into the 01:00:5e:00:00:00 block, e.g.
+       224.1.2.3 becomes 01:00:5e:01:02:03). */
+    mcastaddr.addr[0] = LL_IP4_MULTICAST_ADDR_0;
+    mcastaddr.addr[1] = LL_IP4_MULTICAST_ADDR_1;
+    mcastaddr.addr[2] = LL_IP4_MULTICAST_ADDR_2;
+    mcastaddr.addr[3] = ip4_addr2(ipaddr) & 0x7f;
+    mcastaddr.addr[4] = ip4_addr3(ipaddr);
+    mcastaddr.addr[5] = ip4_addr4(ipaddr);
+    /* destination Ethernet address is multicast */
+    dest = &mcastaddr;
+    /* unicast destination IP address? */
+  } else {
+    netif_addr_idx_t i;
+    /* outside local network? if so, this can neither be a global broadcast nor
+       a subnet broadcast. */
+    if (!ip4_addr_netcmp(ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) &&
+        !ip4_addr_islinklocal(ipaddr)) {
+#if LWIP_AUTOIP
+      struct ip_hdr *iphdr = LWIP_ALIGNMENT_CAST(struct ip_hdr *, q->payload);
+      /* According to RFC 3927, section 2.6.2 (Forwarding Rules), a packet with
+         a link-local source address must always be sent "directly to its
+         destination on the same physical link. The host MUST NOT send the
+         packet to any router for forwarding". */
+      if (!ip4_addr_islinklocal(&iphdr->src))
+#endif /* LWIP_AUTOIP */
+      {
+#ifdef LWIP_HOOK_ETHARP_GET_GW
+        /* For advanced routing, a single default gateway might not be enough, so get
+           the IP address of the gateway to handle the current destination address. */
+        dst_addr = LWIP_HOOK_ETHARP_GET_GW(netif, ipaddr);
+        if (dst_addr == NULL)
+#endif /* LWIP_HOOK_ETHARP_GET_GW */
+        {
+          /* interface has default gateway? */
+          if (!ip4_addr_isany_val(*netif_ip4_gw(netif))) {
+            /* send to hardware address of default gateway IP address */
+            dst_addr = netif_ip4_gw(netif);
+            /* no default gateway available */
+          } else {
+            /* no route to destination error (default gateway missing) */
+            return ERR_RTE;
+          }
+        }
+      }
+    }
+#if LWIP_NETIF_HWADDRHINT
+    if (netif->hints != NULL) {
+      /* per-pcb cached entry was given */
+      netif_addr_idx_t etharp_cached_entry = netif->hints->addr_hint;
+      if (etharp_cached_entry < ARP_TABLE_SIZE) {
+        if ((arp_table[etharp_cached_entry].state >= ETHARP_STATE_STABLE) &&
+#if ETHARP_TABLE_MATCH_NETIF
+            (arp_table[etharp_cached_entry].netif == netif) &&
+#endif
+            (ip4_addr_cmp(dst_addr, &arp_table[etharp_cached_entry].ipaddr))) {
+          /* the per-pcb-cached entry is stable and the right one! */
+          ETHARP_STATS_INC(etharp.cachehit);
+          return etharp_output_to_arp_index(netif, q, etharp_cached_entry);
+        }
+      }
+    }
+#endif /* LWIP_NETIF_HWADDRHINT */
+
+    /* find stable entry: do this here since this is a critical path for
+       throughput and etharp_find_entry() is kind of slow */
+    for (i = 0; i < ARP_TABLE_SIZE; i++) {
+      if ((arp_table[i].state >= ETHARP_STATE_STABLE) &&
+#if ETHARP_TABLE_MATCH_NETIF
+          (arp_table[i].netif == netif) &&
+#endif
+          (ip4_addr_cmp(dst_addr, &arp_table[i].ipaddr))) {
+        /* found an existing, stable entry */
+        ETHARP_SET_ADDRHINT(netif, i);
+        return etharp_output_to_arp_index(netif, q, i);
+      }
+    }
+    /* no stable entry found, use the (slower) query function:
+       queue on destination Ethernet address belonging to ipaddr */
+    return etharp_query(netif, dst_addr, q);
+  }
+
+  /* continuation for multicast/broadcast destinations */
+  /* obtain source Ethernet address of the given interface */
+  /* send packet directly on the link */
+  return ethernet_output(netif, q, (struct eth_addr *)(netif->hwaddr), dest, ETHTYPE_IP);
+}
+
+/**
+ * Send an ARP request for the given IP address and/or queue a packet.
+ *
+ * If the IP address was not yet in the cache, a pending ARP cache entry
+ * is added and an ARP request is sent for the given address. The packet
+ * is queued on this entry.
+ *
+ * If the IP address was already pending in the cache, a new ARP request
+ * is sent for the given address. The packet is queued on this entry.
+ *
+ * If the IP address was already stable in the cache, and a packet is
+ * given, it is directly sent and no ARP request is sent out.
+ *
+ * If the IP address was already stable in the cache, and no packet is
+ * given, an ARP request is sent out.
+ *
+ * @param netif The lwIP network interface on which ipaddr
+ * must be queried for.
+ * @param ipaddr The IP address to be resolved.
+ * @param q If non-NULL, a pbuf that must be delivered to the IP address.
+ * q is not freed by this function.
+ *
+ * @note q must only be ONE packet, not a packet queue!
+ *
+ * @return
+ * - ERR_BUF Could not make room for Ethernet header.
+ * - ERR_MEM Hardware address unknown, and no more ARP entries available
+ *   to query for address or queue the packet.
+ * - ERR_MEM Could not queue packet due to memory shortage.
+ * - ERR_RTE No route to destination (no gateway to external networks).
+ * - ERR_ARG Non-unicast address given, those will not appear in ARP cache.
+ *
+ */
+err_t
+etharp_query(struct netif *netif, const ip4_addr_t *ipaddr, struct pbuf *q)
+{
+  struct eth_addr *srcaddr = (struct eth_addr *)netif->hwaddr;
+  err_t result = ERR_MEM;
+  int is_new_entry = 0;
+  s16_t i_err;
+  netif_addr_idx_t i;
+
+  /* non-unicast address? */
+  if (ip4_addr_isbroadcast(ipaddr, netif) ||
+      ip4_addr_ismulticast(ipaddr) ||
+      ip4_addr_isany(ipaddr)) {
+    LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: will not add non-unicast IP address to ARP cache\n"));
+    return ERR_ARG;
+  }
+
+  /* find entry in ARP cache, ask to create entry if queueing packet */
+  i_err = etharp_find_entry(ipaddr, ETHARP_FLAG_TRY_HARD, netif);
+
+  /* could not find or create entry? */
+  if (i_err < 0) {
+    LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not create ARP entry\n"));
+    if (q) {
+      LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: packet dropped\n"));
+      ETHARP_STATS_INC(etharp.memerr);
+    }
+    return (err_t)i_err;
+  }
+  LWIP_ASSERT("type overflow", (size_t)i_err < NETIF_ADDR_IDX_MAX);
+  i = (netif_addr_idx_t)i_err;
+
+  /* mark a fresh entry as pending (we just sent a request) */
+  if (arp_table[i].state == ETHARP_STATE_EMPTY) {
+    is_new_entry = 1;
+    arp_table[i].state = ETHARP_STATE_PENDING;
+    /* record network interface for re-sending arp request in etharp_tmr */
+    arp_table[i].netif = netif;
+  }
+
+  /* { i is either a STABLE or (new or existing) PENDING entry } */
+  LWIP_ASSERT("arp_table[i].state == PENDING or STABLE",
+              ((arp_table[i].state == ETHARP_STATE_PENDING) ||
+               (arp_table[i].state >= ETHARP_STATE_STABLE)));
+
+  /* do we have a new entry? or an implicit query request? */
+  if (is_new_entry || (q == NULL)) {
+    /* try to resolve it; send out ARP request */
+    result = etharp_request(netif, ipaddr);
+    if (result != ERR_OK) {
+      /* ARP request couldn't be sent */
+      /* We don't re-send arp request in etharp_tmr, but we still queue packets,
+         since this failure could be temporary, and the next packet calling
+         etharp_query again could lead to sending the queued packets. */
+    }
+    if (q == NULL) {
+      return result;
+    }
+  }
+
+  /* packet given? */
+  LWIP_ASSERT("q != NULL", q != NULL);
+  /* stable entry? */
+  if (arp_table[i].state >= ETHARP_STATE_STABLE) {
+    /* we have a valid IP->Ethernet address mapping */
+    ETHARP_SET_ADDRHINT(netif, i);
+    /* send the packet */
+    result = ethernet_output(netif, q, srcaddr, &(arp_table[i].ethaddr), ETHTYPE_IP);
+    /* pending entry? (either just created or already pending) */
+  } else if (arp_table[i].state == ETHARP_STATE_PENDING) {
+    /* entry is still pending, queue the given packet 'q' */
+    struct pbuf *p;
+    int copy_needed = 0;
+    /* IF q includes a pbuf that must be copied, copy the whole chain into a
+     * new PBUF_RAM. See the definition of PBUF_NEEDS_COPY for details. */
+    p = q;
+    while (p) {
+      LWIP_ASSERT("no packet queues allowed!", (p->len != p->tot_len) || (p->next == 0));
+      if (PBUF_NEEDS_COPY(p)) {
+        copy_needed = 1;
+        break;
+      }
+      p = p->next;
+    }
+    if (copy_needed) {
+      /* copy the whole packet into new pbufs */
+      p = pbuf_clone(PBUF_LINK, PBUF_RAM, q);
+    } else {
+      /* referencing the old pbuf is enough */
+      p = q;
+      pbuf_ref(p);
+    }
+    /* packet could be taken over? */
+    if (p != NULL) {
+      /* queue packet ...
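+         The copy-vs-reference decision above matters here: a PBUF_REF pbuf
+         points at caller-owned memory that may be reused as soon as this call
+         returns, so it must be cloned into PBUF_RAM before being parked on
+         the pending entry, while ROM/RAM payloads stay valid and can simply
+         be referenced (see PBUF_NEEDS_COPY).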
*/ +#if ARP_QUEUEING + struct etharp_q_entry *new_entry; + /* allocate a new arp queue entry */ + new_entry = (struct etharp_q_entry *)memp_malloc(MEMP_ARP_QUEUE); + if (new_entry != NULL) { + unsigned int qlen = 0; + new_entry->next = 0; + new_entry->p = p; + if (arp_table[i].q != NULL) { + /* queue was already existent, append the new entry to the end */ + struct etharp_q_entry *r; + r = arp_table[i].q; + qlen++; + while (r->next != NULL) { + r = r->next; + qlen++; + } + r->next = new_entry; + } else { + /* queue did not exist, first item in queue */ + arp_table[i].q = new_entry; + } +#if ARP_QUEUE_LEN + if (qlen >= ARP_QUEUE_LEN) { + struct etharp_q_entry *old; + old = arp_table[i].q; + arp_table[i].q = arp_table[i].q->next; + pbuf_free(old->p); + memp_free(MEMP_ARP_QUEUE, old); + } +#endif + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"U16_F"\n", (void *)q, i)); + result = ERR_OK; + } else { + /* the pool MEMP_ARP_QUEUE is empty */ + pbuf_free(p); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q)); + result = ERR_MEM; + } +#else /* ARP_QUEUEING */ + /* always queue one packet per ARP request only, freeing a previously queued packet */ + if (arp_table[i].q != NULL) { + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: dropped previously queued packet %p for ARP entry %"U16_F"\n", (void *)q, (u16_t)i)); + pbuf_free(arp_table[i].q); + } + arp_table[i].q = p; + result = ERR_OK; + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"U16_F"\n", (void *)q, (u16_t)i)); +#endif /* ARP_QUEUEING */ + } else { + ETHARP_STATS_INC(etharp.memerr); + LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q)); + result = ERR_MEM; + } + } + return result; +} + +/** + * Send a raw ARP packet (opcode and all addresses can be modified) + * + * @param netif the lwip network interface on which to send the ARP packet + * @param ethsrc_addr the source MAC address for the ethernet header + * @param ethdst_addr the destination MAC address for the ethernet header + * @param hwsrc_addr the source MAC address for the ARP protocol header + * @param ipsrc_addr the source IP address for the ARP protocol header + * @param hwdst_addr the destination MAC address for the ARP protocol header + * @param ipdst_addr the destination IP address for the ARP protocol header + * @param opcode the type of the ARP packet + * @return ERR_OK if the ARP packet has been sent + * ERR_MEM if the ARP packet couldn't be allocated + * any other err_t on failure + */ +static err_t +etharp_raw(struct netif *netif, const struct eth_addr *ethsrc_addr, + const struct eth_addr *ethdst_addr, + const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr, + const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr, + const u16_t opcode) +{ + struct pbuf *p; + err_t result = ERR_OK; + struct etharp_hdr *hdr; + + LWIP_ASSERT("netif != NULL", netif != NULL); + + /* allocate a pbuf for the outgoing ARP request packet */ + p = pbuf_alloc(PBUF_LINK, SIZEOF_ETHARP_HDR, PBUF_RAM); + /* could allocate a pbuf for an ARP request? 
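+     (SIZEOF_ETHARP_HDR is the 28-byte ARP payload from RFC 826: hardware
+     and protocol type (2 + 2 bytes), hardware and protocol address length
+     (1 + 1), opcode (2), then sender/target MAC (6 + 6) and IPv4 (4 + 4)
+     addresses.)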
+   */
+  if (p == NULL) {
+    LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS,
+                ("etharp_raw: could not allocate pbuf for ARP request.\n"));
+    ETHARP_STATS_INC(etharp.memerr);
+    return ERR_MEM;
+  }
+  LWIP_ASSERT("check that first pbuf can hold struct etharp_hdr",
+              (p->len >= SIZEOF_ETHARP_HDR));
+
+  hdr = (struct etharp_hdr *)p->payload;
+  LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_raw: sending raw ARP packet.\n"));
+  hdr->opcode = lwip_htons(opcode);
+
+  LWIP_ASSERT("netif->hwaddr_len must be the same as ETH_HWADDR_LEN for etharp!",
+              (netif->hwaddr_len == ETH_HWADDR_LEN));
+
+  /* Write the ARP MAC-Addresses */
+  SMEMCPY(&hdr->shwaddr, hwsrc_addr, ETH_HWADDR_LEN);
+  SMEMCPY(&hdr->dhwaddr, hwdst_addr, ETH_HWADDR_LEN);
+  /* Copy struct ip4_addr_wordaligned to aligned ip4_addr, to support compilers without
+   * structure packing. */
+  IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(&hdr->sipaddr, ipsrc_addr);
+  IPADDR_WORDALIGNED_COPY_FROM_IP4_ADDR_T(&hdr->dipaddr, ipdst_addr);
+
+  hdr->hwtype = PP_HTONS(LWIP_IANA_HWTYPE_ETHERNET);
+  hdr->proto = PP_HTONS(ETHTYPE_IP);
+  /* set hwlen and protolen */
+  hdr->hwlen = ETH_HWADDR_LEN;
+  hdr->protolen = sizeof(ip4_addr_t);
+
+  /* send ARP query */
+#if LWIP_AUTOIP
+  /* If we are using Link-Local, all ARP packets that contain a Link-Local
+   * 'sender IP address' MUST be sent using link-layer broadcast instead of
+   * link-layer unicast. (See RFC3927 Section 2.5, last paragraph) */
+  if (ip4_addr_islinklocal(ipsrc_addr)) {
+    ethernet_output(netif, p, ethsrc_addr, &ethbroadcast, ETHTYPE_ARP);
+  } else
+#endif /* LWIP_AUTOIP */
+  {
+    ethernet_output(netif, p, ethsrc_addr, ethdst_addr, ETHTYPE_ARP);
+  }
+
+  ETHARP_STATS_INC(etharp.xmit);
+  /* free ARP query packet */
+  pbuf_free(p);
+  p = NULL;
+
+  return result;
+}
+
+/**
+ * Send an ARP request packet asking for ipaddr to a specific eth address.
+ * Used to send unicast request to refresh the ARP table just before an entry
+ * times out.
+ *
+ * @param netif the lwip network interface on which to send the request
+ * @param ipaddr the IP address for which to ask
+ * @param hw_dst_addr the ethernet address to send this packet to
+ * @return ERR_OK if the request has been sent
+ *         ERR_MEM if the ARP packet couldn't be allocated
+ *         any other err_t on failure
+ */
+static err_t
+etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr *hw_dst_addr)
+{
+  return etharp_raw(netif, (struct eth_addr *)netif->hwaddr, hw_dst_addr,
+                    (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), &ethzero,
+                    ipaddr, ARP_REQUEST);
+}
+
+/**
+ * Send an ARP request packet asking for ipaddr.
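+ * This is the broadcast variant: the request is sent to the Ethernet
+ * broadcast address, whereas etharp_request_dst() above unicasts a refresh
+ * to a hardware address that is already known.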
+ *
+ * @param netif the lwip network interface on which to send the request
+ * @param ipaddr the IP address for which to ask
+ * @return ERR_OK if the request has been sent
+ *         ERR_MEM if the ARP packet couldn't be allocated
+ *         any other err_t on failure
+ */
+err_t
+etharp_request(struct netif *netif, const ip4_addr_t *ipaddr)
+{
+  LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_request: sending ARP request.\n"));
+  return etharp_request_dst(netif, ipaddr, &ethbroadcast);
+}
+
+#endif /* LWIP_IPV4 && LWIP_ARP */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c
new file mode 100644
index 0000000..fd9054b
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/icmp.c
@@ -0,0 +1,404 @@
+/**
+ * @file
+ * ICMP - Internet Control Message Protocol
+ *
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+/* Some ICMP messages should be passed to the transport protocols. This
+   is not implemented. */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/icmp.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/ip.h"
+#include "lwip/def.h"
+#include "lwip/stats.h"
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+/** Small optimization: set to 0 if incoming PBUF_POOL pbuf always can be
+ * used to modify and send a response packet (and to 1 if this is not the case,
+ * e.g.
when link header is stripped off when receiving) */ +#ifndef LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN +#define LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN 1 +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */ + +/* The amount of data from the original packet to return in a dest-unreachable */ +#define ICMP_DEST_UNREACH_DATASIZE 8 + +static void icmp_send_response(struct pbuf *p, u8_t type, u8_t code); + +/** + * Processes ICMP input packets, called from ip_input(). + * + * Currently only processes icmp echo requests and sends + * out the echo response. + * + * @param p the icmp echo request packet, p->payload pointing to the icmp header + * @param inp the netif on which this packet was received + */ +void +icmp_input(struct pbuf *p, struct netif *inp) +{ + u8_t type; +#ifdef LWIP_DEBUG + u8_t code; +#endif /* LWIP_DEBUG */ + struct icmp_echo_hdr *iecho; + const struct ip_hdr *iphdr_in; + u16_t hlen; + const ip4_addr_t *src; + + ICMP_STATS_INC(icmp.recv); + MIB2_STATS_INC(mib2.icmpinmsgs); + + iphdr_in = ip4_current_header(); + hlen = IPH_HL_BYTES(iphdr_in); + if (hlen < IP_HLEN) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short IP header (%"S16_F" bytes) received\n", hlen)); + goto lenerr; + } + if (p->len < sizeof(u16_t) * 2) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short ICMP (%"U16_F" bytes) received\n", p->tot_len)); + goto lenerr; + } + + type = *((u8_t *)p->payload); +#ifdef LWIP_DEBUG + code = *(((u8_t *)p->payload) + 1); + /* if debug is enabled but debug statement below is somehow disabled: */ + LWIP_UNUSED_ARG(code); +#endif /* LWIP_DEBUG */ + switch (type) { + case ICMP_ER: + /* This is OK, echo reply might have been parsed by a raw PCB + (as obviously, an echo request has been sent, too). */ + MIB2_STATS_INC(mib2.icmpinechoreps); + break; + case ICMP_ECHO: + MIB2_STATS_INC(mib2.icmpinechos); + src = ip4_current_dest_addr(); + /* multicast destination address? */ + if (ip4_addr_ismulticast(ip4_current_dest_addr())) { +#if LWIP_MULTICAST_PING + /* For multicast, use address of receiving interface as source address */ + src = netif_ip4_addr(inp); +#else /* LWIP_MULTICAST_PING */ + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to multicast pings\n")); + goto icmperr; +#endif /* LWIP_MULTICAST_PING */ + } + /* broadcast destination address? 
*/ + if (ip4_addr_isbroadcast(ip4_current_dest_addr(), ip_current_netif())) { +#if LWIP_BROADCAST_PING + /* For broadcast, use address of receiving interface as source address */ + src = netif_ip4_addr(inp); +#else /* LWIP_BROADCAST_PING */ + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to broadcast pings\n")); + goto icmperr; +#endif /* LWIP_BROADCAST_PING */ + } + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ping\n")); + if (p->tot_len < sizeof(struct icmp_echo_hdr)) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: bad ICMP echo received\n")); + goto lenerr; + } +#if CHECKSUM_CHECK_ICMP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP) { + if (inet_chksum_pbuf(p) != 0) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: checksum failed for received ICMP echo\n")); + pbuf_free(p); + ICMP_STATS_INC(icmp.chkerr); + MIB2_STATS_INC(mib2.icmpinerrors); + return; + } + } +#endif +#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN + if (pbuf_add_header(p, hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN)) { + /* p is not big enough to contain link headers + * allocate a new one and copy p into it + */ + struct pbuf *r; + u16_t alloc_len = (u16_t)(p->tot_len + hlen); + if (alloc_len < p->tot_len) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed (tot_len overflow)\n")); + goto icmperr; + } + /* allocate new packet buffer with space for link headers */ + r = pbuf_alloc(PBUF_LINK, alloc_len, PBUF_RAM); + if (r == NULL) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed\n")); + goto icmperr; + } + if (r->len < hlen + sizeof(struct icmp_echo_hdr)) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("first pbuf cannot hold the ICMP header")); + pbuf_free(r); + goto icmperr; + } + /* copy the ip header */ + MEMCPY(r->payload, iphdr_in, hlen); + /* switch r->payload back to icmp header (cannot fail) */ + if (pbuf_remove_header(r, hlen)) { + LWIP_ASSERT("icmp_input: moving r->payload to icmp header failed\n", 0); + pbuf_free(r); + goto icmperr; + } + /* copy the rest of the packet without ip header */ + if (pbuf_copy(r, p) != ERR_OK) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("icmp_input: copying to new pbuf failed")); + pbuf_free(r); + goto icmperr; + } + /* free the original p */ + pbuf_free(p); + /* we now have an identical copy of p that has room for link headers */ + p = r; + } else { + /* restore p->payload to point to icmp header (cannot fail) */ + if (pbuf_remove_header(p, hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN)) { + LWIP_ASSERT("icmp_input: restoring original p->payload failed\n", 0); + goto icmperr; + } + } +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */ + /* At this point, all checks are OK. */ + /* We generate an answer by switching the dest and src ip addresses, + * setting the icmp type to ECHO_RESPONSE and updating the checksum. 
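+   * The adjustment below is the incremental checksum update of RFC 1624:
+   * turning type 8 (echo request) into type 0 (echo reply) lowers the first
+   * 16-bit word of the ICMP header by 0x0800, so the ones'-complement
+   * checksum must be raised by 0x0800, with the end-around carry handled
+   * explicitly.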
*/ + iecho = (struct icmp_echo_hdr *)p->payload; + if (pbuf_add_header(p, hlen)) { + LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Can't move over header in packet")); + } else { + err_t ret; + struct ip_hdr *iphdr = (struct ip_hdr *)p->payload; + ip4_addr_copy(iphdr->src, *src); + ip4_addr_copy(iphdr->dest, *ip4_current_src_addr()); + ICMPH_TYPE_SET(iecho, ICMP_ER); +#if CHECKSUM_GEN_ICMP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP) { + /* adjust the checksum */ + if (iecho->chksum > PP_HTONS(0xffffU - (ICMP_ECHO << 8))) { + iecho->chksum = (u16_t)(iecho->chksum + PP_HTONS((u16_t)(ICMP_ECHO << 8)) + 1); + } else { + iecho->chksum = (u16_t)(iecho->chksum + PP_HTONS(ICMP_ECHO << 8)); + } + } +#if LWIP_CHECKSUM_CTRL_PER_NETIF + else { + iecho->chksum = 0; + } +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#else /* CHECKSUM_GEN_ICMP */ + iecho->chksum = 0; +#endif /* CHECKSUM_GEN_ICMP */ + + /* Set the correct TTL and recalculate the header checksum. */ + IPH_TTL_SET(iphdr, ICMP_TTL); + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, hlen)); + } +#endif /* CHECKSUM_GEN_IP */ + + ICMP_STATS_INC(icmp.xmit); + /* increase number of messages attempted to send */ + MIB2_STATS_INC(mib2.icmpoutmsgs); + /* increase number of echo replies attempted to send */ + MIB2_STATS_INC(mib2.icmpoutechoreps); + + /* send an ICMP packet */ + ret = ip4_output_if(p, src, LWIP_IP_HDRINCL, + ICMP_TTL, 0, IP_PROTO_ICMP, inp); + if (ret != ERR_OK) { + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ip_output_if returned an error: %s\n", lwip_strerr(ret))); + } + } + break; + default: + if (type == ICMP_DUR) { + MIB2_STATS_INC(mib2.icmpindestunreachs); + } else if (type == ICMP_TE) { + MIB2_STATS_INC(mib2.icmpintimeexcds); + } else if (type == ICMP_PP) { + MIB2_STATS_INC(mib2.icmpinparmprobs); + } else if (type == ICMP_SQ) { + MIB2_STATS_INC(mib2.icmpinsrcquenchs); + } else if (type == ICMP_RD) { + MIB2_STATS_INC(mib2.icmpinredirects); + } else if (type == ICMP_TS) { + MIB2_STATS_INC(mib2.icmpintimestamps); + } else if (type == ICMP_TSR) { + MIB2_STATS_INC(mib2.icmpintimestampreps); + } else if (type == ICMP_AM) { + MIB2_STATS_INC(mib2.icmpinaddrmasks); + } else if (type == ICMP_AMR) { + MIB2_STATS_INC(mib2.icmpinaddrmaskreps); + } + LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ICMP type %"S16_F" code %"S16_F" not supported.\n", + (s16_t)type, (s16_t)code)); + ICMP_STATS_INC(icmp.proterr); + ICMP_STATS_INC(icmp.drop); + } + pbuf_free(p); + return; +lenerr: + pbuf_free(p); + ICMP_STATS_INC(icmp.lenerr); + MIB2_STATS_INC(mib2.icmpinerrors); + return; +#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING +icmperr: + pbuf_free(p); + ICMP_STATS_INC(icmp.err); + MIB2_STATS_INC(mib2.icmpinerrors); + return; +#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING */ +} + +/** + * Send an icmp 'destination unreachable' packet, called from ip_input() if + * the transport layer protocol is unknown and from udp_input() if the local + * port is not bound. 
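+ * The response carries the offending datagram's IP header plus its first
+ * ICMP_DEST_UNREACH_DATASIZE (8) bytes of payload, as RFC 792 prescribes.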
+ *
+ * @param p the input packet for which the 'unreachable' should be sent,
+ *          p->payload pointing to the IP header
+ * @param t type of the 'unreachable' packet
+ */
+void
+icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t)
+{
+  MIB2_STATS_INC(mib2.icmpoutdestunreachs);
+  icmp_send_response(p, ICMP_DUR, t);
+}
+
+#if IP_FORWARD || IP_REASSEMBLY
+/**
+ * Send a 'time exceeded' packet, called from ip_forward() if TTL is 0.
+ *
+ * @param p the input packet for which the 'time exceeded' should be sent,
+ *          p->payload pointing to the IP header
+ * @param t type of the 'time exceeded' packet
+ */
+void
+icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t)
+{
+  MIB2_STATS_INC(mib2.icmpouttimeexcds);
+  icmp_send_response(p, ICMP_TE, t);
+}
+
+#endif /* IP_FORWARD || IP_REASSEMBLY */
+
+/**
+ * Send an icmp packet in response to an incoming packet.
+ *
+ * @param p the input packet for which the 'unreachable' should be sent,
+ *          p->payload pointing to the IP header
+ * @param type Type of the ICMP header
+ * @param code Code of the ICMP header
+ */
+static void
+icmp_send_response(struct pbuf *p, u8_t type, u8_t code)
+{
+  struct pbuf *q;
+  struct ip_hdr *iphdr;
+  /* we can use the echo header here */
+  struct icmp_echo_hdr *icmphdr;
+  ip4_addr_t iphdr_src;
+  struct netif *netif;
+
+  /* increase number of messages attempted to send */
+  MIB2_STATS_INC(mib2.icmpoutmsgs);
+
+  /* ICMP header + IP header + 8 bytes of data */
+  q = pbuf_alloc(PBUF_IP, sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE,
+                 PBUF_RAM);
+  if (q == NULL) {
+    LWIP_DEBUGF(ICMP_DEBUG, ("icmp_send_response: failed to allocate pbuf for ICMP packet.\n"));
+    MIB2_STATS_INC(mib2.icmpouterrors);
+    return;
+  }
+  LWIP_ASSERT("check that first pbuf can hold icmp message",
+              (q->len >= (sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE)));
+
+  iphdr = (struct ip_hdr *)p->payload;
+  LWIP_DEBUGF(ICMP_DEBUG, ("icmp_send_response from "));
+  ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->src);
+  LWIP_DEBUGF(ICMP_DEBUG, (" to "));
+  ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->dest);
+  LWIP_DEBUGF(ICMP_DEBUG, ("\n"));
+
+  icmphdr = (struct icmp_echo_hdr *)q->payload;
+  icmphdr->type = type;
+  icmphdr->code = code;
+  icmphdr->id = 0;
+  icmphdr->seqno = 0;
+
+  /* copy fields from original packet */
+  SMEMCPY((u8_t *)q->payload + sizeof(struct icmp_echo_hdr), (u8_t *)p->payload,
+          IP_HLEN + ICMP_DEST_UNREACH_DATASIZE);
+
+  ip4_addr_copy(iphdr_src, iphdr->src);
+#ifdef LWIP_HOOK_IP4_ROUTE_SRC
+  {
+    ip4_addr_t iphdr_dst;
+    ip4_addr_copy(iphdr_dst, iphdr->dest);
+    netif = ip4_route_src(&iphdr_dst, &iphdr_src);
+  }
+#else
+  netif = ip4_route(&iphdr_src);
+#endif
+  if (netif != NULL) {
+    /* calculate checksum */
+    icmphdr->chksum = 0;
+#if CHECKSUM_GEN_ICMP
+    IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP) {
+      icmphdr->chksum = inet_chksum(icmphdr, q->len);
+    }
+#endif
+    ICMP_STATS_INC(icmp.xmit);
+    ip4_output_if(q, NULL, &iphdr_src, ICMP_TTL, 0, IP_PROTO_ICMP, netif);
+  }
+  pbuf_free(q);
+}
+
+#endif /* LWIP_IPV4 && LWIP_ICMP */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c
new file mode 100644
index 0000000..61f45d9
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/igmp.c
@@ -0,0 +1,801 @@
+/**
+ * @file
+ * IGMP - Internet Group Management Protocol
+ *
+ * 
@defgroup igmp IGMP + * @ingroup ip4 + * To be called from TCPIP thread + */ + +/* + * Copyright (c) 2002 CITEL Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is a contribution to the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. +*/ + +/*------------------------------------------------------------- +Note 1) +Although the rfc requires V1 AND V2 capability +we will only support v2 since now V1 is very old (August 1989) +V1 can be added if required + +a debug print and statistic have been implemented to +show this up. +------------------------------------------------------------- +------------------------------------------------------------- +Note 2) +A query for a specific group address (as opposed to ALLHOSTS) +has now been implemented as I am unsure if it is required + +a debug print and statistic have been implemented to +show this up. +------------------------------------------------------------- +------------------------------------------------------------- +Note 3) +The router alert rfc 2113 is implemented in outgoing packets +but not checked rigorously incoming +------------------------------------------------------------- +Steve Reynolds +------------------------------------------------------------*/ + +/*----------------------------------------------------------------------------- + * RFC 988 - Host extensions for IP multicasting - V0 + * RFC 1054 - Host extensions for IP multicasting - + * RFC 1112 - Host extensions for IP multicasting - V1 + * RFC 2236 - Internet Group Management Protocol, Version 2 - V2 <- this code is based on this RFC (it's the "de facto" standard) + * RFC 3376 - Internet Group Management Protocol, Version 3 - V3 + * RFC 4604 - Using Internet Group Management Protocol Version 3... 
- V3+
+ * RFC 2113 - IP Router Alert Option -
+ *----------------------------------------------------------------------------*/
+
+/*-----------------------------------------------------------------------------
+ * Includes
+ *----------------------------------------------------------------------------*/
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/igmp.h"
+#include "lwip/debug.h"
+#include "lwip/def.h"
+#include "lwip/mem.h"
+#include "lwip/ip.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/netif.h"
+#include "lwip/stats.h"
+#include "lwip/prot/igmp.h"
+
+#include <string.h>
+
+static struct igmp_group *igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr);
+static err_t igmp_remove_group(struct netif *netif, struct igmp_group *group);
+static void igmp_timeout(struct netif *netif, struct igmp_group *group);
+static void igmp_start_timer(struct igmp_group *group, u8_t max_time);
+static void igmp_delaying_member(struct igmp_group *group, u8_t maxresp);
+static err_t igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif);
+static void igmp_send(struct netif *netif, struct igmp_group *group, u8_t type);
+
+static ip4_addr_t allsystems;
+static ip4_addr_t allrouters;
+
+/**
+ * Initialize the IGMP module
+ */
+void
+igmp_init(void)
+{
+  LWIP_DEBUGF(IGMP_DEBUG, ("igmp_init: initializing\n"));
+
+  IP4_ADDR(&allsystems, 224, 0, 0, 1);
+  IP4_ADDR(&allrouters, 224, 0, 0, 2);
+}
+
+/**
+ * Start IGMP processing on interface
+ *
+ * @param netif network interface on which to start IGMP processing
+ */
+err_t
+igmp_start(struct netif *netif)
+{
+  struct igmp_group *group;
+
+  LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: starting IGMP processing on if %p\n", (void *)netif));
+
+  group = igmp_lookup_group(netif, &allsystems);
+
+  if (group != NULL) {
+    group->group_state = IGMP_GROUP_IDLE_MEMBER;
+    group->use++;
+
+    /* Allow the igmp messages at the MAC level */
+    if (netif->igmp_mac_filter != NULL) {
+      LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: igmp_mac_filter(ADD "));
+      ip4_addr_debug_print_val(IGMP_DEBUG, allsystems);
+      LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif));
+      netif->igmp_mac_filter(netif, &allsystems, NETIF_ADD_MAC_FILTER);
+    }
+
+    return ERR_OK;
+  }
+
+  return ERR_MEM;
+}
+
+/**
+ * Stop IGMP processing on interface
+ *
+ * @param netif network interface on which to stop IGMP processing
+ */
+err_t
+igmp_stop(struct netif *netif)
+{
+  struct igmp_group *group = netif_igmp_data(netif);
+
+  netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, NULL);
+
+  while (group != NULL) {
+    struct igmp_group *next = group->next; /* avoid use-after-free below */
+
+    /* disable the group at the MAC level */
+    if (netif->igmp_mac_filter != NULL) {
+      LWIP_DEBUGF(IGMP_DEBUG, ("igmp_stop: igmp_mac_filter(DEL "));
+      ip4_addr_debug_print_val(IGMP_DEBUG, group->group_address);
+      LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif));
+      netif->igmp_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER);
+    }
+
+    /* free group */
+    memp_free(MEMP_IGMP_GROUP, group);
+
+    /* move to "next" */
+    group = next;
+  }
+  return ERR_OK;
+}
+
+/**
+ * Report IGMP memberships for this interface
+ *
+ * @param netif network interface on which to report IGMP memberships
+ */
+void
+igmp_report_groups(struct netif *netif)
+{
+  struct igmp_group *group = netif_igmp_data(netif);
+
+  LWIP_DEBUGF(IGMP_DEBUG, ("igmp_report_groups: sending IGMP reports on if %p\n", (void *)netif));
+
+  /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */
+  if (group != NULL) {
+    group = group->next;
+  }
+
+  while (group != NULL) {
+    igmp_delaying_member(group, IGMP_JOIN_DELAYING_MEMBER_TMR);
+    group = group->next;
+  }
+}
+
+/**
+ * Search for a group in the netif's igmp group list
+ *
+ * @param ifp the network interface for which to look
+ * @param addr the group ip address to search for
+ * @return a struct igmp_group* if the group has been found,
+ *         NULL if the group wasn't found.
+ */
+struct igmp_group *
+igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr)
+{
+  struct igmp_group *group = netif_igmp_data(ifp);
+
+  while (group != NULL) {
+    if (ip4_addr_cmp(&(group->group_address), addr)) {
+      return group;
+    }
+    group = group->next;
+  }
+
+  /* to be clearer, we return NULL here instead of
+   * 'group' (which is also NULL at this point).
+   */
+  return NULL;
+}
+
+/**
+ * Search for a specific igmp group and create a new one if not found.
+ *
+ * @param ifp the network interface for which to look
+ * @param addr the group ip address to search
+ * @return a struct igmp_group*,
+ *         NULL on memory error.
+ */
+static struct igmp_group *
+igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr)
+{
+  struct igmp_group *group;
+  struct igmp_group *list_head = netif_igmp_data(ifp);
+
+  /* Search if the group already exists */
+  group = igmp_lookfor_group(ifp, addr);
+  if (group != NULL) {
+    /* Group already exists. */
+    return group;
+  }
+
+  /* Group doesn't exist yet, create a new one */
+  group = (struct igmp_group *)memp_malloc(MEMP_IGMP_GROUP);
+  if (group != NULL) {
+    ip4_addr_set(&(group->group_address), addr);
+    group->timer = 0; /* Not running */
+    group->group_state = IGMP_GROUP_NON_MEMBER;
+    group->last_reporter_flag = 0;
+    group->use = 0;
+
+    /* Ensure allsystems group is always first in list */
+    if (list_head == NULL) {
+      /* this is the first entry in linked list */
+      LWIP_ASSERT("igmp_lookup_group: first group must be allsystems",
+                  (ip4_addr_cmp(addr, &allsystems) != 0));
+      group->next = NULL;
+      netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, group);
+    } else {
+      /* append _after_ first entry */
+      LWIP_ASSERT("igmp_lookup_group: all except first group must not be allsystems",
+                  (ip4_addr_cmp(addr, &allsystems) == 0));
+      group->next = list_head->next;
+      list_head->next = group;
+    }
+  }
+
+  LWIP_DEBUGF(IGMP_DEBUG, ("igmp_lookup_group: %sallocated a new group with address ", (group ? "" : "impossible to ")));
+  ip4_addr_debug_print(IGMP_DEBUG, addr);
+  LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)ifp));
+
+  return group;
+}
+
+/**
+ * Remove a group from netif's igmp group list, but don't free it yet
+ *
+ * @param netif the netif from whose igmp group list to remove the group
+ * @param group the group to remove from the netif's igmp group list
+ * @return ERR_OK if group was removed from the list, an err_t otherwise
+ */
+static err_t
+igmp_remove_group(struct netif *netif, struct igmp_group *group)
+{
+  err_t err = ERR_OK;
+  struct igmp_group *tmp_group;
+
+  /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */
+  for (tmp_group = netif_igmp_data(netif); tmp_group != NULL; tmp_group = tmp_group->next) {
+    if (tmp_group->next == group) {
+      tmp_group->next = group->next;
+      break;
+    }
+  }
+  /* Group not found in netif's igmp group list */
+  if (tmp_group == NULL) {
+    err = ERR_ARG;
+  }
+
+  return err;
+}
+
+/**
+ * Called from ip_input() if a new IGMP packet is received.
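+ * The passed pbuf is consumed: it is freed on every path through this
+ * function, including the error paths.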
+ *
+ * @param p received igmp packet, p->payload pointing to the igmp header
+ * @param inp network interface on which the packet was received
+ * @param dest destination ip address of the igmp packet
+ */
+void
+igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest)
+{
+  struct igmp_msg *igmp;
+  struct igmp_group *group;
+  struct igmp_group *groupref;
+
+  IGMP_STATS_INC(igmp.recv);
+
+  /* Note that the length CAN be greater than 8 but only 8 are used - All are included in the checksum */
+  if (p->len < IGMP_MINLEN) {
+    pbuf_free(p);
+    IGMP_STATS_INC(igmp.lenerr);
+    LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: length error\n"));
+    return;
+  }
+
+  LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: message from "));
+  ip4_addr_debug_print_val(IGMP_DEBUG, ip4_current_header()->src);
+  LWIP_DEBUGF(IGMP_DEBUG, (" to address "));
+  ip4_addr_debug_print_val(IGMP_DEBUG, ip4_current_header()->dest);
+  LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)inp));
+
+  /* Now calculate and check the checksum */
+  igmp = (struct igmp_msg *)p->payload;
+  if (inet_chksum(igmp, p->len)) {
+    pbuf_free(p);
+    IGMP_STATS_INC(igmp.chkerr);
+    LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: checksum error\n"));
+    return;
+  }
+
+  /* Packet is ok so find an existing group */
+  group = igmp_lookfor_group(inp, dest); /* use the destination IP address of incoming packet */
+
+  /* If no matching group was found, the frame is not for us */
+  if (!group) {
+    pbuf_free(p);
+    IGMP_STATS_INC(igmp.drop);
+    LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP frame not for us\n"));
+    return;
+  }
+
+  /* NOW ACT ON THE INCOMING MESSAGE TYPE... */
+  switch (igmp->igmp_msgtype) {
+    case IGMP_MEMB_QUERY:
+      /* IGMP_MEMB_QUERY to the "all systems" address ? */
+      if ((ip4_addr_cmp(dest, &allsystems)) && ip4_addr_isany(&igmp->igmp_group_address)) {
+        /* THIS IS THE GENERAL QUERY */
+        LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: General IGMP_MEMB_QUERY on \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp)));
+
+        if (igmp->igmp_maxresp == 0) {
+          IGMP_STATS_INC(igmp.rx_v1);
+          LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: got an all hosts query with time == 0 - this is V1 and not implemented - treat as v2\n"));
+          igmp->igmp_maxresp = IGMP_V1_DELAYING_MEMBER_TMR;
+        } else {
+          IGMP_STATS_INC(igmp.rx_general);
+        }
+
+        groupref = netif_igmp_data(inp);
+
+        /* Do not send messages on the all systems group address! */
+        /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */
+        if (groupref != NULL) {
+          groupref = groupref->next;
+        }
+
+        while (groupref) {
+          igmp_delaying_member(groupref, igmp->igmp_maxresp);
+          groupref = groupref->next;
+        }
+      } else {
+        /* IGMP_MEMB_QUERY to a specific group ?
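+           Some queriers address a group-specific query to 224.0.0.1 instead
+           of to the group itself, so both delivery styles are handled below,
+           re-looking up the group from the IGMP payload where needed.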
+         */
+        if (!ip4_addr_isany(&igmp->igmp_group_address)) {
+          LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_MEMB_QUERY to a specific group "));
+          ip4_addr_debug_print_val(IGMP_DEBUG, igmp->igmp_group_address);
+          if (ip4_addr_cmp(dest, &allsystems)) {
+            ip4_addr_t groupaddr;
+            LWIP_DEBUGF(IGMP_DEBUG, (" using \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp)));
+            /* we first need to re-look for the group since we used dest last time */
+            ip4_addr_copy(groupaddr, igmp->igmp_group_address);
+            group = igmp_lookfor_group(inp, &groupaddr);
+          } else {
+            LWIP_DEBUGF(IGMP_DEBUG, (" with the group address as destination [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp)));
+          }
+
+          if (group != NULL) {
+            IGMP_STATS_INC(igmp.rx_group);
+            igmp_delaying_member(group, igmp->igmp_maxresp);
+          } else {
+            IGMP_STATS_INC(igmp.drop);
+          }
+        } else {
+          IGMP_STATS_INC(igmp.proterr);
+        }
+      }
+      break;
+    case IGMP_V2_MEMB_REPORT:
+      LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_V2_MEMB_REPORT\n"));
+      IGMP_STATS_INC(igmp.rx_report);
+      if (group->group_state == IGMP_GROUP_DELAYING_MEMBER) {
+        /* This is on a specific group we have already looked up */
+        group->timer = 0; /* stopped */
+        group->group_state = IGMP_GROUP_IDLE_MEMBER;
+        group->last_reporter_flag = 0;
+      }
+      break;
+    default:
+      LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: unexpected msg %d in state %d on group %p on if %p\n",
+                               igmp->igmp_msgtype, group->group_state, (void *)group, (void *)inp));
+      IGMP_STATS_INC(igmp.proterr);
+      break;
+  }
+
+  pbuf_free(p);
+  return;
+}
+
+/**
+ * @ingroup igmp
+ * Join a group on one network interface (or on all netifs,
+ * if ifaddr is IP4_ADDR_ANY).
+ *
+ * @param ifaddr ip address of the network interface which should join a new group
+ * @param groupaddr the ip address of the group which to join
+ * @return ERR_OK if group was joined on the netif(s), an err_t otherwise
+ */
+err_t
+igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr)
+{
+  err_t err = ERR_VAL; /* no matching interface */
+  struct netif *netif;
+
+  LWIP_ASSERT_CORE_LOCKED();
+
+  /* make sure it is multicast address */
+  LWIP_ERROR("igmp_joingroup: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
+  LWIP_ERROR("igmp_joingroup: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
+
+  /* loop through netif's */
+  NETIF_FOREACH(netif) {
+    /* Should we join this interface ? */
+    if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) {
+      err = igmp_joingroup_netif(netif, groupaddr);
+      if (err != ERR_OK) {
+        /* Return an error even if some network interfaces are joined */
+        /** @todo undo any other netif already joined */
+        return err;
+      }
+    }
+  }
+
+  return err;
+}
+
+/**
+ * @ingroup igmp
+ * Join a group on one network interface.
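+ * Example usage (a sketch; assumes netif is up with NETIF_FLAG_IGMP set, and
+ * 239.255.0.1 is just an arbitrary administratively-scoped group address):
+ *
+ *   ip4_addr_t group;
+ *   IP4_ADDR(&group, 239, 255, 0, 1);
+ *   if (igmp_joingroup_netif(netif, &group) != ERR_OK) {
+ *     // handle the error
+ *   }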
+ *
+ * @param netif the network interface which should join a new group
+ * @param groupaddr the ip address of the group which to join
+ * @return ERR_OK if group was joined on the netif, an err_t otherwise
+ */
+err_t
+igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr)
+{
+  struct igmp_group *group;
+
+  LWIP_ASSERT_CORE_LOCKED();
+
+  /* make sure it is multicast address */
+  LWIP_ERROR("igmp_joingroup_netif: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
+  LWIP_ERROR("igmp_joingroup_netif: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
+
+  /* make sure it is an igmp-enabled netif */
+  LWIP_ERROR("igmp_joingroup_netif: attempt to join on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;);
+
+  /* find group or create a new one if not found */
+  group = igmp_lookup_group(netif, groupaddr);
+
+  if (group != NULL) {
+    /* This should create a new group, check the state to make sure */
+    if (group->group_state != IGMP_GROUP_NON_MEMBER) {
+      LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to group not in state IGMP_GROUP_NON_MEMBER\n"));
+    } else {
+      /* OK - it was new group */
+      LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to new group: "));
+      ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
+      LWIP_DEBUGF(IGMP_DEBUG, ("\n"));
+
+      /* If first use of the group, allow the group at the MAC level */
+      if ((group->use == 0) && (netif->igmp_mac_filter != NULL)) {
+        LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: igmp_mac_filter(ADD "));
+        ip4_addr_debug_print(IGMP_DEBUG, groupaddr);
+        LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif));
+        netif->igmp_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER);
+      }
+
+      IGMP_STATS_INC(igmp.tx_join);
+      igmp_send(netif, group, IGMP_V2_MEMB_REPORT);
+
+      igmp_start_timer(group, IGMP_JOIN_DELAYING_MEMBER_TMR);
+
+      /* Need to work out where this timer comes from */
+      group->group_state = IGMP_GROUP_DELAYING_MEMBER;
+    }
+    /* Increment group use */
+    group->use++;
+    /* Join on this interface */
+    return ERR_OK;
+  } else {
+    LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: Not enough memory to join to group\n"));
+    return ERR_MEM;
+  }
+}
+
+/**
+ * @ingroup igmp
+ * Leave a group on one network interface (or on all netifs,
+ * if ifaddr is IP4_ADDR_ANY).
+ *
+ * @param ifaddr ip address of the network interface which should leave a group
+ * @param groupaddr the ip address of the group which to leave
+ * @return ERR_OK if group was left on the netif(s), an err_t otherwise
+ */
+err_t
+igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr)
+{
+  err_t err = ERR_VAL; /* no matching interface */
+  struct netif *netif;
+
+  LWIP_ASSERT_CORE_LOCKED();
+
+  /* make sure it is multicast address */
+  LWIP_ERROR("igmp_leavegroup: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;);
+  LWIP_ERROR("igmp_leavegroup: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;);
+
+  /* loop through netif's */
+  NETIF_FOREACH(netif) {
+    /* Should we leave this interface ? */
+    if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) {
+      err_t res = igmp_leavegroup_netif(netif, groupaddr);
+      if (err != ERR_OK) {
+        /* Store this result if we have not yet gotten a success */
+        err = res;
+      }
+    }
+  }
+
+  return err;
+}
+
+/**
+ * @ingroup igmp
+ * Leave a group on one network interface.
+ * + * @param netif the network interface which should leave a group + * @param groupaddr the ip address of the group which to leave + * @return ERR_OK if group was left on the netif, an err_t otherwise + */ +err_t +igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr) +{ + struct igmp_group *group; + + LWIP_ASSERT_CORE_LOCKED(); + + /* make sure it is multicast address */ + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); + + /* make sure it is an igmp-enabled netif */ + LWIP_ERROR("igmp_leavegroup_netif: attempt to leave on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;); + + /* find group */ + group = igmp_lookfor_group(netif, groupaddr); + + if (group != NULL) { + /* Only send a leave if the flag is set according to the state diagram */ + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: Leaving group: ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, ("\n")); + + /* If there is no other use of the group */ + if (group->use <= 1) { + /* Remove the group from the list */ + igmp_remove_group(netif, group); + + /* If we are the last reporter for this group */ + if (group->last_reporter_flag) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: sending leaving group\n")); + IGMP_STATS_INC(igmp.tx_leave); + igmp_send(netif, group, IGMP_LEAVE_GROUP); + } + + /* Disable the group at the MAC level */ + if (netif->igmp_mac_filter != NULL) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: igmp_mac_filter(DEL ")); + ip4_addr_debug_print(IGMP_DEBUG, groupaddr); + LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void *)netif)); + netif->igmp_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER); + } + + /* Free group struct */ + memp_free(MEMP_IGMP_GROUP, group); + } else { + /* Decrement group use */ + group->use--; + } + return ERR_OK; + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: not member of group\n")); + return ERR_VAL; + } +} + +/** + * The igmp timer function (both for NO_SYS=1 and =0) + * Should be called every IGMP_TMR_INTERVAL milliseconds (100 ms is default). + */ +void +igmp_tmr(void) +{ + struct netif *netif; + + NETIF_FOREACH(netif) { + struct igmp_group *group = netif_igmp_data(netif); + + while (group != NULL) { + if (group->timer > 0) { + group->timer--; + if (group->timer == 0) { + igmp_timeout(netif, group); + } + } + group = group->next; + } + } +} + +/** + * Called if a timeout for one group is reached. + * Sends a report for this group. 
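+ * Called from igmp_tmr(), so one timer tick corresponds to
+ * IGMP_TMR_INTERVAL (100 ms by default).
+ *
+ * @param netif the netif the group belongs to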
+ * + * @param group an igmp_group for which a timeout is reached + */ +static void +igmp_timeout(struct netif *netif, struct igmp_group *group) +{ + /* If the state is IGMP_GROUP_DELAYING_MEMBER then we send a report for this group + (unless it is the allsystems group) */ + if ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) && + (!(ip4_addr_cmp(&(group->group_address), &allsystems)))) { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_timeout: report membership for group with address ")); + ip4_addr_debug_print_val(IGMP_DEBUG, group->group_address); + LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void *)netif)); + + group->group_state = IGMP_GROUP_IDLE_MEMBER; + + IGMP_STATS_INC(igmp.tx_report); + igmp_send(netif, group, IGMP_V2_MEMB_REPORT); + } +} + +/** + * Start a timer for an igmp group + * + * @param group the igmp_group for which to start a timer + * @param max_time the time in multiples of IGMP_TMR_INTERVAL (decrease with + * every call to igmp_tmr()) + */ +static void +igmp_start_timer(struct igmp_group *group, u8_t max_time) +{ +#ifdef LWIP_RAND + group->timer = (u16_t)(max_time > 2 ? (LWIP_RAND() % max_time) : 1); +#else /* LWIP_RAND */ + /* ATTENTION: use this only if absolutely necessary! */ + group->timer = max_time / 2; +#endif /* LWIP_RAND */ + + if (group->timer == 0) { + group->timer = 1; + } +} + +/** + * Delaying membership report for a group if necessary + * + * @param group the igmp_group for which "delaying" membership report + * @param maxresp query delay + */ +static void +igmp_delaying_member(struct igmp_group *group, u8_t maxresp) +{ + if ((group->group_state == IGMP_GROUP_IDLE_MEMBER) || + ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) && + ((group->timer == 0) || (maxresp < group->timer)))) { + igmp_start_timer(group, maxresp); + group->group_state = IGMP_GROUP_DELAYING_MEMBER; + } +} + + +/** + * Sends an IP packet on a network interface. This function constructs the IP header + * and calculates the IP header checksum. If the source IP address is NULL, + * the IP address of the outgoing network interface is filled in as source address. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IP/LINK headers + * returns errors returned by netif->output + */ +static err_t +igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif) +{ + /* This is the "router alert" option */ + u16_t ra[2]; + ra[0] = PP_HTONS(ROUTER_ALERT); + ra[1] = 0x0000; /* Router shall examine packet */ + IGMP_STATS_INC(igmp.xmit); + return ip4_output_if_opt(p, src, dest, IGMP_TTL, 0, IP_PROTO_IGMP, netif, ra, ROUTER_ALERTLEN); +} + +/** + * Send an igmp packet to a specific group. 
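+ * A V2 membership report is addressed to the group itself, while a leave
+ * message goes to the all-routers address 224.0.0.2; both are sent with
+ * TTL IGMP_TTL (1) and the IP Router Alert option.
+ *
+ * @param netif the network interface on which to send the packet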
+ * + * @param group the group to which to send the packet + * @param type the type of igmp packet to send + */ +static void +igmp_send(struct netif *netif, struct igmp_group *group, u8_t type) +{ + struct pbuf *p = NULL; + struct igmp_msg *igmp = NULL; + ip4_addr_t src = *IP4_ADDR_ANY4; + ip4_addr_t *dest = NULL; + + /* IP header + "router alert" option + IGMP header */ + p = pbuf_alloc(PBUF_TRANSPORT, IGMP_MINLEN, PBUF_RAM); + + if (p) { + igmp = (struct igmp_msg *)p->payload; + LWIP_ASSERT("igmp_send: check that first pbuf can hold struct igmp_msg", + (p->len >= sizeof(struct igmp_msg))); + ip4_addr_copy(src, *netif_ip4_addr(netif)); + + if (type == IGMP_V2_MEMB_REPORT) { + dest = &(group->group_address); + ip4_addr_copy(igmp->igmp_group_address, group->group_address); + group->last_reporter_flag = 1; /* Remember we were the last to report */ + } else { + if (type == IGMP_LEAVE_GROUP) { + dest = &allrouters; + ip4_addr_copy(igmp->igmp_group_address, group->group_address); + } + } + + if ((type == IGMP_V2_MEMB_REPORT) || (type == IGMP_LEAVE_GROUP)) { + igmp->igmp_msgtype = type; + igmp->igmp_maxresp = 0; + igmp->igmp_checksum = 0; + igmp->igmp_checksum = inet_chksum(igmp, IGMP_MINLEN); + + igmp_ip_output_if(p, &src, dest, netif); + } + + pbuf_free(p); + } else { + LWIP_DEBUGF(IGMP_DEBUG, ("igmp_send: not enough memory for igmp_send\n")); + IGMP_STATS_INC(igmp.memerr); + } +} + +#endif /* LWIP_IPV4 && LWIP_IGMP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c new file mode 100644 index 0000000..00b09f9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4.c @@ -0,0 +1,1132 @@ +/** + * @file + * This is the IPv4 layer implementation for incoming and outgoing IP traffic. + * + * @see ip_frag.c + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/ip4_frag.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp.h" +#include "lwip/igmp.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/autoip.h" +#include "lwip/stats.h" +#include "lwip/prot/iana.h" + +#include <string.h> + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** Set this to 0 in the rare case of wanting to call an extra function to + * generate the IP checksum (in contrast to calculating it on-the-fly). */ +#ifndef LWIP_INLINE_IP_CHKSUM +#if LWIP_CHECKSUM_CTRL_PER_NETIF +#define LWIP_INLINE_IP_CHKSUM 0 +#else /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#define LWIP_INLINE_IP_CHKSUM 1 +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ +#endif + +#if LWIP_INLINE_IP_CHKSUM && CHECKSUM_GEN_IP +#define CHECKSUM_GEN_IP_INLINE 1 +#else +#define CHECKSUM_GEN_IP_INLINE 0 +#endif + +#if LWIP_DHCP || defined(LWIP_IP_ACCEPT_UDP_PORT) +#define IP_ACCEPT_LINK_LAYER_ADDRESSING 1 + +/** Some defines for DHCP to let link-layer-addressed packets through while the + * netif is down. + * To use this in your own application/protocol, define LWIP_IP_ACCEPT_UDP_PORT(port) + * to return 1 if the port is accepted and 0 if the port is not accepted. + */ +#if LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) +/* accept DHCP client port and custom port */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (((port) == PP_NTOHS(LWIP_IANA_PORT_DHCP_CLIENT)) \ + || (LWIP_IP_ACCEPT_UDP_PORT(port))) +#elif defined(LWIP_IP_ACCEPT_UDP_PORT) /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ +/* accept custom port only */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (LWIP_IP_ACCEPT_UDP_PORT(port)) +#else /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ +/* accept DHCP client port only */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) ((port) == PP_NTOHS(LWIP_IANA_PORT_DHCP_CLIENT)) +#endif /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ + +#else /* LWIP_DHCP */ +#define IP_ACCEPT_LINK_LAYER_ADDRESSING 0 +#endif /* LWIP_DHCP */ + +/** The IP header ID of the next outgoing IP packet */ +static u16_t ip_id; + +#if LWIP_MULTICAST_TX_OPTIONS +/** The default netif used for multicast */ +static struct netif *ip4_default_multicast_netif; + +/** + * @ingroup ip4 + * Set a default netif for IPv4 multicast. */ +void +ip4_set_default_multicast_netif(struct netif *default_multicast_netif) +{ + ip4_default_multicast_netif = default_multicast_netif; +} +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC +/** + * Source based IPv4 routing must be fully implemented in + * LWIP_HOOK_IP4_ROUTE_SRC(). This function only provides the parameters. + */ +struct netif * +ip4_route_src(const ip4_addr_t *src, const ip4_addr_t *dest) +{ + if (src != NULL) { + /* when src==NULL, the hook is called from ip4_route(dest) */ + struct netif *netif = LWIP_HOOK_IP4_ROUTE_SRC(src, dest); + if (netif != NULL) { + return netif; + } + } + return ip4_route(dest); +} +#endif /* LWIP_HOOK_IP4_ROUTE_SRC */ + +/** + * Finds the appropriate network interface for a given IP address. It + * searches the list of network interfaces linearly. A match is found + * if the masked IP address of the network interface equals the masked + * IP address given to the function. 
+ * + * @param dest the destination IP address for which to find the route + * @return the netif on which to send to reach dest + */ +struct netif * +ip4_route(const ip4_addr_t *dest) +{ +#if !LWIP_SINGLE_NETIF + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_MULTICAST_TX_OPTIONS + /* Use administratively selected interface for multicast by default */ + if (ip4_addr_ismulticast(dest) && ip4_default_multicast_netif) { + return ip4_default_multicast_netif; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + /* bug #54569: in case LWIP_SINGLE_NETIF=1 and LWIP_DEBUGF() disabled, the following loop is optimized away */ + LWIP_UNUSED_ARG(dest); + + /* iterate through netifs */ + NETIF_FOREACH(netif) { + /* is the netif up, does it have a link and a valid address? */ + if (netif_is_up(netif) && netif_is_link_up(netif) && !ip4_addr_isany_val(*netif_ip4_addr(netif))) { + /* network mask matches? */ + if (ip4_addr_netcmp(dest, netif_ip4_addr(netif), netif_ip4_netmask(netif))) { + /* return netif on which to forward IP packet */ + return netif; + } + /* gateway matches on a non-broadcast interface? (i.e. peer in a point-to-point interface) */ + if (((netif->flags & NETIF_FLAG_BROADCAST) == 0) && ip4_addr_cmp(dest, netif_ip4_gw(netif))) { + /* return netif on which to forward IP packet */ + return netif; + } + } + } + +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + /* loopif is disabled, loopback traffic is passed through any netif */ + if (ip4_addr_isloopback(dest)) { + /* don't check for link on loopback traffic */ + if (netif_default != NULL && netif_is_up(netif_default)) { + return netif_default; + } + /* default netif is not up, just use any netif for loopback traffic */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif)) { + return netif; + } + } + return NULL; + } +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC + netif = LWIP_HOOK_IP4_ROUTE_SRC(NULL, dest); + if (netif != NULL) { + return netif; + } +#elif defined(LWIP_HOOK_IP4_ROUTE) + netif = LWIP_HOOK_IP4_ROUTE(dest); + if (netif != NULL) { + return netif; + } +#endif +#endif /* !LWIP_SINGLE_NETIF */ + + if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default) || + ip4_addr_isany_val(*netif_ip4_addr(netif_default)) || ip4_addr_isloopback(dest)) { + /* No matching netif found and default netif is not usable. + If this is not good enough for you, use LWIP_HOOK_IP4_ROUTE() */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_route: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + MIB2_STATS_INC(mib2.ipoutnoroutes); + return NULL; + } + + return netif_default; +} + +#if IP_FORWARD +/** + * Determine whether an IP address is in a reserved set of addresses + * that may not be forwarded, or whether datagrams to that destination + * may be forwarded. 
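+ *
+ * (Worked example, purely illustrative: for a destination of 127.0.0.1 the
+ * code below computes addr == 0x7f000001; IP_CLASSA(addr) holds and
+ * addr & IP_CLASSA_NET == 0x7f000000 == IP_LOOPBACKNET << IP_CLASSA_NSHIFT,
+ * so the packet is not forwarded. Experimental (240.0.0.0/4) and
+ * zero-network class A destinations are rejected the same way.)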
+ * @param p the packet to forward + * @return 1: can forward 0: discard + */ +static int +ip4_canforward(struct pbuf *p) +{ + u32_t addr = lwip_htonl(ip4_addr_get_u32(ip4_current_dest_addr())); + +#ifdef LWIP_HOOK_IP4_CANFORWARD + int ret = LWIP_HOOK_IP4_CANFORWARD(p, addr); + if (ret >= 0) { + return ret; + } +#endif /* LWIP_HOOK_IP4_CANFORWARD */ + + if (p->flags & PBUF_FLAG_LLBCAST) { + /* don't route link-layer broadcasts */ + return 0; + } + if ((p->flags & PBUF_FLAG_LLMCAST) || IP_MULTICAST(addr)) { + /* don't route link-layer multicasts (use LWIP_HOOK_IP4_CANFORWARD instead) */ + return 0; + } + if (IP_EXPERIMENTAL(addr)) { + return 0; + } + if (IP_CLASSA(addr)) { + u32_t net = addr & IP_CLASSA_NET; + if ((net == 0) || (net == ((u32_t)IP_LOOPBACKNET << IP_CLASSA_NSHIFT))) { + /* don't route loopback packets */ + return 0; + } + } + return 1; +} + +/** + * Forwards an IP packet. It finds an appropriate route for the + * packet, decrements the TTL value of the packet, adjusts the + * checksum and outputs the packet on the appropriate interface. + * + * @param p the packet to forward (p->payload points to IP header) + * @param iphdr the IP header of the input packet + * @param inp the netif on which this packet was received + */ +static void +ip4_forward(struct pbuf *p, struct ip_hdr *iphdr, struct netif *inp) +{ + struct netif *netif; + + PERF_START; + LWIP_UNUSED_ARG(inp); + + if (!ip4_canforward(p)) { + goto return_noroute; + } + + /* RFC3927 2.7: do not forward link-local addresses */ + if (ip4_addr_islinklocal(ip4_current_dest_addr())) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not forwarding LLA %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + goto return_noroute; + } + + /* Find network interface where to forward this IP packet to. */ + netif = ip4_route_src(ip4_current_src_addr(), ip4_current_dest_addr()); + if (netif == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: no forwarding route for %"U16_F".%"U16_F".%"U16_F".%"U16_F" found\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + /* @todo: send ICMP_DUR_NET? */ + goto return_noroute; + } +#if !IP_FORWARD_ALLOW_TX_ON_RX_NETIF + /* Do not forward packets onto the same network interface on which + * they arrived. */ + if (netif == inp) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not bouncing packets back on incoming interface.\n")); + goto return_noroute; + } +#endif /* IP_FORWARD_ALLOW_TX_ON_RX_NETIF */ + + /* decrement TTL */ + IPH_TTL_SET(iphdr, IPH_TTL(iphdr) - 1); + /* send ICMP if TTL == 0 */ + if (IPH_TTL(iphdr) == 0) { + MIB2_STATS_INC(mib2.ipinhdrerrors); +#if LWIP_ICMP + /* Don't send ICMP messages in response to ICMP messages */ + if (IPH_PROTO(iphdr) != IP_PROTO_ICMP) { + icmp_time_exceeded(p, ICMP_TE_TTL); + } +#endif /* LWIP_ICMP */ + return; + } + + /* Incrementally update the IP checksum. 
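+     Decrementing the TTL lowers the 16-bit header word that holds TTL and
+     protocol by 0x0100, so per RFC 1624 the one's-complement checksum field
+     must rise by 0x0100; the extra +1 taken in the first branch below is the
+     end-around carry for the case where the addition wraps past 0xffff.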
*/ + if (IPH_CHKSUM(iphdr) >= PP_HTONS(0xffffU - 0x100)) { + IPH_CHKSUM_SET(iphdr, (u16_t)(IPH_CHKSUM(iphdr) + PP_HTONS(0x100) + 1)); + } else { + IPH_CHKSUM_SET(iphdr, (u16_t)(IPH_CHKSUM(iphdr) + PP_HTONS(0x100))); + } + + LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: forwarding packet to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), + ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); + + IP_STATS_INC(ip.fw); + MIB2_STATS_INC(mib2.ipforwdatagrams); + IP_STATS_INC(ip.xmit); + + PERF_STOP("ip4_forward"); + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif->mtu && (p->tot_len > netif->mtu)) { + if ((IPH_OFFSET(iphdr) & PP_NTOHS(IP_DF)) == 0) { +#if IP_FRAG + ip4_frag(p, netif, ip4_current_dest_addr()); +#else /* IP_FRAG */ + /* @todo: send ICMP Destination Unreachable code 13 "Communication administratively prohibited"? */ +#endif /* IP_FRAG */ + } else { +#if LWIP_ICMP + /* send ICMP Destination Unreachable code 4: "Fragmentation Needed and DF Set" */ + icmp_dest_unreach(p, ICMP_DUR_FRAG); +#endif /* LWIP_ICMP */ + } + return; + } + /* transmit pbuf on chosen interface */ + netif->output(netif, p, ip4_current_dest_addr()); + return; +return_noroute: + MIB2_STATS_INC(mib2.ipoutnoroutes); +} +#endif /* IP_FORWARD */ + +/** Return true if the current input packet should be accepted on this netif */ +static int +ip4_input_accept(struct netif *netif) +{ + LWIP_DEBUGF(IP_DEBUG, ("ip_input: iphdr->dest 0x%"X32_F" netif->ip_addr 0x%"X32_F" (0x%"X32_F", 0x%"X32_F", 0x%"X32_F")\n", + ip4_addr_get_u32(ip4_current_dest_addr()), ip4_addr_get_u32(netif_ip4_addr(netif)), + ip4_addr_get_u32(ip4_current_dest_addr()) & ip4_addr_get_u32(netif_ip4_netmask(netif)), + ip4_addr_get_u32(netif_ip4_addr(netif)) & ip4_addr_get_u32(netif_ip4_netmask(netif)), + ip4_addr_get_u32(ip4_current_dest_addr()) & ~ip4_addr_get_u32(netif_ip4_netmask(netif)))); + + /* interface is up and configured? */ + if ((netif_is_up(netif)) && (!ip4_addr_isany_val(*netif_ip4_addr(netif)))) { + /* unicast to this interface address? */ + if (ip4_addr_cmp(ip4_current_dest_addr(), netif_ip4_addr(netif)) || + /* or broadcast on this interface network address? */ + ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + || (ip4_addr_get_u32(ip4_current_dest_addr()) == PP_HTONL(IPADDR_LOOPBACK)) +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ + ) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + /* accept on this netif */ + return 1; + } +#if LWIP_AUTOIP + /* connections to link-local addresses must persist after changing + the netif's address (RFC3927 ch. 1.9) */ + if (autoip_accept_packet(netif, ip4_current_dest_addr())) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: LLA packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + /* accept on this netif */ + return 1; + } +#endif /* LWIP_AUTOIP */ + } + return 0; +} + +/** + * This function is called by the network interface device driver when + * an IP packet is received. The function does the basic checks of the + * IP header such as packet size being at least larger than the header + * size etc. If the packet was not destined for us, the packet is + * forwarded (using ip_forward). The IP checksum is always checked. + * + * Finally, the packet is sent to the upper layer protocol input function. 
+ * + * @param p the received IP packet (p->payload points to IP header) + * @param inp the netif on which this packet was received + * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't + * processed, but currently always returns ERR_OK) + */ +err_t +ip4_input(struct pbuf *p, struct netif *inp) +{ + const struct ip_hdr *iphdr; + struct netif *netif; + u16_t iphdr_hlen; + u16_t iphdr_len; +#if IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP + int check_ip_src = 1; +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP */ +#if LWIP_RAW + raw_input_state_t raw_status; +#endif /* LWIP_RAW */ + + LWIP_ASSERT_CORE_LOCKED(); + + IP_STATS_INC(ip.recv); + MIB2_STATS_INC(mib2.ipinreceives); + + /* identify the IP header */ + iphdr = (struct ip_hdr *)p->payload; + if (IPH_V(iphdr) != 4) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IP packet dropped due to bad version number %"U16_F"\n", (u16_t)IPH_V(iphdr))); + ip4_debug_print(p); + pbuf_free(p); + IP_STATS_INC(ip.err); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinhdrerrors); + return ERR_OK; + } + +#ifdef LWIP_HOOK_IP4_INPUT + if (LWIP_HOOK_IP4_INPUT(p, inp)) { + /* the packet has been eaten */ + return ERR_OK; + } +#endif + + /* obtain IP header length in bytes */ + iphdr_hlen = IPH_HL_BYTES(iphdr); + /* obtain ip length in bytes */ + iphdr_len = lwip_ntohs(IPH_LEN(iphdr)); + + /* Trim pbuf. This is especially required for packets < 60 bytes. */ + if (iphdr_len < p->tot_len) { + pbuf_realloc(p, iphdr_len); + } + + /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */ + if ((iphdr_hlen > p->len) || (iphdr_len > p->tot_len) || (iphdr_hlen < IP_HLEN)) { + if (iphdr_hlen < IP_HLEN) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("ip4_input: short IP header (%"U16_F" bytes) received, IP packet dropped\n", iphdr_hlen)); + } + if (iphdr_hlen > p->len) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IP header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", + iphdr_hlen, p->len)); + } + if (iphdr_len > p->tot_len) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IP (len %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", + iphdr_len, p->tot_len)); + } + /* free (drop) packet pbufs */ + pbuf_free(p); + IP_STATS_INC(ip.lenerr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipindiscards); + return ERR_OK; + } + + /* verify checksum */ +#if CHECKSUM_CHECK_IP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_IP) { + if (inet_chksum(iphdr, iphdr_hlen) != 0) { + + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("Checksum (0x%"X16_F") failed, IP packet dropped.\n", inet_chksum(iphdr, iphdr_hlen))); + ip4_debug_print(p); + pbuf_free(p); + IP_STATS_INC(ip.chkerr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinhdrerrors); + return ERR_OK; + } + } +#endif + + /* copy IP addresses to aligned ip_addr_t */ + ip_addr_copy_from_ip4(ip_data.current_iphdr_dest, iphdr->dest); + ip_addr_copy_from_ip4(ip_data.current_iphdr_src, iphdr->src); + + /* match packet against an interface, i.e. is this packet for us? 
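+     The rules applied below: a multicast destination is accepted when the
+     input netif has joined the group (with LWIP_IGMP compiled in) or is
+     simply up and configured (without it); any other destination goes
+     through ip4_input_accept(), tried on 'inp' first and then on every
+     other configured netif.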
*/ + if (ip4_addr_ismulticast(ip4_current_dest_addr())) { +#if LWIP_IGMP + if ((inp->flags & NETIF_FLAG_IGMP) && (igmp_lookfor_group(inp, ip4_current_dest_addr()))) { + /* IGMP snooping switches need 0.0.0.0 to be allowed as source address (RFC 4541) */ + ip4_addr_t allsystems; + IP4_ADDR(&allsystems, 224, 0, 0, 1); + if (ip4_addr_cmp(ip4_current_dest_addr(), &allsystems) && + ip4_addr_isany(ip4_current_src_addr())) { + check_ip_src = 0; + } + netif = inp; + } else { + netif = NULL; + } +#else /* LWIP_IGMP */ + if ((netif_is_up(inp)) && (!ip4_addr_isany_val(*netif_ip4_addr(inp)))) { + netif = inp; + } else { + netif = NULL; + } +#endif /* LWIP_IGMP */ + } else { + /* start trying with inp. if that's not acceptable, start walking the + list of configured netifs. */ + if (ip4_input_accept(inp)) { + netif = inp; + } else { + netif = NULL; +#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF + /* Packets sent to the loopback address must not be accepted on an + * interface that does not have the loopback address assigned to it, + * unless a non-loopback interface is used for loopback traffic. */ + if (!ip4_addr_isloopback(ip4_current_dest_addr())) +#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */ + { +#if !LWIP_SINGLE_NETIF + NETIF_FOREACH(netif) { + if (netif == inp) { + /* we checked that before already */ + continue; + } + if (ip4_input_accept(netif)) { + break; + } + } +#endif /* !LWIP_SINGLE_NETIF */ + } + } + } + +#if IP_ACCEPT_LINK_LAYER_ADDRESSING + /* Pass DHCP messages regardless of destination address. DHCP traffic is addressed + * using link layer addressing (such as Ethernet MAC) so we must not filter on IP. + * This is according to RFC 1542 section 3.1.1, as referred to by RFC 2131. + * + * If you want to accept private broadcast communication while a netif is down, + * define LWIP_IP_ACCEPT_UDP_PORT(dst_port), e.g.: + * + * #define LWIP_IP_ACCEPT_UDP_PORT(dst_port) ((dst_port) == PP_NTOHS(12345)) + */ + if (netif == NULL) { + /* remote port is DHCP server? */ + if (IPH_PROTO(iphdr) == IP_PROTO_UDP) { + const struct udp_hdr *udphdr = (const struct udp_hdr *)((const u8_t *)iphdr + iphdr_hlen); + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: UDP packet to DHCP client port %"U16_F"\n", + lwip_ntohs(udphdr->dest))); + if (IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(udphdr->dest)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: DHCP packet accepted.\n")); + netif = inp; + check_ip_src = 0; + } + } + } +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ + + /* broadcast or multicast packet source address? Compliant with RFC 1122: 3.2.1.3 */ +#if LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING + if (check_ip_src +#if IP_ACCEPT_LINK_LAYER_ADDRESSING + /* DHCP servers need 0.0.0.0 to be allowed as source address (RFC 1122 section 3.2.1.3(a)) */ + && !ip4_addr_isany_val(*ip4_current_src_addr()) +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ + ) +#endif /* LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING */ + { + if ((ip4_addr_isbroadcast(ip4_current_src_addr(), inp)) || + (ip4_addr_ismulticast(ip4_current_src_addr()))) { + /* packet source is not valid */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("ip4_input: packet source is not valid.\n")); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinaddrerrors); + MIB2_STATS_INC(mib2.ipindiscards); + return ERR_OK; + } + } + + /* packet not for us? 
*/ + if (netif == NULL) { + /* packet not for us, route or discard */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: packet not for us.\n")); +#if IP_FORWARD + /* non-broadcast packet? */ + if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), inp)) { + /* try to forward IP packet on (other) interfaces */ + ip4_forward(p, (struct ip_hdr *)p->payload, inp); + } else +#endif /* IP_FORWARD */ + { + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinaddrerrors); + MIB2_STATS_INC(mib2.ipindiscards); + } + pbuf_free(p); + return ERR_OK; + } + /* packet consists of multiple fragments? */ + if ((IPH_OFFSET(iphdr) & PP_HTONS(IP_OFFMASK | IP_MF)) != 0) { +#if IP_REASSEMBLY /* packet fragment reassembly code present? */ + LWIP_DEBUGF(IP_DEBUG, ("IP packet is a fragment (id=0x%04"X16_F" tot_len=%"U16_F" len=%"U16_F" MF=%"U16_F" offset=%"U16_F"), calling ip4_reass()\n", + lwip_ntohs(IPH_ID(iphdr)), p->tot_len, lwip_ntohs(IPH_LEN(iphdr)), (u16_t)!!(IPH_OFFSET(iphdr) & PP_HTONS(IP_MF)), (u16_t)((lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK) * 8))); + /* reassemble the packet*/ + p = ip4_reass(p); + /* packet not fully reassembled yet? */ + if (p == NULL) { + return ERR_OK; + } + iphdr = (const struct ip_hdr *)p->payload; +#else /* IP_REASSEMBLY == 0, no packet fragment reassembly code present */ + pbuf_free(p); + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since it was fragmented (0x%"X16_F") (while IP_REASSEMBLY == 0).\n", + lwip_ntohs(IPH_OFFSET(iphdr)))); + IP_STATS_INC(ip.opterr); + IP_STATS_INC(ip.drop); + /* unsupported protocol feature */ + MIB2_STATS_INC(mib2.ipinunknownprotos); + return ERR_OK; +#endif /* IP_REASSEMBLY */ + } + +#if IP_OPTIONS_ALLOWED == 0 /* no support for IP options in the IP header? */ + +#if LWIP_IGMP + /* there is an extra "router alert" option in IGMP messages which we allow for but do not police */ + if ((iphdr_hlen > IP_HLEN) && (IPH_PROTO(iphdr) != IP_PROTO_IGMP)) { +#else + if (iphdr_hlen > IP_HLEN) { +#endif /* LWIP_IGMP */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since there were IP options (while IP_OPTIONS_ALLOWED == 0).\n")); + pbuf_free(p); + IP_STATS_INC(ip.opterr); + IP_STATS_INC(ip.drop); + /* unsupported protocol feature */ + MIB2_STATS_INC(mib2.ipinunknownprotos); + return ERR_OK; + } +#endif /* IP_OPTIONS_ALLOWED == 0 */ + + /* send to upper layers */ + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: \n")); + ip4_debug_print(p); + LWIP_DEBUGF(IP_DEBUG, ("ip4_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len)); + + ip_data.current_netif = netif; + ip_data.current_input_netif = inp; + ip_data.current_ip4_header = iphdr; + ip_data.current_ip_header_tot_len = IPH_HL_BYTES(iphdr); + +#if LWIP_RAW + /* raw input did not eat the packet? */ + raw_status = raw_input(p, inp); + if (raw_status != RAW_INPUT_EATEN) +#endif /* LWIP_RAW */ + { + pbuf_remove_header(p, iphdr_hlen); /* Move to payload, no check necessary. 
*/ + + switch (IPH_PROTO(iphdr)) { +#if LWIP_UDP + case IP_PROTO_UDP: +#if LWIP_UDPLITE + case IP_PROTO_UDPLITE: +#endif /* LWIP_UDPLITE */ + MIB2_STATS_INC(mib2.ipindelivers); + udp_input(p, inp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case IP_PROTO_TCP: + MIB2_STATS_INC(mib2.ipindelivers); + tcp_input(p, inp); + break; +#endif /* LWIP_TCP */ +#if LWIP_ICMP + case IP_PROTO_ICMP: + MIB2_STATS_INC(mib2.ipindelivers); + icmp_input(p, inp); + break; +#endif /* LWIP_ICMP */ +#if LWIP_IGMP + case IP_PROTO_IGMP: + igmp_input(p, inp, ip4_current_dest_addr()); + break; +#endif /* LWIP_IGMP */ + default: +#if LWIP_RAW + if (raw_status == RAW_INPUT_DELIVERED) { + MIB2_STATS_INC(mib2.ipindelivers); + } else +#endif /* LWIP_RAW */ + { +#if LWIP_ICMP + /* send ICMP destination protocol unreachable unless it was a broadcast */ + if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) && + !ip4_addr_ismulticast(ip4_current_dest_addr())) { + pbuf_header_force(p, (s16_t)iphdr_hlen); /* Move to ip header, no check necessary. */ + icmp_dest_unreach(p, ICMP_DUR_PROTO); + } +#endif /* LWIP_ICMP */ + + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Unsupported transport protocol %"U16_F"\n", (u16_t)IPH_PROTO(iphdr))); + + IP_STATS_INC(ip.proterr); + IP_STATS_INC(ip.drop); + MIB2_STATS_INC(mib2.ipinunknownprotos); + } + pbuf_free(p); + break; + } + } + + /* @todo: this is not really necessary... */ + ip_data.current_netif = NULL; + ip_data.current_input_netif = NULL; + ip_data.current_ip4_header = NULL; + ip_data.current_ip_header_tot_len = 0; + ip4_addr_set_any(ip4_current_src_addr()); + ip4_addr_set_any(ip4_current_dest_addr()); + + return ERR_OK; +} + +/** + * Sends an IP packet on a network interface. This function constructs + * the IP header and calculates the IP header checksum. If the source + * IP address is NULL, the IP address of the outgoing network + * interface is filled in as source address. + * If the destination IP address is LWIP_IP_HDRINCL, p is assumed to already + * include an IP header and p->payload points to it instead of the data. + * + * @param p the packet to send (p->payload points to the data, e.g. 
next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IP/LINK headers + * returns errors returned by netif->output + * + * @note ip_id: RFC791 "some host may be able to simply use + * unique identifiers independent of destination" + */ +err_t +ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, + u8_t proto, struct netif *netif) +{ +#if IP_OPTIONS_SEND + return ip4_output_if_opt(p, src, dest, ttl, tos, proto, netif, NULL, 0); +} + +/** + * Same as ip_output_if() but with the possibility to include IP options: + * + * @param ip_options pointer to the IP options, copied into the IP header + * @param optlen length of ip_options + */ +err_t +ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen) +{ +#endif /* IP_OPTIONS_SEND */ + const ip4_addr_t *src_used = src; + if (dest != LWIP_IP_HDRINCL) { + if (ip4_addr_isany(src)) { + src_used = netif_ip4_addr(netif); + } + } + +#if IP_OPTIONS_SEND + return ip4_output_if_opt_src(p, src_used, dest, ttl, tos, proto, netif, + ip_options, optlen); +#else /* IP_OPTIONS_SEND */ + return ip4_output_if_src(p, src_used, dest, ttl, tos, proto, netif); +#endif /* IP_OPTIONS_SEND */ +} + +/** + * Same as ip_output_if() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, + u8_t proto, struct netif *netif) +{ +#if IP_OPTIONS_SEND + return ip4_output_if_opt_src(p, src, dest, ttl, tos, proto, netif, NULL, 0); +} + +/** + * Same as ip_output_if_opt() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen) +{ +#endif /* IP_OPTIONS_SEND */ + struct ip_hdr *iphdr; + ip4_addr_t dest_addr; +#if CHECKSUM_GEN_IP_INLINE + u32_t chk_sum = 0; +#endif /* CHECKSUM_GEN_IP_INLINE */ + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + MIB2_STATS_INC(mib2.ipoutrequests); + + /* Should the IP header be generated or is it already included in p? 
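+     (A hypothetical usage sketch for this family of functions; 'remote_ip'
+     and 'netif' are assumed to be in scope, and the 4-byte payload is made
+     up purely for illustration:
+
+       struct pbuf *q = pbuf_alloc(PBUF_IP, 4, PBUF_RAM);
+       if (q != NULL) {
+         memset(q->payload, 0xab, 4);
+         ip4_output_if(q, IP4_ADDR_ANY4, &remote_ip, 64, 0, IP_PROTO_UDP, netif);
+         pbuf_free(q);
+       }
+     )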
*/ + if (dest != LWIP_IP_HDRINCL) { + u16_t ip_hlen = IP_HLEN; +#if IP_OPTIONS_SEND + u16_t optlen_aligned = 0; + if (optlen != 0) { +#if CHECKSUM_GEN_IP_INLINE + int i; +#endif /* CHECKSUM_GEN_IP_INLINE */ + if (optlen > (IP_HLEN_MAX - IP_HLEN)) { + /* optlen too long */ + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: optlen too long\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_VAL; + } + /* round up to a multiple of 4 */ + optlen_aligned = (u16_t)((optlen + 3) & ~3); + ip_hlen = (u16_t)(ip_hlen + optlen_aligned); + /* First write in the IP options */ + if (pbuf_add_header(p, optlen_aligned)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: not enough room for IP options in pbuf\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + MEMCPY(p->payload, ip_options, optlen); + if (optlen < optlen_aligned) { + /* zero the remaining bytes */ + memset(((char *)p->payload) + optlen, 0, (size_t)(optlen_aligned - optlen)); + } +#if CHECKSUM_GEN_IP_INLINE + for (i = 0; i < optlen_aligned / 2; i++) { + chk_sum += ((u16_t *)p->payload)[i]; + } +#endif /* CHECKSUM_GEN_IP_INLINE */ + } +#endif /* IP_OPTIONS_SEND */ + /* generate IP header */ + if (pbuf_add_header(p, IP_HLEN)) { + LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: not enough room for IP header in pbuf\n")); + + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + + iphdr = (struct ip_hdr *)p->payload; + LWIP_ASSERT("check that first pbuf can hold struct ip_hdr", + (p->len >= sizeof(struct ip_hdr))); + + IPH_TTL_SET(iphdr, ttl); + IPH_PROTO_SET(iphdr, proto); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += PP_NTOHS(proto | (ttl << 8)); +#endif /* CHECKSUM_GEN_IP_INLINE */ + + /* dest cannot be NULL here */ + ip4_addr_copy(iphdr->dest, *dest); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += ip4_addr_get_u32(&iphdr->dest) & 0xFFFF; + chk_sum += ip4_addr_get_u32(&iphdr->dest) >> 16; +#endif /* CHECKSUM_GEN_IP_INLINE */ + + IPH_VHL_SET(iphdr, 4, ip_hlen / 4); + IPH_TOS_SET(iphdr, tos); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += PP_NTOHS(tos | (iphdr->_v_hl << 8)); +#endif /* CHECKSUM_GEN_IP_INLINE */ + IPH_LEN_SET(iphdr, lwip_htons(p->tot_len)); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += iphdr->_len; +#endif /* CHECKSUM_GEN_IP_INLINE */ + IPH_OFFSET_SET(iphdr, 0); + IPH_ID_SET(iphdr, lwip_htons(ip_id)); +#if CHECKSUM_GEN_IP_INLINE + chk_sum += iphdr->_id; +#endif /* CHECKSUM_GEN_IP_INLINE */ + ++ip_id; + + if (src == NULL) { + ip4_addr_copy(iphdr->src, *IP4_ADDR_ANY4); + } else { + /* src cannot be NULL here */ + ip4_addr_copy(iphdr->src, *src); + } + +#if CHECKSUM_GEN_IP_INLINE + chk_sum += ip4_addr_get_u32(&iphdr->src) & 0xFFFF; + chk_sum += ip4_addr_get_u32(&iphdr->src) >> 16; + chk_sum = (chk_sum >> 16) + (chk_sum & 0xFFFF); + chk_sum = (chk_sum >> 16) + chk_sum; + chk_sum = ~chk_sum; + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + iphdr->_chksum = (u16_t)chk_sum; /* network order */ + } +#if LWIP_CHECKSUM_CTRL_PER_NETIF + else { + IPH_CHKSUM_SET(iphdr, 0); + } +#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF*/ +#else /* CHECKSUM_GEN_IP_INLINE */ + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, ip_hlen)); + } +#endif /* CHECKSUM_GEN_IP */ +#endif /* CHECKSUM_GEN_IP_INLINE */ + } else { + /* IP header already included in p */ + if (p->len < IP_HLEN) { + LWIP_DEBUGF(IP_DEBUG | 
LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: LWIP_IP_HDRINCL but pbuf is too short\n")); + IP_STATS_INC(ip.err); + MIB2_STATS_INC(mib2.ipoutdiscards); + return ERR_BUF; + } + iphdr = (struct ip_hdr *)p->payload; + ip4_addr_copy(dest_addr, iphdr->dest); + dest = &dest_addr; + } + + IP_STATS_INC(ip.xmit); + + LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num)); + ip4_debug_print(p); + +#if ENABLE_LOOPBACK + if (ip4_addr_cmp(dest, netif_ip4_addr(netif)) +#if !LWIP_HAVE_LOOPIF + || ip4_addr_isloopback(dest) +#endif /* !LWIP_HAVE_LOOPIF */ + ) { + /* Packet to self, enqueue it for loopback */ + LWIP_DEBUGF(IP_DEBUG, ("netif_loop_output()")); + return netif_loop_output(netif, p); + } +#if LWIP_MULTICAST_TX_OPTIONS + if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0) { + netif_loop_output(netif, p); + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ +#endif /* ENABLE_LOOPBACK */ +#if IP_FRAG + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif->mtu && (p->tot_len > netif->mtu)) { + return ip4_frag(p, netif, dest); + } +#endif /* IP_FRAG */ + + LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: call netif->output()\n")); + return netif->output(netif, p, dest); +} + +/** + * Simple interface to ip_output_if. It finds the outgoing network + * interface and calls upon ip_output_if to do the actual work. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto) +{ + struct netif *netif; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if ((netif = ip4_route_src(src, dest)) == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + return ERR_RTE; + } + + return ip4_output_if(p, src, dest, ttl, tos, proto, netif); +} + +#if LWIP_NETIF_USE_HINTS +/** Like ip_output, but takes an addr_hint pointer that is passed on to netif->addr_hint + * before calling ip_output_if. + * + * @param p the packet to send (p->payload points to the data, e.g. 
next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IP header and p->payload points to that IP header) + * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the + * IP address of the netif used to send is used as source address) + * @param dest the destination IP address to send the packet to + * @param ttl the TTL value to be set in the IP header + * @param tos the TOS value to be set in the IP header + * @param proto the PROTOCOL to be set in the IP header + * @param netif_hint netif output hint pointer set to netif->hint before + * calling ip_output_if() + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif_hint *netif_hint) +{ + struct netif *netif; + err_t err; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if ((netif = ip4_route_src(src, dest)) == NULL) { + LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); + IP_STATS_INC(ip.rterr); + return ERR_RTE; + } + + NETIF_SET_HINTS(netif, netif_hint); + err = ip4_output_if(p, src, dest, ttl, tos, proto, netif); + NETIF_RESET_HINTS(netif); + + return err; +} +#endif /* LWIP_NETIF_USE_HINTS*/ + +#if IP_DEBUG +/* Print an IP header by using LWIP_DEBUGF + * @param p an IP packet, p->payload pointing to the IP header + */ +void +ip4_debug_print(struct pbuf *p) +{ + struct ip_hdr *iphdr = (struct ip_hdr *)p->payload; + + LWIP_DEBUGF(IP_DEBUG, ("IP header:\n")); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("|%2"S16_F" |%2"S16_F" | 0x%02"X16_F" | %5"U16_F" | (v, hl, tos, len)\n", + (u16_t)IPH_V(iphdr), + (u16_t)IPH_HL(iphdr), + (u16_t)IPH_TOS(iphdr), + lwip_ntohs(IPH_LEN(iphdr)))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %5"U16_F" |%"U16_F"%"U16_F"%"U16_F"| %4"U16_F" | (id, flags, offset)\n", + lwip_ntohs(IPH_ID(iphdr)), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 15 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 14 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 13 & 1), + (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | 0x%04"X16_F" | (ttl, proto, chksum)\n", + (u16_t)IPH_TTL(iphdr), + (u16_t)IPH_PROTO(iphdr), + lwip_ntohs(IPH_CHKSUM(iphdr)))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (src)\n", + ip4_addr1_16_val(iphdr->src), + ip4_addr2_16_val(iphdr->src), + ip4_addr3_16_val(iphdr->src), + ip4_addr4_16_val(iphdr->src))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (dest)\n", + ip4_addr1_16_val(iphdr->dest), + ip4_addr2_16_val(iphdr->dest), + ip4_addr3_16_val(iphdr->dest), + ip4_addr4_16_val(iphdr->dest))); + LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); +} +#endif /* IP_DEBUG */ + +#endif /* LWIP_IPV4 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c new file mode 100644 index 0000000..92077c2 --- 
/dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_addr.c @@ -0,0 +1,321 @@ +/** + * @file + * This is the IPv4 address tools implementation. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip_addr.h" +#include "lwip/netif.h" + +/* used by IP4_ADDR_ANY and IP_ADDR_BROADCAST in ip_addr.h */ +const ip_addr_t ip_addr_any = IPADDR4_INIT(IPADDR_ANY); +const ip_addr_t ip_addr_broadcast = IPADDR4_INIT(IPADDR_BROADCAST); + +/** + * Determine if an address is a broadcast address on a network interface + * + * @param addr address to be checked + * @param netif the network interface against which the address is checked + * @return returns non-zero if the address is a broadcast address + */ +u8_t +ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif) +{ + ip4_addr_t ipaddr; + ip4_addr_set_u32(&ipaddr, addr); + + /* all ones (broadcast) or all zeroes (old skool broadcast) */ + if ((~addr == IPADDR_ANY) || + (addr == IPADDR_ANY)) { + return 1; + /* no broadcast support on this network interface? */ + } else if ((netif->flags & NETIF_FLAG_BROADCAST) == 0) { + /* the given address cannot be a broadcast address + * nor can we check against any broadcast addresses */ + return 0; + /* address matches network interface address exactly? => no broadcast */ + } else if (addr == ip4_addr_get_u32(netif_ip4_addr(netif))) { + return 0; + /* on the same (sub) network... */ + } else if (ip4_addr_netcmp(&ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) + /* ...and host identifier bits are all ones? =>... 
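+        (worked example: 192.168.1.255 under netmask 255.255.255.0 has host
+        bits 0xff, all ones, hence it is the subnet broadcast address, while
+        192.168.1.42 has host bits 0x2a and is not)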
*/ + && ((addr & ~ip4_addr_get_u32(netif_ip4_netmask(netif))) == + (IPADDR_BROADCAST & ~ip4_addr_get_u32(netif_ip4_netmask(netif))))) { + /* => network broadcast address */ + return 1; + } else { + return 0; + } +} + +/** Checks if a netmask is valid (starting with ones, then only zeros) + * + * @param netmask the IPv4 netmask to check (in network byte order!) + * @return 1 if the netmask is valid, 0 if it is not + */ +u8_t +ip4_addr_netmask_valid(u32_t netmask) +{ + u32_t mask; + u32_t nm_hostorder = lwip_htonl(netmask); + + /* first, check for the first zero */ + for (mask = 1UL << 31 ; mask != 0; mask >>= 1) { + if ((nm_hostorder & mask) == 0) { + break; + } + } + /* then check that there is no one */ + for (; mask != 0; mask >>= 1) { + if ((nm_hostorder & mask) != 0) { + /* there is a one after the first zero -> invalid */ + return 0; + } + } + /* no one after the first zero -> valid */ + return 1; +} + +/** + * Ascii internet address interpretation routine. + * The value returned is in network order. + * + * @param cp IP address in ascii representation (e.g. "127.0.0.1") + * @return ip address in network order + */ +u32_t +ipaddr_addr(const char *cp) +{ + ip4_addr_t val; + + if (ip4addr_aton(cp, &val)) { + return ip4_addr_get_u32(&val); + } + return (IPADDR_NONE); +} + +/** + * Check whether "cp" is a valid ascii representation + * of an Internet address and convert to a binary address. + * Returns 1 if the address is valid, 0 if not. + * This replaces inet_addr, the return value from which + * cannot distinguish between failure and a local broadcast address. + * + * @param cp IP address in ascii representation (e.g. "127.0.0.1") + * @param addr pointer to which to save the ip address in network order + * @return 1 if cp could be converted to addr, 0 on failure + */ +int +ip4addr_aton(const char *cp, ip4_addr_t *addr) +{ + u32_t val; + u8_t base; + char c; + u32_t parts[4]; + u32_t *pp = parts; + + c = *cp; + for (;;) { + /* + * Collect number up to ``.''. + * Values are specified as for C: + * 0x=hex, 0=octal, 1-9=decimal. + */ + if (!lwip_isdigit(c)) { + return 0; + } + val = 0; + base = 10; + if (c == '0') { + c = *++cp; + if (c == 'x' || c == 'X') { + base = 16; + c = *++cp; + } else { + base = 8; + } + } + for (;;) { + if (lwip_isdigit(c)) { + val = (val * base) + (u32_t)(c - '0'); + c = *++cp; + } else if (base == 16 && lwip_isxdigit(c)) { + val = (val << 4) | (u32_t)(c + 10 - (lwip_islower(c) ? 'a' : 'A')); + c = *++cp; + } else { + break; + } + } + if (c == '.') { + /* + * Internet format: + * a.b.c.d + * a.b.c (with c treated as 16 bits) + * a.b (with b treated as 24 bits) + */ + if (pp >= parts + 3) { + return 0; + } + *pp++ = val; + c = *++cp; + } else { + break; + } + } + /* + * Check for trailing characters. + */ + if (c != '\0' && !lwip_isspace(c)) { + return 0; + } + /* + * Concoct the address according to + * the number of parts specified. 
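+ * Examples of the accepted classic inet_aton() forms, for illustration:
+ *   "127.0.0.1"  - 4 parts, 8.8.8.8 bits
+ *   "127.1"      - 2 parts, "1" taken as a 24-bit value -> 127.0.0.1
+ *   "0x7f000001" - 1 part, a full 32-bit value          -> 127.0.0.1
+ *   "0177.0.0.1" - a leading 0 selects octal            -> 127.0.0.1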
+ */ + switch (pp - parts + 1) { + + case 0: + return 0; /* initial nondigit */ + + case 1: /* a -- 32 bits */ + break; + + case 2: /* a.b -- 8.24 bits */ + if (val > 0xffffffUL) { + return 0; + } + if (parts[0] > 0xff) { + return 0; + } + val |= parts[0] << 24; + break; + + case 3: /* a.b.c -- 8.8.16 bits */ + if (val > 0xffff) { + return 0; + } + if ((parts[0] > 0xff) || (parts[1] > 0xff)) { + return 0; + } + val |= (parts[0] << 24) | (parts[1] << 16); + break; + + case 4: /* a.b.c.d -- 8.8.8.8 bits */ + if (val > 0xff) { + return 0; + } + if ((parts[0] > 0xff) || (parts[1] > 0xff) || (parts[2] > 0xff)) { + return 0; + } + val |= (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8); + break; + default: + LWIP_ASSERT("unhandled", 0); + break; + } + if (addr) { + ip4_addr_set_u32(addr, lwip_htonl(val)); + } + return 1; +} + +/** + * Convert numeric IP address into decimal dotted ASCII representation. + * returns ptr to static buffer; not reentrant! + * + * @param addr ip address in network order to convert + * @return pointer to a global static (!) buffer that holds the ASCII + * representation of addr + */ +char * +ip4addr_ntoa(const ip4_addr_t *addr) +{ + static char str[IP4ADDR_STRLEN_MAX]; + return ip4addr_ntoa_r(addr, str, IP4ADDR_STRLEN_MAX); +} + +/** + * Same as ip4addr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char * +ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen) +{ + u32_t s_addr; + char inv[3]; + char *rp; + u8_t *ap; + u8_t rem; + u8_t n; + u8_t i; + int len = 0; + + s_addr = ip4_addr_get_u32(addr); + + rp = buf; + ap = (u8_t *)&s_addr; + for (n = 0; n < 4; n++) { + i = 0; + do { + rem = *ap % (u8_t)10; + *ap /= (u8_t)10; + inv[i++] = (char)('0' + rem); + } while (*ap); + while (i--) { + if (len++ >= buflen) { + return NULL; + } + *rp++ = inv[i]; + } + if (len++ >= buflen) { + return NULL; + } + *rp++ = '.'; + ap++; + } + *--rp = 0; + return buf; +} + +#endif /* LWIP_IPV4 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c new file mode 100644 index 0000000..b2fd14c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv4/ip4_frag.c @@ -0,0 +1,894 @@ +/** + * @file + * This is the IPv4 packet segmentation and reassembly implementation. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Jani Monoses + * Simon Goldschmidt + * original reassembly code by Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/ip4_frag.h" +#include "lwip/def.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/stats.h" +#include "lwip/icmp.h" + +#include <string.h> + +#if IP_REASSEMBLY +/** + * The IP reassembly code currently has the following limitations: + * - IP header options are not supported + * - fragments must not overlap (e.g. due to different routes), + * currently, overlapping or duplicate fragments are thrown away + * if IP_REASS_CHECK_OVERLAP=1 (the default)! + * + * @todo: work with IP header options + */ + +/** Setting this to 0, you can turn off checking the fragments for overlapping + * regions. The code gets a little smaller. Only use this if you know that + * overlapping won't occur on your network! */ +#ifndef IP_REASS_CHECK_OVERLAP +#define IP_REASS_CHECK_OVERLAP 1 +#endif /* IP_REASS_CHECK_OVERLAP */ + +/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is + * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. + * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA + * is set to 1, so one datagram can be reassembled at a time, only. */ +#ifndef IP_REASS_FREE_OLDEST +#define IP_REASS_FREE_OLDEST 1 +#endif /* IP_REASS_FREE_OLDEST */ + +#define IP_REASS_FLAG_LASTFRAG 0x01 + +#define IP_REASS_VALIDATE_TELEGRAM_FINISHED 1 +#define IP_REASS_VALIDATE_PBUF_QUEUED 0 +#define IP_REASS_VALIDATE_PBUF_DROPPED -1 + +/** This is a helper struct which holds the starting + * offset and the ending offset of this fragment to + * easily chain the fragments. + * It has the same packing requirements as the IP header, since it replaces + * the IP header in memory in incoming fragments (after copying it) to keep + * track of the various fragments. (-> If the IP header doesn't need packing, + * this struct doesn't need packing either.) + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip_reass_helper { + PACK_STRUCT_FIELD(struct pbuf *next_pbuf); + PACK_STRUCT_FIELD(u16_t start); + PACK_STRUCT_FIELD(u16_t end); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +#define IP_ADDRESSES_AND_ID_MATCH(iphdrA, iphdrB) \ + (ip4_addr_cmp(&(iphdrA)->src, &(iphdrB)->src) && \ + ip4_addr_cmp(&(iphdrA)->dest, &(iphdrB)->dest) && \ + IPH_ID(iphdrA) == IPH_ID(iphdrB)) ? 
1 : 0 + +/* global variables */ +static struct ip_reassdata *reassdatagrams; +static u16_t ip_reass_pbufcount; + +/* function prototypes */ +static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev); +static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev); + +/** + * Reassembly timer base function + * for both NO_SYS == 0 and 1 (!). + * + * Should be called every 1000 msec (defined by IP_TMR_INTERVAL). + */ +void +ip_reass_tmr(void) +{ + struct ip_reassdata *r, *prev = NULL; + + r = reassdatagrams; + while (r != NULL) { + /* Decrement the timer. Once it reaches 0, + * clean up the incomplete fragment assembly */ + if (r->timer > 0) { + r->timer--; + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer dec %"U16_F"\n", (u16_t)r->timer)); + prev = r; + r = r->next; + } else { + /* reassembly timed out */ + struct ip_reassdata *tmp; + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer timed out\n")); + tmp = r; + /* get the next pointer before freeing */ + r = r->next; + /* free the helper struct and all enqueued pbufs */ + ip_reass_free_complete_datagram(tmp, prev); + } + } +} + +/** + * Free a datagram (struct ip_reassdata) and all its pbufs. + * Updates the total count of enqueued pbufs (ip_reass_pbufcount), + * SNMP counters and sends an ICMP time exceeded packet. + * + * @param ipr datagram to free + * @param prev the previous datagram in the linked list + * @return the number of pbufs freed + */ +static int +ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev) +{ + u16_t pbufs_freed = 0; + u16_t clen; + struct pbuf *p; + struct ip_reass_helper *iprh; + + LWIP_ASSERT("prev != ipr", prev != ipr); + if (prev != NULL) { + LWIP_ASSERT("prev->next == ipr", prev->next == ipr); + } + + MIB2_STATS_INC(mib2.ipreasmfails); +#if LWIP_ICMP + iprh = (struct ip_reass_helper *)ipr->p->payload; + if (iprh->start == 0) { + /* The first fragment was received, send ICMP time exceeded. */ + /* First, de-queue the first pbuf from r->p. */ + p = ipr->p; + ipr->p = iprh->next_pbuf; + /* Then, copy the original header into it. */ + SMEMCPY(p->payload, &ipr->iphdr, IP_HLEN); + icmp_time_exceeded(p, ICMP_TE_FRAG); + clen = pbuf_clen(p); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(p); + } +#endif /* LWIP_ICMP */ + + /* First, free all received pbufs. The individual pbufs need to be released + separately as they have not yet been chained */ + p = ipr->p; + while (p != NULL) { + struct pbuf *pcur; + iprh = (struct ip_reass_helper *)p->payload; + pcur = p; + /* get the next pointer before freeing */ + p = iprh->next_pbuf; + clen = pbuf_clen(pcur); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(pcur); + } + /* Then, unchain the struct ip_reassdata from the list and free it. */ + ip_reass_dequeue_datagram(ipr, prev); + LWIP_ASSERT("ip_reass_pbufcount >= pbufs_freed", ip_reass_pbufcount >= pbufs_freed); + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - pbufs_freed); + + return pbufs_freed; +} + +#if IP_REASS_FREE_OLDEST +/** + * Free the oldest datagram to make room for enqueueing new fragments. + * The datagram 'fraghdr' belongs to is not freed! 
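+ * (Context, from the callers elsewhere in this file: eviction is triggered
+ * when enqueueing would exceed the pbuf budget or when no ip_reassdata can
+ * be allocated; the victim is the queued datagram with the smallest
+ * remaining 'timer' value, i.e. the one closest to timing out anyway.)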
+ * + * @param fraghdr IP header of the current fragment + * @param pbufs_needed number of pbufs needed to enqueue + * (used for freeing other datagrams if not enough space) + * @return the number of pbufs freed + */ +static int +ip_reass_remove_oldest_datagram(struct ip_hdr *fraghdr, int pbufs_needed) +{ + /* @todo Can't we simply remove the last datagram in the + * linked list behind reassdatagrams? + */ + struct ip_reassdata *r, *oldest, *prev, *oldest_prev; + int pbufs_freed = 0, pbufs_freed_current; + int other_datagrams; + + /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs, + * but don't free the datagram that 'fraghdr' belongs to! */ + do { + oldest = NULL; + prev = NULL; + oldest_prev = NULL; + other_datagrams = 0; + r = reassdatagrams; + while (r != NULL) { + if (!IP_ADDRESSES_AND_ID_MATCH(&r->iphdr, fraghdr)) { + /* Not the same datagram as fraghdr */ + other_datagrams++; + if (oldest == NULL) { + oldest = r; + oldest_prev = prev; + } else if (r->timer <= oldest->timer) { + /* older than the previous oldest */ + oldest = r; + oldest_prev = prev; + } + } + if (r->next != NULL) { + prev = r; + } + r = r->next; + } + if (oldest != NULL) { + pbufs_freed_current = ip_reass_free_complete_datagram(oldest, oldest_prev); + pbufs_freed += pbufs_freed_current; + } + } while ((pbufs_freed < pbufs_needed) && (other_datagrams > 1)); + return pbufs_freed; +} +#endif /* IP_REASS_FREE_OLDEST */ + +/** + * Enqueues a new fragment into the fragment queue + * @param fraghdr points to the new fragments IP hdr + * @param clen number of pbufs needed to enqueue (used for freeing other datagrams if not enough space) + * @return A pointer to the queue location into which the fragment was enqueued + */ +static struct ip_reassdata * +ip_reass_enqueue_new_datagram(struct ip_hdr *fraghdr, int clen) +{ + struct ip_reassdata *ipr; +#if ! IP_REASS_FREE_OLDEST + LWIP_UNUSED_ARG(clen); +#endif + + /* No matching previous fragment found, allocate a new reassdata struct */ + ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA); + if (ipr == NULL) { +#if IP_REASS_FREE_OLDEST + if (ip_reass_remove_oldest_datagram(fraghdr, clen) >= clen) { + ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA); + } + if (ipr == NULL) +#endif /* IP_REASS_FREE_OLDEST */ + { + IPFRAG_STATS_INC(ip_frag.memerr); + LWIP_DEBUGF(IP_REASS_DEBUG, ("Failed to alloc reassdata struct\n")); + return NULL; + } + } + memset(ipr, 0, sizeof(struct ip_reassdata)); + ipr->timer = IP_REASS_MAXAGE; + + /* enqueue the new structure to the front of the list */ + ipr->next = reassdatagrams; + reassdatagrams = ipr; + /* copy the ip header for later tests and input */ + /* @todo: no ip options supported? */ + SMEMCPY(&(ipr->iphdr), fraghdr, IP_HLEN); + return ipr; +} + +/** + * Dequeues a datagram from the datagram queue. Doesn't deallocate the pbufs. + * @param ipr points to the queue entry to dequeue + */ +static void +ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev) +{ + /* dequeue the reass struct */ + if (reassdatagrams == ipr) { + /* it was the first in the list */ + reassdatagrams = ipr->next; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", prev != NULL); + prev->next = ipr->next; + } + + /* now we can free the ip_reassdata struct */ + memp_free(MEMP_REASSDATA, ipr); +} + +/** + * Chain a new pbuf into the pbuf list that composes the datagram. The pbuf list + * will grow over time as new pbufs are rx. 
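+ * Each queued fragment keeps its bookkeeping in place of its own (already
+ * copied) IP header: the first bytes of the pbuf payload are reinterpreted
+ * as a struct ip_reass_helper carrying next_pbuf/start/end. start/end are
+ * byte offsets, e.g. a fragment-offset field of 185 yields
+ * start = 185 * 8 = 1480.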
+ * Also checks that the datagram passes basic continuity checks (if the last + * fragment was received at least once). + * @param ipr points to the reassembly state + * @param new_p points to the pbuf for the current fragment + * @param is_last is 1 if this pbuf has MF==0 (ipr->flags not updated yet) + * @return see IP_REASS_VALIDATE_* defines + */ +static int +ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p, int is_last) +{ + struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev = NULL; + struct pbuf *q; + u16_t offset, len; + u8_t hlen; + struct ip_hdr *fraghdr; + int valid = 1; + + /* Extract length and fragment offset from current fragment */ + fraghdr = (struct ip_hdr *)new_p->payload; + len = lwip_ntohs(IPH_LEN(fraghdr)); + hlen = IPH_HL_BYTES(fraghdr); + if (hlen > len) { + /* invalid datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } + len = (u16_t)(len - hlen); + offset = IPH_OFFSET_BYTES(fraghdr); + + /* overwrite the fragment's ip header from the pbuf with our helper struct, + * and setup the embedded helper structure. */ + /* make sure the struct ip_reass_helper fits into the IP header */ + LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN", + sizeof(struct ip_reass_helper) <= IP_HLEN); + iprh = (struct ip_reass_helper *)new_p->payload; + iprh->next_pbuf = NULL; + iprh->start = offset; + iprh->end = (u16_t)(offset + len); + if (iprh->end < offset) { + /* u16_t overflow, cannot handle this */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } + + /* Iterate through until we either get to the end of the list (append), + * or we find one with a larger offset (insert). */ + for (q = ipr->p; q != NULL;) { + iprh_tmp = (struct ip_reass_helper *)q->payload; + if (iprh->start < iprh_tmp->start) { + /* the new pbuf should be inserted before this */ + iprh->next_pbuf = q; + if (iprh_prev != NULL) { + /* not the fragment with the lowest offset */ +#if IP_REASS_CHECK_OVERLAP + if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) { + /* fragment overlaps with previous or following, throw away */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = new_p; + if (iprh_prev->end != iprh->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + if (iprh->end > iprh_tmp->start) { + /* fragment overlaps with following, throw away */ + return IP_REASS_VALIDATE_PBUF_DROPPED; + } +#endif /* IP_REASS_CHECK_OVERLAP */ + /* fragment with the lowest offset */ + ipr->p = new_p; + } + break; + } else if (iprh->start == iprh_tmp->start) { + /* received the same datagram twice: no need to keep the datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; +#if IP_REASS_CHECK_OVERLAP + } else if (iprh->start < iprh_tmp->end) { + /* overlap: no need to keep the new datagram */ + return IP_REASS_VALIDATE_PBUF_DROPPED; +#endif /* IP_REASS_CHECK_OVERLAP */ + } else { + /* Check if the fragments received so far have no holes. */ + if (iprh_prev != NULL) { + if (iprh_prev->end != iprh_tmp->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } + } + q = iprh_tmp->next_pbuf; + iprh_prev = iprh_tmp; + } + + /* If q is NULL, then we made it to the end of the list. 
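+     (i.e. the new fragment has the highest start offset queued so far).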
Determine what to do now */ + if (q == NULL) { + if (iprh_prev != NULL) { + /* this is (for now), the fragment with the highest offset: + * chain it to the last fragment */ +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start); +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = new_p; + if (iprh_prev->end != iprh->start) { + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("no previous fragment, this must be the first fragment!", + ipr->p == NULL); +#endif /* IP_REASS_CHECK_OVERLAP */ + /* this is the first fragment we ever received for this ip datagram */ + ipr->p = new_p; + } + } + + /* At this point, the validation part begins: */ + /* If we already received the last fragment */ + if (is_last || ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0)) { + /* and had no holes so far */ + if (valid) { + /* then check if the rest of the fragments is here */ + /* Check if the queue starts with the first datagram */ + if ((ipr->p == NULL) || (((struct ip_reass_helper *)ipr->p->payload)->start != 0)) { + valid = 0; + } else { + /* and check that there are no holes after this datagram */ + iprh_prev = iprh; + q = iprh->next_pbuf; + while (q != NULL) { + iprh = (struct ip_reass_helper *)q->payload; + if (iprh_prev->end != iprh->start) { + valid = 0; + break; + } + iprh_prev = iprh; + q = iprh->next_pbuf; + } + /* if still valid, all fragments are received + * (because to the MF==0 already arrived */ + if (valid) { + LWIP_ASSERT("sanity check", ipr->p != NULL); + LWIP_ASSERT("sanity check", + ((struct ip_reass_helper *)ipr->p->payload) != iprh); + LWIP_ASSERT("validate_datagram:next_pbuf!=NULL", + iprh->next_pbuf == NULL); + } + } + } + /* If valid is 0 here, there are some fragments missing in the middle + * (since MF == 0 has already arrived). Such datagrams simply time out if + * no more fragments are received... */ + return valid ? IP_REASS_VALIDATE_TELEGRAM_FINISHED : IP_REASS_VALIDATE_PBUF_QUEUED; + } + /* If we come here, not all fragments were received, yet! */ + return IP_REASS_VALIDATE_PBUF_QUEUED; /* not yet valid! */ +} + +/** + * Reassembles incoming IP fragments into an IP datagram. + * + * @param p points to a pbuf chain of the fragment + * @return NULL if reassembly is incomplete, ? otherwise + */ +struct pbuf * +ip4_reass(struct pbuf *p) +{ + struct pbuf *r; + struct ip_hdr *fraghdr; + struct ip_reassdata *ipr; + struct ip_reass_helper *iprh; + u16_t offset, len, clen; + u8_t hlen; + int valid; + int is_last; + + IPFRAG_STATS_INC(ip_frag.recv); + MIB2_STATS_INC(mib2.ipreasmreqds); + + fraghdr = (struct ip_hdr *)p->payload; + + if (IPH_HL_BYTES(fraghdr) != IP_HLEN) { + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: IP options currently not supported!\n")); + IPFRAG_STATS_INC(ip_frag.err); + goto nullreturn; + } + + offset = IPH_OFFSET_BYTES(fraghdr); + len = lwip_ntohs(IPH_LEN(fraghdr)); + hlen = IPH_HL_BYTES(fraghdr); + if (hlen > len) { + /* invalid datagram */ + goto nullreturn; + } + len = (u16_t)(len - hlen); + + /* Check if we are allowed to enqueue more datagrams. 
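+     The pbuf count over all reassembly queues (ip_reass_pbufcount) is
+     capped at IP_REASS_MAX_PBUFS.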
*/ + clen = pbuf_clen(p); + if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) { +#if IP_REASS_FREE_OLDEST + if (!ip_reass_remove_oldest_datagram(fraghdr, clen) || + ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS)) +#endif /* IP_REASS_FREE_OLDEST */ + { + /* No datagram could be freed and still too many pbufs enqueued */ + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n", + ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS)); + IPFRAG_STATS_INC(ip_frag.memerr); + /* @todo: send ICMP time exceeded here? */ + /* drop this pbuf */ + goto nullreturn; + } + } + + /* Look for the datagram the fragment belongs to in the current datagram queue, + * remembering the previous in the queue for later dequeueing. */ + for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) { + /* Check if the incoming fragment matches the one currently present + in the reassembly buffer. If so, we proceed with copying the + fragment into the buffer. */ + if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) { + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: matching previous fragment ID=%"X16_F"\n", + lwip_ntohs(IPH_ID(fraghdr)))); + IPFRAG_STATS_INC(ip_frag.cachehit); + break; + } + } + + if (ipr == NULL) { + /* Enqueue a new datagram into the datagram queue */ + ipr = ip_reass_enqueue_new_datagram(fraghdr, clen); + /* Bail if unable to enqueue */ + if (ipr == NULL) { + goto nullreturn; + } + } else { + if (((lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) && + ((lwip_ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) { + /* ipr->iphdr is not the header from the first fragment, but fraghdr is + * -> copy fraghdr into ipr->iphdr since we want to have the header + * of the first fragment (for ICMP time exceeded and later, for copying + * all options, if supported)*/ + SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN); + } + } + + /* At this point, we have either created a new entry or pointing + * to an existing one */ + + /* check for 'no more fragments', and update queue entry*/ + is_last = (IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0; + if (is_last) { + u16_t datagram_len = (u16_t)(offset + len); + if ((datagram_len < offset) || (datagram_len > (0xFFFF - IP_HLEN))) { + /* u16_t overflow, cannot handle this */ + goto nullreturn_ipr; + } + } + /* find the right place to insert this pbuf */ + /* @todo: trim pbufs if fragments are overlapping */ + valid = ip_reass_chain_frag_into_datagram_and_validate(ipr, p, is_last); + if (valid == IP_REASS_VALIDATE_PBUF_DROPPED) { + goto nullreturn_ipr; + } + /* if we come here, the pbuf has been enqueued */ + + /* Track the current number of pbufs current 'in-flight', in order to limit + the number of fragments that may be enqueued at any one time + (overflow checked by testing against IP_REASS_MAX_PBUFS) */ + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount + clen); + if (is_last) { + u16_t datagram_len = (u16_t)(offset + len); + ipr->datagram_len = datagram_len; + ipr->flags |= IP_REASS_FLAG_LASTFRAG; + LWIP_DEBUGF(IP_REASS_DEBUG, + ("ip4_reass: last fragment seen, total len %"S16_F"\n", + ipr->datagram_len)); + } + + if (valid == IP_REASS_VALIDATE_TELEGRAM_FINISHED) { + struct ip_reassdata *ipr_prev; + /* the totally last fragment (flag more fragments = 0) was received at least + * once AND all fragments are received */ + u16_t datagram_len = (u16_t)(ipr->datagram_len + IP_HLEN); + + /* save the second pbuf before copying the header over the pointer */ + r = ((struct ip_reass_helper *)ipr->p->payload)->next_pbuf; + + /* copy the original ip header back 
to the first pbuf */ + fraghdr = (struct ip_hdr *)(ipr->p->payload); + SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN); + IPH_LEN_SET(fraghdr, lwip_htons(datagram_len)); + IPH_OFFSET_SET(fraghdr, 0); + IPH_CHKSUM_SET(fraghdr, 0); + /* @todo: do we need to set/calculate the correct checksum? */ +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(ip_current_input_netif(), NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN)); + } +#endif /* CHECKSUM_GEN_IP */ + + p = ipr->p; + + /* chain together the pbufs contained within the reass_data list. */ + while (r != NULL) { + iprh = (struct ip_reass_helper *)r->payload; + + /* hide the ip header for every succeeding fragment */ + pbuf_remove_header(r, IP_HLEN); + pbuf_cat(p, r); + r = iprh->next_pbuf; + } + + /* find the previous entry in the linked list */ + if (ipr == reassdatagrams) { + ipr_prev = NULL; + } else { + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } + + /* release the sources allocate for the fragment queue entry */ + ip_reass_dequeue_datagram(ipr, ipr_prev); + + /* and adjust the number of pbufs currently queued for reassembly. */ + clen = pbuf_clen(p); + LWIP_ASSERT("ip_reass_pbufcount >= clen", ip_reass_pbufcount >= clen); + ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount - clen); + + MIB2_STATS_INC(mib2.ipreasmoks); + + /* Return the pbuf chain */ + return p; + } + /* the datagram is not (yet?) reassembled completely */ + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_pbufcount: %d out\n", ip_reass_pbufcount)); + return NULL; + +nullreturn_ipr: + LWIP_ASSERT("ipr != NULL", ipr != NULL); + if (ipr->p == NULL) { + /* dropped pbuf after creating a new datagram entry: remove the entry, too */ + LWIP_ASSERT("not firstalthough just enqueued", ipr == reassdatagrams); + ip_reass_dequeue_datagram(ipr, NULL); + } + +nullreturn: + LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: nullreturn\n")); + IPFRAG_STATS_INC(ip_frag.drop); + pbuf_free(p); + return NULL; +} +#endif /* IP_REASSEMBLY */ + +#if IP_FRAG +#if !LWIP_NETIF_TX_SINGLE_PBUF +/** Allocate a new struct pbuf_custom_ref */ +static struct pbuf_custom_ref * +ip_frag_alloc_pbuf_custom_ref(void) +{ + return (struct pbuf_custom_ref *)memp_malloc(MEMP_FRAG_PBUF); +} + +/** Free a struct pbuf_custom_ref */ +static void +ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref *p) +{ + LWIP_ASSERT("p != NULL", p != NULL); + memp_free(MEMP_FRAG_PBUF, p); +} + +/** Free-callback function to free a 'struct pbuf_custom_ref', called by + * pbuf_free. */ +static void +ipfrag_free_pbuf_custom(struct pbuf *p) +{ + struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref *)p; + LWIP_ASSERT("pcr != NULL", pcr != NULL); + LWIP_ASSERT("pcr == p", (void *)pcr == (void *)p); + if (pcr->original != NULL) { + pbuf_free(pcr->original); + } + ip_frag_free_pbuf_custom_ref(pcr); +} +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * Fragment an IP datagram if too large for the netif. + * + * Chop the datagram in MTU sized chunks and send them in order + * by pointing PBUF_REFs into p. 
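+ * Every fragment except the last carries a payload that is a multiple of
+ * 8 bytes, as required by the IP fragment offset field. For example, with
+ * an Ethernet MTU of 1500: nfb = (1500 - 20) / 8 = 185, so each full
+ * fragment carries 185 * 8 = 1480 payload bytes.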
+ * + * @param p ip packet to send + * @param netif the netif on which to send + * @param dest destination ip address to which to send + * + * @return ERR_OK if sent successfully, err_t otherwise + */ +err_t +ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest) +{ + struct pbuf *rambuf; +#if !LWIP_NETIF_TX_SINGLE_PBUF + struct pbuf *newpbuf; + u16_t newpbuflen = 0; + u16_t left_to_copy; +#endif + struct ip_hdr *original_iphdr; + struct ip_hdr *iphdr; + const u16_t nfb = (u16_t)((netif->mtu - IP_HLEN) / 8); + u16_t left, fragsize; + u16_t ofo; + int last; + u16_t poff = IP_HLEN; + u16_t tmp; + int mf_set; + + original_iphdr = (struct ip_hdr *)p->payload; + iphdr = original_iphdr; + if (IPH_HL_BYTES(iphdr) != IP_HLEN) { + /* ip4_frag() does not support IP options */ + return ERR_VAL; + } + LWIP_ERROR("ip4_frag(): pbuf too short", p->len >= IP_HLEN, return ERR_VAL); + + /* Save original offset */ + tmp = lwip_ntohs(IPH_OFFSET(iphdr)); + ofo = tmp & IP_OFFMASK; + /* already fragmented? if so, the last fragment we create must have MF, too */ + mf_set = tmp & IP_MF; + + left = (u16_t)(p->tot_len - IP_HLEN); + + while (left) { + /* Fill this fragment */ + fragsize = LWIP_MIN(left, (u16_t)(nfb * 8)); + +#if LWIP_NETIF_TX_SINGLE_PBUF + rambuf = pbuf_alloc(PBUF_IP, fragsize, PBUF_RAM); + if (rambuf == NULL) { + goto memerr; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL)); + poff += pbuf_copy_partial(p, rambuf->payload, fragsize, poff); + /* make room for the IP header */ + if (pbuf_add_header(rambuf, IP_HLEN)) { + pbuf_free(rambuf); + goto memerr; + } + /* fill in the IP header */ + SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN); + iphdr = (struct ip_hdr *)rambuf->payload; +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + /* When not using a static buffer, create a chain of pbufs. + * The first will be a PBUF_RAM holding the link and IP header. + * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged, + * but limited to the size of an mtu. + */ + rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM); + if (rambuf == NULL) { + goto memerr; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len >= (IP_HLEN))); + SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN); + iphdr = (struct ip_hdr *)rambuf->payload; + + left_to_copy = fragsize; + while (left_to_copy) { + struct pbuf_custom_ref *pcr; + u16_t plen = (u16_t)(p->len - poff); + LWIP_ASSERT("p->len >= poff", p->len >= poff); + newpbuflen = LWIP_MIN(left_to_copy, plen); + /* Is this pbuf already empty? */ + if (!newpbuflen) { + poff = 0; + p = p->next; + continue; + } + pcr = ip_frag_alloc_pbuf_custom_ref(); + if (pcr == NULL) { + pbuf_free(rambuf); + goto memerr; + } + /* Mirror this pbuf, although we might not need all of it. */ + newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, + (u8_t *)p->payload + poff, newpbuflen); + if (newpbuf == NULL) { + ip_frag_free_pbuf_custom_ref(pcr); + pbuf_free(rambuf); + goto memerr; + } + pbuf_ref(p); + pcr->original = p; + pcr->pc.custom_free_function = ipfrag_free_pbuf_custom; + + /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain + * so that it is removed when pbuf_dechain is later called on rambuf. 
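+       * (pbuf_cat hands over our reference to the new pbuf, whereas
+       * pbuf_chain would take an additional one.)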
+ */ + pbuf_cat(rambuf, newpbuf); + left_to_copy = (u16_t)(left_to_copy - newpbuflen); + if (left_to_copy) { + poff = 0; + p = p->next; + } + } + poff = (u16_t)(poff + newpbuflen); +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + /* Correct header */ + last = (left <= netif->mtu - IP_HLEN); + + /* Set new offset and MF flag */ + tmp = (IP_OFFMASK & (ofo)); + if (!last || mf_set) { + /* the last fragment has MF set if the input frame had it */ + tmp = tmp | IP_MF; + } + IPH_OFFSET_SET(iphdr, lwip_htons(tmp)); + IPH_LEN_SET(iphdr, lwip_htons((u16_t)(fragsize + IP_HLEN))); + IPH_CHKSUM_SET(iphdr, 0); +#if CHECKSUM_GEN_IP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { + IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN)); + } +#endif /* CHECKSUM_GEN_IP */ + + /* No need for separate header pbuf - we allowed room for it in rambuf + * when allocated. + */ + netif->output(netif, rambuf, dest); + IPFRAG_STATS_INC(ip_frag.xmit); + + /* Unfortunately we can't reuse rambuf - the hardware may still be + * using the buffer. Instead we free it (and the ensuing chain) and + * recreate it next time round the loop. If we're lucky the hardware + * will have already sent the packet, the free will really free, and + * there will be zero memory penalty. + */ + + pbuf_free(rambuf); + left = (u16_t)(left - fragsize); + ofo = (u16_t)(ofo + nfb); + } + MIB2_STATS_INC(mib2.ipfragoks); + return ERR_OK; +memerr: + MIB2_STATS_INC(mib2.ipfragfails); + return ERR_MEM; +} +#endif /* IP_FRAG */ + +#endif /* LWIP_IPV4 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c new file mode 100644 index 0000000..1ae8742 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/dhcp6.c @@ -0,0 +1,812 @@ +/** + * @file + * + * @defgroup dhcp6 DHCPv6 + * @ingroup ip6 + * DHCPv6 client: IPv6 address autoconfiguration as per + * RFC 3315 (stateful DHCPv6) and + * RFC 3736 (stateless DHCPv6). + * + * For now, only stateless DHCPv6 is implemented! + * + * TODO: + * - enable/disable API to not always start when RA is received + * - stateful DHCPv6 (for now, only stateless DHCPv6 for DNS and NTP servers works) + * - create Client Identifier? + * - only start requests if a valid local address is available on the netif + * - only start information requests if required (not for every RA) + * + * dhcp6_enable_stateful() enables stateful DHCPv6 for a netif (stateless disabled)\n + * dhcp6_enable_stateless() enables stateless DHCPv6 for a netif (stateful disabled)\n + * dhcp6_disable() disable DHCPv6 for a netif + * + * When enabled, requests are only issued after receipt of RA with the + * corresponding bits set. + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/dhcp6.h" +#include "lwip/prot/dhcp6.h" +#include "lwip/def.h" +#include "lwip/udp.h" +#include "lwip/dns.h" + +#include + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif +#ifndef LWIP_HOOK_DHCP6_APPEND_OPTIONS +#define LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, state, msg, msg_type, options_len_ptr, max_len) +#endif +#ifndef LWIP_HOOK_DHCP6_PARSE_OPTION +#define LWIP_HOOK_DHCP6_PARSE_OPTION(netif, dhcp6, state, msg, msg_type, option, len, pbuf, offset) do { LWIP_UNUSED_ARG(msg); } while(0) +#endif + +#if LWIP_DNS && LWIP_DHCP6_MAX_DNS_SERVERS +#if DNS_MAX_SERVERS > LWIP_DHCP6_MAX_DNS_SERVERS +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS LWIP_DHCP6_MAX_DNS_SERVERS +#else +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS +#endif +#else +#define LWIP_DHCP6_PROVIDE_DNS_SERVERS 0 +#endif + + +/** Option handling: options are parsed in dhcp6_parse_reply + * and saved in an array where other functions can load them from. + * This might be moved into the struct dhcp6 (not necessarily since + * lwIP is single-threaded and the array is only used while in recv + * callback). */ +enum dhcp6_option_idx { + DHCP6_OPTION_IDX_CLI_ID = 0, + DHCP6_OPTION_IDX_SERVER_ID, +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + DHCP6_OPTION_IDX_DNS_SERVER, + DHCP6_OPTION_IDX_DOMAIN_LIST, +#endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP6_GET_NTP_SRV + DHCP6_OPTION_IDX_NTP_SERVER, +#endif /* LWIP_DHCP_GET_NTP_SRV */ + DHCP6_OPTION_IDX_MAX +}; + +struct dhcp6_option_info { + u8_t option_given; + u16_t val_start; + u16_t val_length; +}; + +/** Holds the decoded option info, only valid while in dhcp6_recv. 
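+ *  Each entry records whether an option was seen and the offset/length of
+ *  its value in the received pbuf (see the dhcp6_get_option_* macros below).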
*/ +struct dhcp6_option_info dhcp6_rx_options[DHCP6_OPTION_IDX_MAX]; + +#define dhcp6_option_given(dhcp6, idx) (dhcp6_rx_options[idx].option_given != 0) +#define dhcp6_got_option(dhcp6, idx) (dhcp6_rx_options[idx].option_given = 1) +#define dhcp6_clear_option(dhcp6, idx) (dhcp6_rx_options[idx].option_given = 0) +#define dhcp6_clear_all_options(dhcp6) (memset(dhcp6_rx_options, 0, sizeof(dhcp6_rx_options))) +#define dhcp6_get_option_start(dhcp6, idx) (dhcp6_rx_options[idx].val_start) +#define dhcp6_get_option_length(dhcp6, idx) (dhcp6_rx_options[idx].val_length) +#define dhcp6_set_option(dhcp6, idx, start, len) do { dhcp6_rx_options[idx].val_start = (start); dhcp6_rx_options[idx].val_length = (len); }while(0) + + +const ip_addr_t dhcp6_All_DHCP6_Relay_Agents_and_Servers = IPADDR6_INIT_HOST(0xFF020000, 0, 0, 0x00010002); +const ip_addr_t dhcp6_All_DHCP6_Servers = IPADDR6_INIT_HOST(0xFF020000, 0, 0, 0x00010003); + +static struct udp_pcb *dhcp6_pcb; +static u8_t dhcp6_pcb_refcount; + + +/* receive, unfold, parse and free incoming messages */ +static void dhcp6_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); + +/** Ensure DHCP PCB is allocated and bound */ +static err_t +dhcp6_inc_pcb_refcount(void) +{ + if (dhcp6_pcb_refcount == 0) { + LWIP_ASSERT("dhcp6_inc_pcb_refcount(): memory leak", dhcp6_pcb == NULL); + + /* allocate UDP PCB */ + dhcp6_pcb = udp_new_ip6(); + + if (dhcp6_pcb == NULL) { + return ERR_MEM; + } + + ip_set_option(dhcp6_pcb, SOF_BROADCAST); + + /* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */ + udp_bind(dhcp6_pcb, IP6_ADDR_ANY, DHCP6_CLIENT_PORT); + udp_recv(dhcp6_pcb, dhcp6_recv, NULL); + } + + dhcp6_pcb_refcount++; + + return ERR_OK; +} + +/** Free DHCP PCB if the last netif stops using it */ +static void +dhcp6_dec_pcb_refcount(void) +{ + LWIP_ASSERT("dhcp6_pcb_refcount(): refcount error", (dhcp6_pcb_refcount > 0)); + dhcp6_pcb_refcount--; + + if (dhcp6_pcb_refcount == 0) { + udp_remove(dhcp6_pcb); + dhcp6_pcb = NULL; + } +} + +/** + * @ingroup dhcp6 + * Set a statically allocated struct dhcp6 to work with. + * Using this prevents dhcp6_start to allocate it using mem_malloc. + * + * @param netif the netif for which to set the struct dhcp + * @param dhcp6 (uninitialised) dhcp6 struct allocated by the application + */ +void +dhcp6_set_struct(struct netif *netif, struct dhcp6 *dhcp6) +{ + LWIP_ASSERT("netif != NULL", netif != NULL); + LWIP_ASSERT("dhcp6 != NULL", dhcp6 != NULL); + LWIP_ASSERT("netif already has a struct dhcp6 set", netif_dhcp6_data(netif) == NULL); + + /* clear data structure */ + memset(dhcp6, 0, sizeof(struct dhcp6)); + /* dhcp6_set_state(&dhcp, DHCP6_STATE_OFF); */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, dhcp6); +} + +/** + * @ingroup dhcp6 + * Removes a struct dhcp6 from a netif. + * + * ATTENTION: Only use this when not using dhcp6_set_struct() to allocate the + * struct dhcp6 since the memory is passed back to the heap. 
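+ * Typically called after dhcp6_disable(), once the reference to the shared
+ * UDP PCB has been released.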
+ * + * @param netif the netif from which to remove the struct dhcp + */ +void dhcp6_cleanup(struct netif *netif) +{ + LWIP_ASSERT("netif != NULL", netif != NULL); + + if (netif_dhcp6_data(netif) != NULL) { + mem_free(netif_dhcp6_data(netif)); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL); + } +} + +static struct dhcp6* +dhcp6_get_struct(struct netif *netif, const char *dbg_requester) +{ + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + if (dhcp6 == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: mallocing new DHCPv6 client\n", dbg_requester)); + dhcp6 = (struct dhcp6 *)mem_malloc(sizeof(struct dhcp6)); + if (dhcp6 == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: could not allocate dhcp6\n", dbg_requester)); + return NULL; + } + + /* clear data structure, this implies DHCP6_STATE_OFF */ + memset(dhcp6, 0, sizeof(struct dhcp6)); + /* store this dhcp6 client in the netif */ + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, dhcp6); + } else { + /* already has DHCP6 client attached */ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("%s: using existing DHCPv6 client\n", dbg_requester)); + } + + if (!dhcp6->pcb_allocated) { + if (dhcp6_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP6 PCB is allocated */ + mem_free(dhcp6); + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL); + return NULL; + } + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("%s: allocated dhcp6", dbg_requester)); + dhcp6->pcb_allocated = 1; + } + return dhcp6; +} + +/* + * Set the DHCPv6 state + * If the state changed, reset the number of tries. + */ +static void +dhcp6_set_state(struct dhcp6 *dhcp6, u8_t new_state, const char *dbg_caller) +{ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("DHCPv6 state: %d -> %d (%s)\n", + dhcp6->state, new_state, dbg_caller)); + if (new_state != dhcp6->state) { + dhcp6->state = new_state; + dhcp6->tries = 0; + dhcp6->request_timeout = 0; + } +} + +static int +dhcp6_stateless_enabled(struct dhcp6 *dhcp6) +{ + if ((dhcp6->state == DHCP6_STATE_STATELESS_IDLE) || + (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG)) { + return 1; + } + return 0; +} + +/*static int +dhcp6_stateful_enabled(struct dhcp6 *dhcp6) +{ + if (dhcp6->state == DHCP6_STATE_OFF) { + return 0; + } + if (dhcp6_stateless_enabled(dhcp6)) { + return 0; + } + return 1; +}*/ + +/** + * @ingroup dhcp6 + * Enable stateful DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_MANAGED_ADDR_CONFIG flag set. + * + * A struct dhcp6 will be allocated for this netif if not + * set via @ref dhcp6_set_struct before. + * + * @todo: stateful DHCPv6 not supported, yet + */ +err_t +dhcp6_enable_stateful(struct netif *netif) +{ + LWIP_UNUSED_ARG(netif); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("stateful dhcp6 not implemented yet")); + return ERR_VAL; +} + +/** + * @ingroup dhcp6 + * Enable stateless DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_OTHER_CONFIG flag set. + * + * A struct dhcp6 will be allocated for this netif if not + * set via @ref dhcp6_set_struct before. 
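+ *
+ * Example usage (a minimal sketch; assumes 'netif' is an added, IPv6-enabled
+ * interface and LWIP_IPV6_DHCP6_STATELESS is set in lwipopts.h):
+ *
+ *   dhcp6_enable_stateless(netif);
+ *
+ * DNS (and NTP, if configured) servers are then picked up from the replies
+ * to Information-requests triggered by router advertisements.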
+ */ +err_t +dhcp6_enable_stateless(struct netif *netif) +{ + struct dhcp6 *dhcp6; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_enable_stateless(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + dhcp6 = dhcp6_get_struct(netif, "dhcp6_enable_stateless()"); + if (dhcp6 == NULL) { + return ERR_MEM; + } + if (dhcp6_stateless_enabled(dhcp6)) { + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): stateless DHCPv6 already enabled")); + return ERR_OK; + } else if (dhcp6->state != DHCP6_STATE_OFF) { + /* stateful running */ + /* @todo: stop stateful once it is implemented */ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): switching from stateful to stateless DHCPv6")); + } + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_enable_stateless(): stateless DHCPv6 enabled\n")); + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_enable_stateless"); + return ERR_OK; +} + +/** + * @ingroup dhcp6 + * Disable stateful or stateless DHCPv6 on this netif + * Requests are sent on receipt of an RA message with the + * ND6_RA_FLAG_OTHER_CONFIG flag set. + */ +void +dhcp6_disable(struct netif *netif) +{ + struct dhcp6 *dhcp6; + + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_disable(netif=%p) %c%c%"U16_F"\n", (void *)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); + + dhcp6 = netif_dhcp6_data(netif); + if (dhcp6 != NULL) { + if (dhcp6->state != DHCP6_STATE_OFF) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_disable(): DHCPv6 disabled (old state: %s)\n", + (dhcp6_stateless_enabled(dhcp6) ? "stateless" : "stateful"))); + dhcp6_set_state(dhcp6, DHCP6_STATE_OFF, "dhcp6_disable"); + if (dhcp6->pcb_allocated != 0) { + dhcp6_dec_pcb_refcount(); /* free DHCPv6 PCB if not needed any more */ + dhcp6->pcb_allocated = 0; + } + } + } +} + +/** + * Create a DHCPv6 request, fill in common headers + * + * @param netif the netif under DHCPv6 control + * @param dhcp6 dhcp6 control struct + * @param message_type message type of the request + * @param opt_len_alloc option length to allocate + * @param options_out_len option length on exit + * @return a pbuf for the message + */ +static struct pbuf * +dhcp6_create_msg(struct netif *netif, struct dhcp6 *dhcp6, u8_t message_type, + u16_t opt_len_alloc, u16_t *options_out_len) +{ + struct pbuf *p_out; + struct dhcp6_msg *msg_out; + + LWIP_ERROR("dhcp6_create_msg: netif != NULL", (netif != NULL), return NULL;); + LWIP_ERROR("dhcp6_create_msg: dhcp6 != NULL", (dhcp6 != NULL), return NULL;); + p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp6_msg) + opt_len_alloc, PBUF_RAM); + if (p_out == NULL) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("dhcp6_create_msg(): could not allocate pbuf\n")); + return NULL; + } + LWIP_ASSERT("dhcp6_create_msg: check that first pbuf can hold struct dhcp6_msg", + (p_out->len >= sizeof(struct dhcp6_msg) + opt_len_alloc)); + + /* @todo: limit new xid for certain message types? 
*/ + /* reuse transaction identifier in retransmissions */ + if (dhcp6->tries == 0) { + dhcp6->xid = LWIP_RAND() & 0xFFFFFF; + } + + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, + ("transaction id xid(%"X32_F")\n", dhcp6->xid)); + + msg_out = (struct dhcp6_msg *)p_out->payload; + memset(msg_out, 0, sizeof(struct dhcp6_msg) + opt_len_alloc); + + msg_out->msgtype = message_type; + msg_out->transaction_id[0] = (u8_t)(dhcp6->xid >> 16); + msg_out->transaction_id[1] = (u8_t)(dhcp6->xid >> 8); + msg_out->transaction_id[2] = (u8_t)dhcp6->xid; + *options_out_len = 0; + return p_out; +} + +static u16_t +dhcp6_option_short(u16_t options_out_len, u8_t *options, u16_t value) +{ + options[options_out_len++] = (u8_t)((value & 0xff00U) >> 8); + options[options_out_len++] = (u8_t) (value & 0x00ffU); + return options_out_len; +} + +static u16_t +dhcp6_option_optionrequest(u16_t options_out_len, u8_t *options, const u16_t *req_options, + u16_t num_req_options, u16_t max_len) +{ + size_t i; + u16_t ret; + + LWIP_ASSERT("dhcp6_option_optionrequest: options_out_len + sizeof(struct dhcp6_msg) + addlen <= max_len", + sizeof(struct dhcp6_msg) + options_out_len + 4U + (2U * num_req_options) <= max_len); + LWIP_UNUSED_ARG(max_len); + + ret = dhcp6_option_short(options_out_len, options, DHCP6_OPTION_ORO); + ret = dhcp6_option_short(ret, options, 2 * num_req_options); + for (i = 0; i < num_req_options; i++) { + ret = dhcp6_option_short(ret, options, req_options[i]); + } + return ret; +} + +/* All options are added, shrink the pbuf to the required size */ +static void +dhcp6_msg_finalize(u16_t options_out_len, struct pbuf *p_out) +{ + /* shrink the pbuf to the actual content length */ + pbuf_realloc(p_out, (u16_t)(sizeof(struct dhcp6_msg) + options_out_len)); +} + + +#if LWIP_IPV6_DHCP6_STATELESS +static void +dhcp6_information_request(struct netif *netif, struct dhcp6 *dhcp6) +{ + const u16_t requested_options[] = {DHCP6_OPTION_DNS_SERVERS, DHCP6_OPTION_DOMAIN_LIST, DHCP6_OPTION_SNTP_SERVERS}; + u16_t msecs; + struct pbuf *p_out; + u16_t options_out_len; + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_information_request()\n")); + /* create and initialize the DHCP message header */ + p_out = dhcp6_create_msg(netif, dhcp6, DHCP6_INFOREQUEST, 4 + sizeof(requested_options), &options_out_len); + if (p_out != NULL) { + err_t err; + struct dhcp6_msg *msg_out = (struct dhcp6_msg *)p_out->payload; + u8_t *options = (u8_t *)(msg_out + 1); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_information_request: making request\n")); + + options_out_len = dhcp6_option_optionrequest(options_out_len, options, requested_options, + LWIP_ARRAYSIZE(requested_options), p_out->len); + LWIP_HOOK_DHCP6_APPEND_OPTIONS(netif, dhcp6, DHCP6_STATE_REQUESTING_CONFIG, msg_out, + DHCP6_INFOREQUEST, options_out_len, p_out->len); + dhcp6_msg_finalize(options_out_len, p_out); + + err = udp_sendto_if(dhcp6_pcb, p_out, &dhcp6_All_DHCP6_Relay_Agents_and_Servers, DHCP6_SERVER_PORT, netif); + pbuf_free(p_out); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_information_request: INFOREQUESTING -> %d\n", (int)err)); + LWIP_UNUSED_ARG(err); + } else { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp6_information_request: could not allocate DHCP6 request\n")); + } + dhcp6_set_state(dhcp6, DHCP6_STATE_REQUESTING_CONFIG, "dhcp6_information_request"); + if (dhcp6->tries < 255) { + dhcp6->tries++; + } + msecs = (u16_t)((dhcp6->tries < 6 ? 
1 << dhcp6->tries : 60) * 1000); + dhcp6->request_timeout = (u16_t)((msecs + DHCP6_TIMER_MSECS - 1) / DHCP6_TIMER_MSECS); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_information_request(): set request timeout %"U16_F" msecs\n", msecs)); +} + +static err_t +dhcp6_request_config(struct netif *netif, struct dhcp6 *dhcp6) +{ + /* stateless mode enabled and no request running? */ + if (dhcp6->state == DHCP6_STATE_STATELESS_IDLE) { + /* send Information-request and wait for answer; setup receive timeout */ + dhcp6_information_request(netif, dhcp6); + } + + return ERR_OK; +} + +static void +dhcp6_abort_config_request(struct dhcp6 *dhcp6) +{ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + /* abort running request */ + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_abort_config_request"); + } +} + +/* Handle a REPLY to INFOREQUEST + * This parses DNS and NTP server addresses from the reply. + */ +static void +dhcp6_handle_config_reply(struct netif *netif, struct pbuf *p_msg_in) +{ + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + + LWIP_UNUSED_ARG(dhcp6); + LWIP_UNUSED_ARG(p_msg_in); + +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER)) { + ip_addr_t dns_addr; + ip6_addr_t *dns_addr6; + u16_t op_start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + u16_t op_len = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + u16_t idx; + u8_t n; + + memset(&dns_addr, 0, sizeof(dns_addr)); + dns_addr6 = ip_2_ip6(&dns_addr); + for (n = 0, idx = op_start; (idx < op_start + op_len) && (n < LWIP_DHCP6_PROVIDE_DNS_SERVERS); + n++, idx += sizeof(struct ip6_addr_packed)) { + u16_t copied = pbuf_copy_partial(p_msg_in, dns_addr6, sizeof(struct ip6_addr_packed), idx); + if (copied != sizeof(struct ip6_addr_packed)) { + /* pbuf length mismatch */ + return; + } + ip6_addr_assign_zone(dns_addr6, IP6_UNKNOWN, netif); + /* @todo: do we need a different offset than DHCP(v4)? */ + dns_setserver(n, &dns_addr); + } + } + /* @ todo: parse and set Domain Search List */ +#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */ + +#if LWIP_DHCP6_GET_NTP_SRV + if (dhcp6_option_given(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER)) { + ip_addr_t ntp_server_addrs[LWIP_DHCP6_MAX_NTP_SERVERS]; + u16_t op_start = dhcp6_get_option_start(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + u16_t op_len = dhcp6_get_option_length(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + u16_t idx; + u8_t n; + + for (n = 0, idx = op_start; (idx < op_start + op_len) && (n < LWIP_DHCP6_MAX_NTP_SERVERS); + n++, idx += sizeof(struct ip6_addr_packed)) { + u16_t copied; + ip6_addr_t *ntp_addr6 = ip_2_ip6(&ntp_server_addrs[n]); + ip_addr_set_zero_ip6(&ntp_server_addrs[n]); + copied = pbuf_copy_partial(p_msg_in, ntp_addr6, sizeof(struct ip6_addr_packed), idx); + if (copied != sizeof(struct ip6_addr_packed)) { + /* pbuf length mismatch */ + return; + } + ip6_addr_assign_zone(ntp_addr6, IP6_UNKNOWN, netif); + } + dhcp6_set_ntp_servers(n, ntp_server_addrs); + } +#endif /* LWIP_DHCP6_GET_NTP_SRV */ +} +#endif /* LWIP_IPV6_DHCP6_STATELESS */ + +/** This function is called from nd6 module when an RA messsage is received + * It triggers DHCPv6 requests (if enabled). 
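+ * Only the 'other config' (O) flag is acted upon here: it starts or aborts
+ * a stateless Information-request. The 'managed' (M) flag would start
+ * stateful DHCPv6, which is not implemented yet.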
+ */ +void +dhcp6_nd6_ra_trigger(struct netif *netif, u8_t managed_addr_config, u8_t other_config) +{ + struct dhcp6 *dhcp6; + + LWIP_ASSERT("netif != NULL", netif != NULL); + dhcp6 = netif_dhcp6_data(netif); + + LWIP_UNUSED_ARG(managed_addr_config); + LWIP_UNUSED_ARG(other_config); + LWIP_UNUSED_ARG(dhcp6); + +#if LWIP_IPV6_DHCP6_STATELESS + if (dhcp6 != NULL) { + if (dhcp6_stateless_enabled(dhcp6)) { + if (other_config) { + dhcp6_request_config(netif, dhcp6); + } else { + dhcp6_abort_config_request(dhcp6); + } + } + } +#endif /* LWIP_IPV6_DHCP6_STATELESS */ +} + +/** + * Parse the DHCPv6 message and extract the DHCPv6 options. + * + * Extract the DHCPv6 options (offset + length) so that we can later easily + * check for them or extract the contents. + */ +static err_t +dhcp6_parse_reply(struct pbuf *p, struct dhcp6 *dhcp6) +{ + u16_t offset; + u16_t offset_max; + u16_t options_idx; + struct dhcp6_msg *msg_in; + + LWIP_UNUSED_ARG(dhcp6); + + /* clear received options */ + dhcp6_clear_all_options(dhcp6); + msg_in = (struct dhcp6_msg *)p->payload; + + /* parse options */ + + options_idx = sizeof(struct dhcp6_msg); + /* parse options to the end of the received packet */ + offset_max = p->tot_len; + + offset = options_idx; + /* at least 4 byte to read? */ + while ((offset + 4 <= offset_max)) { + u8_t op_len_buf[4]; + u8_t *op_len; + u16_t op; + u16_t len; + u16_t val_offset = (u16_t)(offset + 4); + if (val_offset < offset) { + /* overflow */ + return ERR_BUF; + } + /* copy option + length, might be split accross pbufs */ + op_len = (u8_t *)pbuf_get_contiguous(p, op_len_buf, 4, 4, offset); + if (op_len == NULL) { + /* failed to get option and length */ + return ERR_VAL; + } + op = (op_len[0] << 8) | op_len[1]; + len = (op_len[2] << 8) | op_len[3]; + offset = val_offset + len; + if (offset < val_offset) { + /* overflow */ + return ERR_BUF; + } + + switch (op) { + case (DHCP6_OPTION_CLIENTID): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_CLI_ID); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_CLI_ID, val_offset, len); + break; + case (DHCP6_OPTION_SERVERID): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_SERVER_ID); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_SERVER_ID, val_offset, len); + break; +#if LWIP_DHCP6_PROVIDE_DNS_SERVERS + case (DHCP6_OPTION_DNS_SERVERS): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_DNS_SERVER, val_offset, len); + break; + case (DHCP6_OPTION_DOMAIN_LIST): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_DOMAIN_LIST); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_DOMAIN_LIST, val_offset, len); + break; +#endif /* LWIP_DHCP6_PROVIDE_DNS_SERVERS */ +#if LWIP_DHCP6_GET_NTP_SRV + case (DHCP6_OPTION_SNTP_SERVERS): + dhcp6_got_option(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER); + dhcp6_set_option(dhcp6, DHCP6_OPTION_IDX_NTP_SERVER, val_offset, len); + break; +#endif /* LWIP_DHCP6_GET_NTP_SRV*/ + default: + LWIP_DEBUGF(DHCP6_DEBUG, ("skipping option %"U16_F" in options\n", op)); + LWIP_HOOK_DHCP6_PARSE_OPTION(ip_current_netif(), dhcp6, dhcp6->state, msg_in, + msg_in->msgtype, op, len, q, val_offset); + break; + } + } + return ERR_OK; +} + +static void +dhcp6_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) +{ + struct netif *netif = ip_current_input_netif(); + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + struct dhcp6_msg *reply_msg = (struct dhcp6_msg *)p->payload; + u8_t msg_type; + u32_t xid; + + LWIP_UNUSED_ARG(arg); + + /* Caught DHCPv6 message from netif that does not have DHCPv6 enabled? 
-> not interested */ + if ((dhcp6 == NULL) || (dhcp6->pcb_allocated == 0)) { + goto free_pbuf_and_return; + } + + LWIP_ERROR("invalid server address type", IP_IS_V6(addr), goto free_pbuf_and_return;); + + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_recv(pbuf = %p) from DHCPv6 server %s port %"U16_F"\n", (void *)p, + ipaddr_ntoa(addr), port)); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len)); + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len)); + /* prevent warnings about unused arguments */ + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(addr); + LWIP_UNUSED_ARG(port); + + if (p->len < sizeof(struct dhcp6_msg)) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCPv6 reply message or pbuf too short\n")); + goto free_pbuf_and_return; + } + + /* match transaction ID against what we expected */ + xid = reply_msg->transaction_id[0] << 16; + xid |= reply_msg->transaction_id[1] << 8; + xid |= reply_msg->transaction_id[2]; + if (xid != dhcp6->xid) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, + ("transaction id mismatch reply_msg->xid(%"X32_F")!= dhcp6->xid(%"X32_F")\n", xid, dhcp6->xid)); + goto free_pbuf_and_return; + } + /* option fields could be unfold? */ + if (dhcp6_parse_reply(p, dhcp6) != ERR_OK) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, + ("problem unfolding DHCPv6 message - too short on memory?\n")); + goto free_pbuf_and_return; + } + + /* read DHCP message type */ + msg_type = reply_msg->msgtype; + /* message type is DHCP6 REPLY? */ + if (msg_type == DHCP6_REPLY) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("DHCP6_REPLY received\n")); +#if LWIP_IPV6_DHCP6_STATELESS + /* in info-requesting state? */ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + dhcp6_set_state(dhcp6, DHCP6_STATE_STATELESS_IDLE, "dhcp6_recv"); + dhcp6_handle_config_reply(netif, p); + } else +#endif /* LWIP_IPV6_DHCP6_STATELESS */ + { + /* @todo: handle reply in other states? */ + } + } else { + /* @todo: handle other message types */ + } + +free_pbuf_and_return: + pbuf_free(p); +} + +/** + * A DHCPv6 request has timed out. + * + * The timer that was started with the DHCPv6 request has + * timed out, indicating no response was received in time. + */ +static void +dhcp6_timeout(struct netif *netif, struct dhcp6 *dhcp6) +{ + LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp6_timeout()\n")); + + LWIP_UNUSED_ARG(netif); + LWIP_UNUSED_ARG(dhcp6); + +#if LWIP_IPV6_DHCP6_STATELESS + /* back-off period has passed, or server selection timed out */ + if (dhcp6->state == DHCP6_STATE_REQUESTING_CONFIG) { + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE, ("dhcp6_timeout(): retrying information request\n")); + dhcp6_information_request(netif, dhcp6); + } +#endif /* LWIP_IPV6_DHCP6_STATELESS */ +} + +/** + * DHCPv6 timeout handling (this function must be called every 500ms, + * see @ref DHCP6_TIMER_MSECS). + * + * A DHCPv6 server is expected to respond within a short period of time. + * This timer checks whether an outstanding DHCPv6 request is timed out. 
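+ * (dhcp6->request_timeout counts in units of DHCP6_TIMER_MSECS: e.g. a
+ * 1000 ms retransmission timeout is stored as 2 ticks.)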
+ */ +void +dhcp6_tmr(void) +{ + struct netif *netif; + /* loop through netif's */ + NETIF_FOREACH(netif) { + struct dhcp6 *dhcp6 = netif_dhcp6_data(netif); + /* only act on DHCPv6 configured interfaces */ + if (dhcp6 != NULL) { + /* timer is active (non zero), and is about to trigger now */ + if (dhcp6->request_timeout > 1) { + dhcp6->request_timeout--; + } else if (dhcp6->request_timeout == 1) { + dhcp6->request_timeout--; + /* { dhcp6->request_timeout == 0 } */ + LWIP_DEBUGF(DHCP6_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp6_tmr(): request timeout\n")); + /* this client's request timeout triggered */ + dhcp6_timeout(netif, dhcp6); + } + } + } +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_DHCP6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c new file mode 100644 index 0000000..0a616b6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ethip6.c @@ -0,0 +1,123 @@ +/** + * @file + * + * Ethernet output for IPv6. Uses ND tables for link-layer addressing. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_ETHERNET + +#include "lwip/ethip6.h" +#include "lwip/nd6.h" +#include "lwip/pbuf.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/netif.h" +#include "lwip/icmp6.h" +#include "lwip/prot/ethernet.h" +#include "netif/ethernet.h" + +#include + +/** + * Resolve and fill-in Ethernet address header for outgoing IPv6 packet. + * + * For IPv6 multicast, corresponding Ethernet addresses + * are selected and the packet is transmitted on the link. + * + * For unicast addresses, ask the ND6 module what to do. 
It will either let us + * send the the packet right away, or queue the packet for later itself, unless + * an error occurs. + * + * @todo anycast addresses + * + * @param netif The lwIP network interface which the IP packet will be sent on. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The IP address of the packet destination. + * + * @return + * - ERR_OK or the return value of @ref nd6_get_next_hop_addr_or_queue. + */ +err_t +ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) +{ + struct eth_addr dest; + const u8_t *hwaddr; + err_t result; + + LWIP_ASSERT_CORE_LOCKED(); + + /* The destination IP address must be properly zoned from here on down. */ + IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif); + + /* multicast destination IP address? */ + if (ip6_addr_ismulticast(ip6addr)) { + /* Hash IP multicast address to MAC address.*/ + dest.addr[0] = 0x33; + dest.addr[1] = 0x33; + dest.addr[2] = ((const u8_t *)(&(ip6addr->addr[3])))[0]; + dest.addr[3] = ((const u8_t *)(&(ip6addr->addr[3])))[1]; + dest.addr[4] = ((const u8_t *)(&(ip6addr->addr[3])))[2]; + dest.addr[5] = ((const u8_t *)(&(ip6addr->addr[3])))[3]; + + /* Send out. */ + return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); + } + + /* We have a unicast destination IP address */ + /* @todo anycast? */ + + /* Ask ND6 what to do with the packet. */ + result = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr); + if (result != ERR_OK) { + return result; + } + + /* If no hardware address is returned, nd6 has queued the packet for later. */ + if (hwaddr == NULL) { + return ERR_OK; + } + + /* Send out the packet using the returned hardware address. */ + SMEMCPY(dest.addr, hwaddr, 6); + return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); +} + +#endif /* LWIP_IPV6 && LWIP_ETHERNET */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c new file mode 100644 index 0000000..7c93668 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/icmp6.c @@ -0,0 +1,425 @@ +/** + * @file + * + * IPv6 version of ICMP, as per RFC 4443. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/icmp6.h" +#include "lwip/prot/icmp6.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/inet_chksum.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/ip.h" +#include "lwip/stats.h" + +#include + +#if LWIP_ICMP6_DATASIZE == 0 +#undef LWIP_ICMP6_DATASIZE +#define LWIP_ICMP6_DATASIZE 8 +#endif + +/* Forward declarations */ +static void icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type); +static void icmp6_send_response_with_addrs(struct pbuf *p, u8_t code, u32_t data, + u8_t type, const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr); +static void icmp6_send_response_with_addrs_and_netif(struct pbuf *p, u8_t code, u32_t data, + u8_t type, const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr, struct netif *netif); + + +/** + * Process an input ICMPv6 message. Called by ip6_input. + * + * Will generate a reply for echo requests. Other messages are forwarded + * to nd6_input, or mld6_input. + * + * @param p the mld packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +icmp6_input(struct pbuf *p, struct netif *inp) +{ + struct icmp6_hdr *icmp6hdr; + struct pbuf *r; + const ip6_addr_t *reply_src; + + ICMP6_STATS_INC(icmp6.recv); + + /* Check that ICMPv6 header fits in payload */ + if (p->len < sizeof(struct icmp6_hdr)) { + /* drop short packets */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.lenerr); + ICMP6_STATS_INC(icmp6.drop); + return; + } + + icmp6hdr = (struct icmp6_hdr *)p->payload; + +#if CHECKSUM_CHECK_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP6) { + if (ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->tot_len, ip6_current_src_addr(), + ip6_current_dest_addr()) != 0) { + /* Checksum failed */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.chkerr); + ICMP6_STATS_INC(icmp6.drop); + return; + } + } +#endif /* CHECKSUM_CHECK_ICMP6 */ + + switch (icmp6hdr->type) { + case ICMP6_TYPE_NA: /* Neighbor advertisement */ + case ICMP6_TYPE_NS: /* Neighbor solicitation */ + case ICMP6_TYPE_RA: /* Router advertisement */ + case ICMP6_TYPE_RD: /* Redirect */ + case ICMP6_TYPE_PTB: /* Packet too big */ + nd6_input(p, inp); + return; + case ICMP6_TYPE_RS: +#if LWIP_IPV6_FORWARD + /* @todo implement router functionality */ +#endif + break; +#if LWIP_IPV6_MLD + case ICMP6_TYPE_MLQ: + case ICMP6_TYPE_MLR: + case ICMP6_TYPE_MLD: + mld6_input(p, inp); + return; +#endif + case ICMP6_TYPE_EREQ: +#if !LWIP_MULTICAST_PING + /* multicast destination address? */ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* drop */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.drop); + return; + } +#endif /* LWIP_MULTICAST_PING */ + + /* Allocate reply. 
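+         The reply mirrors the whole request, so it must be able to hold
+         p->tot_len bytes.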
*/ + r = pbuf_alloc(PBUF_IP, p->tot_len, PBUF_RAM); + if (r == NULL) { + /* drop */ + pbuf_free(p); + ICMP6_STATS_INC(icmp6.memerr); + return; + } + + /* Copy echo request. */ + if (pbuf_copy(r, p) != ERR_OK) { + /* drop */ + pbuf_free(p); + pbuf_free(r); + ICMP6_STATS_INC(icmp6.err); + return; + } + + /* Determine reply source IPv6 address. */ +#if LWIP_MULTICAST_PING + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + reply_src = ip_2_ip6(ip6_select_source_address(inp, ip6_current_src_addr())); + if (reply_src == NULL) { + /* drop */ + pbuf_free(p); + pbuf_free(r); + ICMP6_STATS_INC(icmp6.rterr); + return; + } + } + else +#endif /* LWIP_MULTICAST_PING */ + { + reply_src = ip6_current_dest_addr(); + } + + /* Set fields in reply. */ + ((struct icmp6_echo_hdr *)(r->payload))->type = ICMP6_TYPE_EREP; + ((struct icmp6_echo_hdr *)(r->payload))->chksum = 0; +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP6) { + ((struct icmp6_echo_hdr *)(r->payload))->chksum = ip6_chksum_pseudo(r, + IP6_NEXTH_ICMP6, r->tot_len, reply_src, ip6_current_src_addr()); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send reply. */ + ICMP6_STATS_INC(icmp6.xmit); + ip6_output_if(r, reply_src, ip6_current_src_addr(), + LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, inp); + pbuf_free(r); + + break; + default: + ICMP6_STATS_INC(icmp6.proterr); + ICMP6_STATS_INC(icmp6.drop); + break; + } + + pbuf_free(p); +} + + +/** + * Send an icmpv6 'destination unreachable' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'unreachable' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the unreachable type + */ +void +icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c) +{ + icmp6_send_response(p, c, 0, ICMP6_TYPE_DUR); +} + +/** + * Send an icmpv6 'packet too big' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'packet too big' should be sent, + * p->payload pointing to the IPv6 header + * @param mtu the maximum mtu that we can accept + */ +void +icmp6_packet_too_big(struct pbuf *p, u32_t mtu) +{ + icmp6_send_response(p, 0, mtu, ICMP6_TYPE_PTB); +} + +/** + * Send an icmpv6 'time exceeded' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost. + * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the time exceeded type + */ +void +icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c) +{ + icmp6_send_response(p, c, 0, ICMP6_TYPE_TE); +} + +/** + * Send an icmpv6 'time exceeded' packet, with explicit source and destination + * addresses. + * + * This function may be used to send a response sometime after receiving the + * packet for which this response is meant. The provided source and destination + * addresses are used primarily to retain their zone information. 
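+ * A typical caller is the IPv6 reassembly code, where the offending packet
+ * was received long before the reassembly timeout fires.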
+ * + * @param p the input packet for which the 'time exceeded' should be sent, + * p->payload pointing to the IPv6 header + * @param c ICMPv6 code for the time exceeded type + * @param src_addr source address of the original packet, with zone information + * @param dest_addr destination address of the original packet, with zone + * information + */ +void +icmp6_time_exceeded_with_addrs(struct pbuf *p, enum icmp6_te_code c, + const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr) +{ + icmp6_send_response_with_addrs(p, c, 0, ICMP6_TYPE_TE, src_addr, dest_addr); +} + +/** + * Send an icmpv6 'parameter problem' packet. + * + * This function must be used only in direct response to a packet that is being + * received right now. Otherwise, address zones would be lost and the calculated + * offset would be wrong (calculated against ip6_current_header()). + * + * @param p the input packet for which the 'param problem' should be sent, + * p->payload pointing to the IP header + * @param c ICMPv6 code for the param problem type + * @param pointer the pointer to the byte where the parameter is found + */ +void +icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, const void *pointer) +{ + u32_t pointer_u32 = (u32_t)((const u8_t *)pointer - (const u8_t *)ip6_current_header()); + icmp6_send_response(p, c, pointer_u32, ICMP6_TYPE_PP); +} + +/** + * Send an ICMPv6 packet in response to an incoming packet. + * The packet is sent *to* ip_current_src_addr() on ip_current_netif(). + * + * @param p the input packet for which the response should be sent, + * p->payload pointing to the IPv6 header + * @param code Code of the ICMPv6 header + * @param data Additional 32-bit parameter in the ICMPv6 header + * @param type Type of the ICMPv6 header + */ +static void +icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type) +{ + const struct ip6_addr *reply_src, *reply_dest; + struct netif *netif = ip_current_netif(); + + LWIP_ASSERT("icmpv6 packet not a direct response", netif != NULL); + reply_dest = ip6_current_src_addr(); + + /* Select an address to use as source. */ + reply_src = ip_2_ip6(ip6_select_source_address(netif, reply_dest)); + if (reply_src == NULL) { + ICMP6_STATS_INC(icmp6.rterr); + return; + } + icmp6_send_response_with_addrs_and_netif(p, code, data, type, reply_src, reply_dest, netif); +} + +/** + * Send an ICMPv6 packet in response to an incoming packet. + * + * Call this function if the packet is NOT sent as a direct response to an + * incoming packet, but rather sometime later (e.g. for a fragment reassembly + * timeout). The caller must provide the zoned source and destination addresses + * from the original packet with the src_addr and dest_addr parameters. The + * reason for this approach is that while the addresses themselves are part of + * the original packet, their zone information is not, thus possibly resulting + * in a link-local response being sent over the wrong link. 
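+ *
+ * For illustration (editor's sketch): a caller that may respond later can
+ * capture zoned copies of the addresses while the packet is still being
+ * processed, because ip6_current_src_addr()/ip6_current_dest_addr() are
+ * only valid during input; saved_src and saved_dest are hypothetical
+ * storage:
+ *
+ *   static ip6_addr_t saved_src, saved_dest;
+ *
+ *   static void save_addrs_for_delayed_reply(void)
+ *   {
+ *     // the copies keep the zone that ip6_input assigned on reception
+ *     ip6_addr_copy(saved_src, *ip6_current_src_addr());
+ *     ip6_addr_copy(saved_dest, *ip6_current_dest_addr());
+ *   }
+ *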
+ *
+ * @param p the input packet for which the response should be sent,
+ *          p->payload pointing to the IPv6 header
+ * @param code Code of the ICMPv6 header
+ * @param data Additional 32-bit parameter in the ICMPv6 header
+ * @param type Type of the ICMPv6 header
+ * @param src_addr original source address
+ * @param dest_addr original destination address
+ */
+static void
+icmp6_send_response_with_addrs(struct pbuf *p, u8_t code, u32_t data, u8_t type,
+  const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr)
+{
+  const struct ip6_addr *reply_src, *reply_dest;
+  struct netif *netif;
+
+  /* Get the destination address and netif for this ICMP message. */
+  LWIP_ASSERT("must provide both source and destination", src_addr != NULL);
+  LWIP_ASSERT("must provide both source and destination", dest_addr != NULL);
+
+  /* Special case, as ip6_current_xxx is either NULL, or points
+     to a different packet than the one that expired. */
+  IP6_ADDR_ZONECHECK(src_addr);
+  IP6_ADDR_ZONECHECK(dest_addr);
+  /* Swap source and destination for the reply. */
+  reply_dest = src_addr;
+  reply_src = dest_addr;
+  netif = ip6_route(reply_src, reply_dest);
+  if (netif == NULL) {
+    ICMP6_STATS_INC(icmp6.rterr);
+    return;
+  }
+  icmp6_send_response_with_addrs_and_netif(p, code, data, type, reply_src,
+    reply_dest, netif);
+}
+
+/**
+ * Send an ICMPv6 packet (with src/dst address and netif given).
+ *
+ * @param p the input packet for which the response should be sent,
+ *          p->payload pointing to the IPv6 header
+ * @param code Code of the ICMPv6 header
+ * @param data Additional 32-bit parameter in the ICMPv6 header
+ * @param type Type of the ICMPv6 header
+ * @param reply_src source address of the packet to send
+ * @param reply_dest destination address of the packet to send
+ * @param netif netif to send the packet
+ */
+static void
+icmp6_send_response_with_addrs_and_netif(struct pbuf *p, u8_t code, u32_t data, u8_t type,
+  const ip6_addr_t *reply_src, const ip6_addr_t *reply_dest, struct netif *netif)
+{
+  struct pbuf *q;
+  struct icmp6_hdr *icmp6hdr;
+
+  /* ICMPv6 header + IPv6 header + data */
+  q = pbuf_alloc(PBUF_IP, sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE,
+                 PBUF_RAM);
+  if (q == NULL) {
+    LWIP_DEBUGF(ICMP_DEBUG, ("icmp6_send_response: failed to allocate pbuf for ICMPv6 packet.\n"));
+    ICMP6_STATS_INC(icmp6.memerr);
+    return;
+  }
+  LWIP_ASSERT("check that first pbuf can hold icmp6 message",
+              (q->len >= (sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE)));
+
+  icmp6hdr = (struct icmp6_hdr *)q->payload;
+  icmp6hdr->type = type;
+  icmp6hdr->code = code;
+  icmp6hdr->data = lwip_htonl(data);
+
+  /* copy fields from original packet */
+  SMEMCPY((u8_t *)q->payload + sizeof(struct icmp6_hdr), (u8_t *)p->payload,
+          IP6_HLEN + LWIP_ICMP6_DATASIZE);
+
+  /* calculate checksum */
+  icmp6hdr->chksum = 0;
+#if CHECKSUM_GEN_ICMP6
+  IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) {
+    icmp6hdr->chksum = ip6_chksum_pseudo(q, IP6_NEXTH_ICMP6, q->tot_len,
+      reply_src, reply_dest);
+  }
+#endif /* CHECKSUM_GEN_ICMP6 */
+
+  ICMP6_STATS_INC(icmp6.xmit);
+  ip6_output_if(q, reply_src, reply_dest, LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif);
+  pbuf_free(q);
+}
+
+#endif /* LWIP_ICMP6 && LWIP_IPV6 */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c
new file mode 100644
index 0000000..7cb16aa
--- /dev/null
+++ 
b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/inet6.c @@ -0,0 +1,53 @@ +/** + * @file + * + * INET v6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/inet.h" + +/** This variable is initialized by the system to contain the wildcard IPv6 address. + */ +const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; + +#endif /* LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c new file mode 100644 index 0000000..b0906a3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6.c @@ -0,0 +1,1492 @@ +/** + * @file + * + * IPv6 layer. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/netif.h" +#include "lwip/ip.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip6_frag.h" +#include "lwip/icmp6.h" +#include "lwip/priv/raw_priv.h" +#include "lwip/udp.h" +#include "lwip/priv/tcp_priv.h" +#include "lwip/dhcp6.h" +#include "lwip/nd6.h" +#include "lwip/mld6.h" +#include "lwip/debug.h" +#include "lwip/stats.h" + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/** + * Finds the appropriate network interface for a given IPv6 address. It tries to select + * a netif following a sequence of heuristics: + * 1) if there is only 1 netif, return it + * 2) if the destination is a zoned address, match its zone to a netif + * 3) if the either the source or destination address is a scoped address, + * match the source address's zone (if set) or address (if not) to a netif + * 4) tries to match the destination subnet to a configured address + * 5) tries to find a router-announced route + * 6) tries to match the (unscoped) source address to the netif + * 7) returns the default netif, if configured + * + * Note that each of the two given addresses may or may not be properly zoned. + * + * @param src the source IPv6 address, if known + * @param dest the destination IPv6 address for which to find the route + * @return the netif on which to send to reach dest + */ +struct netif * +ip6_route(const ip6_addr_t *src, const ip6_addr_t *dest) +{ +#if LWIP_SINGLE_NETIF + LWIP_UNUSED_ARG(src); + LWIP_UNUSED_ARG(dest); +#else /* LWIP_SINGLE_NETIF */ + struct netif *netif; + s8_t i; + + LWIP_ASSERT_CORE_LOCKED(); + + /* If single netif configuration, fast return. */ + if ((netif_list != NULL) && (netif_list->next == NULL)) { + if (!netif_is_up(netif_list) || !netif_is_link_up(netif_list) || + (ip6_addr_has_zone(dest) && !ip6_addr_test_zone(dest, netif_list))) { + return NULL; + } + return netif_list; + } + +#if LWIP_IPV6_SCOPES + /* Special processing for zoned destination addresses. This includes link- + * local unicast addresses and interface/link-local multicast addresses. Use + * the zone to find a matching netif. If the address is not zoned, then there + * is technically no "wrong" netif to choose, and we leave routing to other + * rules; in most cases this should be the scoped-source rule below. */ + if (ip6_addr_has_zone(dest)) { + IP6_ADDR_ZONECHECK(dest); + /* Find a netif based on the zone. For custom mappings, one zone may map + * to multiple netifs, so find one that can actually send a packet. 
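+ * For example (editor's sketch): an application holding an unzoned
+ * link-local destination can attach the zone of a known netif first, so
+ * that this lookup has something to match; my_netif is hypothetical:
+ *
+ *   ip6_addr_t ll_dest;
+ *   ip6addr_aton("FE80::1", &ll_dest);            // parsed without a zone
+ *   ip6_addr_assign_zone(&ll_dest, IP6_UNICAST, my_netif);
+ *   struct netif *out = ip6_route(IP6_ADDR_ANY6, &ll_dest);
+ *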
*/ + NETIF_FOREACH(netif) { + if (ip6_addr_test_zone(dest, netif) && + netif_is_up(netif) && netif_is_link_up(netif)) { + return netif; + } + } + /* No matching netif found. Do no try to route to a different netif, + * as that would be a zone violation, resulting in any packets sent to + * that netif being dropped on output. */ + return NULL; + } +#endif /* LWIP_IPV6_SCOPES */ + + /* Special processing for scoped source and destination addresses. If we get + * here, the destination address does not have a zone, so either way we need + * to look at the source address, which may or may not have a zone. If it + * does, the zone is restrictive: there is (typically) only one matching + * netif for it, and we should avoid routing to any other netif as that would + * result in guaranteed zone violations. For scoped source addresses that do + * not have a zone, use (only) a netif that has that source address locally + * assigned. This case also applies to the loopback source address, which has + * an implied link-local scope. If only the destination address is scoped + * (but, again, not zoned), we still want to use only the source address to + * determine its zone because that's most likely what the user/application + * wants, regardless of whether the source address is scoped. Finally, some + * of this story also applies if scoping is disabled altogether. */ +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_scope(dest, IP6_UNKNOWN) || + ip6_addr_has_scope(src, IP6_UNICAST) || +#else /* LWIP_IPV6_SCOPES */ + if (ip6_addr_islinklocal(dest) || ip6_addr_ismulticast_iflocal(dest) || + ip6_addr_ismulticast_linklocal(dest) || ip6_addr_islinklocal(src) || +#endif /* LWIP_IPV6_SCOPES */ + ip6_addr_isloopback(src)) { +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_zone(src)) { + /* Find a netif matching the source zone (relatively cheap). */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif) && netif_is_link_up(netif) && + ip6_addr_test_zone(src, netif)) { + return netif; + } + } + } else +#endif /* LWIP_IPV6_SCOPES */ + { + /* Find a netif matching the source address (relatively expensive). */ + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp_zoneless(src, netif_ip6_addr(netif, i))) { + return netif; + } + } + } + } + /* Again, do not use any other netif in this case, as that could result in + * zone boundary violations. */ + return NULL; + } + + /* We come here only if neither source nor destination is scoped. */ + IP6_ADDR_ZONECHECK(src); + +#ifdef LWIP_HOOK_IP6_ROUTE + netif = LWIP_HOOK_IP6_ROUTE(src, dest); + if (netif != NULL) { + return netif; + } +#endif + + /* See if the destination subnet matches a configured address. In accordance + * with RFC 5942, dynamically configured addresses do not have an implied + * local subnet, and thus should be considered /128 assignments. However, as + * such, the destination address may still match a local address, and so we + * still need to check for exact matches here. By (lwIP) policy, statically + * configured addresses do always have an implied local /64 subnet. 
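+ * For example (editor's sketch): ip6_addr_netcmp() compares only the
+ * leading 64 bits, so these two addresses fall in the same implied
+ * subnet:
+ *
+ *   ip6_addr_t a, b;
+ *   ip6addr_aton("2001:DB8::1", &a);
+ *   ip6addr_aton("2001:DB8::FFFF", &b);
+ *   int same_net = ip6_addr_netcmp(&a, &b);   // nonzero: same /64 prefix
+ *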
*/ + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_netcmp(dest, netif_ip6_addr(netif, i)) && + (netif_ip6_addr_isstatic(netif, i) || + ip6_addr_nethostcmp(dest, netif_ip6_addr(netif, i)))) { + return netif; + } + } + } + + /* Get the netif for a suitable router-announced route. */ + netif = nd6_find_route(dest); + if (netif != NULL) { + return netif; + } + + /* Try with the netif that matches the source address. Given the earlier rule + * for scoped source addresses, this applies to unscoped addresses only. */ + if (!ip6_addr_isany(src)) { + NETIF_FOREACH(netif) { + if (!netif_is_up(netif) || !netif_is_link_up(netif)) { + continue; + } + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(src, netif_ip6_addr(netif, i))) { + return netif; + } + } + } + } + +#if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF + /* loopif is disabled, loopback traffic is passed through any netif */ + if (ip6_addr_isloopback(dest)) { + /* don't check for link on loopback traffic */ + if (netif_default != NULL && netif_is_up(netif_default)) { + return netif_default; + } + /* default netif is not up, just use any netif for loopback traffic */ + NETIF_FOREACH(netif) { + if (netif_is_up(netif)) { + return netif; + } + } + return NULL; + } +#endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ +#endif /* !LWIP_SINGLE_NETIF */ + + /* no matching netif found, use default netif, if up */ + if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default)) { + return NULL; + } + return netif_default; +} + +/** + * @ingroup ip6 + * Select the best IPv6 source address for a given destination IPv6 address. + * + * This implementation follows RFC 6724 Sec. 5 to the following extent: + * - Rules 1, 2, 3: fully implemented + * - Rules 4, 5, 5.5: not applicable + * - Rule 6: not implemented + * - Rule 7: not applicable + * - Rule 8: limited to "prefer /64 subnet match over non-match" + * + * For Rule 2, we deliberately deviate from RFC 6724 Sec. 3.1 by considering + * ULAs to be of smaller scope than global addresses, to avoid that a preferred + * ULA is picked over a deprecated global address when given a global address + * as destination, as that would likely result in broken two-way communication. + * + * As long as temporary addresses are not supported (as used in Rule 7), a + * proper implementation of Rule 8 would obviate the need to implement Rule 6. + * + * @param netif the netif on which to send a packet + * @param dest the destination we are trying to reach (possibly not properly + * zoned) + * @return the most suitable source address to use, or NULL if no suitable + * source address is found + */ +const ip_addr_t * +ip6_select_source_address(struct netif *netif, const ip6_addr_t *dest) +{ + const ip_addr_t *best_addr; + const ip6_addr_t *cand_addr; + s8_t dest_scope, cand_scope; + s8_t best_scope = IP6_MULTICAST_SCOPE_RESERVED; + u8_t i, cand_pref, cand_bits; + u8_t best_pref = 0; + u8_t best_bits = 0; + + /* Start by determining the scope of the given destination address. These + * tests are hopefully (roughly) in order of likeliness to match. 
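+ * Typical use of this function (editor's sketch): pick a source address
+ * before hand-building a packet; the result may be NULL when the netif
+ * has no usable address for the destination:
+ *
+ *   ip6_addr_t dst;
+ *   ip6addr_aton("2001:DB8::1", &dst);
+ *   const ip_addr_t *srcp = ip6_select_source_address(netif, &dst);
+ *   if (srcp != NULL) {
+ *     const ip6_addr_t *src6 = ip_2_ip6(srcp);  // use as source address
+ *   }
+ *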
*/ + if (ip6_addr_isglobal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } else if (ip6_addr_islinklocal(dest) || ip6_addr_isloopback(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_LINK_LOCAL; + } else if (ip6_addr_isuniquelocal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL; + } else if (ip6_addr_ismulticast(dest)) { + dest_scope = ip6_addr_multicast_scope(dest); + } else if (ip6_addr_issitelocal(dest)) { + dest_scope = IP6_MULTICAST_SCOPE_SITE_LOCAL; + } else { + /* no match, consider scope global */ + dest_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } + + best_addr = NULL; + + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + /* Consider only valid (= preferred and deprecated) addresses. */ + if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { + continue; + } + /* Determine the scope of this candidate address. Same ordering idea. */ + cand_addr = netif_ip6_addr(netif, i); + if (ip6_addr_isglobal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_GLOBAL; + } else if (ip6_addr_islinklocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_LINK_LOCAL; + } else if (ip6_addr_isuniquelocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL; + } else if (ip6_addr_issitelocal(cand_addr)) { + cand_scope = IP6_MULTICAST_SCOPE_SITE_LOCAL; + } else { + /* no match, treat as low-priority global scope */ + cand_scope = IP6_MULTICAST_SCOPE_RESERVEDF; + } + cand_pref = ip6_addr_ispreferred(netif_ip6_addr_state(netif, i)); + /* @todo compute the actual common bits, for longest matching prefix. */ + /* We cannot count on the destination address having a proper zone + * assignment, so do not compare zones in this case. */ + cand_bits = ip6_addr_netcmp_zoneless(cand_addr, dest); /* just 1 or 0 for now */ + if (cand_bits && ip6_addr_nethostcmp(cand_addr, dest)) { + return netif_ip_addr6(netif, i); /* Rule 1 */ + } + if ((best_addr == NULL) || /* no alternative yet */ + ((cand_scope < best_scope) && (cand_scope >= dest_scope)) || + ((cand_scope > best_scope) && (best_scope < dest_scope)) || /* Rule 2 */ + ((cand_scope == best_scope) && ((cand_pref > best_pref) || /* Rule 3 */ + ((cand_pref == best_pref) && (cand_bits > best_bits))))) { /* Rule 8 */ + /* We found a new "winning" candidate. */ + best_addr = netif_ip_addr6(netif, i); + best_scope = cand_scope; + best_pref = cand_pref; + best_bits = cand_bits; + } + } + + return best_addr; /* may be NULL */ +} + +#if LWIP_IPV6_FORWARD +/** + * Forwards an IPv6 packet. It finds an appropriate route for the + * packet, decrements the HL value of the packet, and outputs + * the packet on the appropriate interface. + * + * @param p the packet to forward (p->payload points to IP header) + * @param iphdr the IPv6 header of the input packet + * @param inp the netif on which this packet was received + */ +static void +ip6_forward(struct pbuf *p, struct ip6_hdr *iphdr, struct netif *inp) +{ + struct netif *netif; + + /* do not forward link-local or loopback addresses */ + if (ip6_addr_islinklocal(ip6_current_dest_addr()) || + ip6_addr_isloopback(ip6_current_dest_addr())) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not forwarding link-local address.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } + + /* Find network interface where to forward this IP packet to. 
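+ * (Editor's note: this whole forwarding path is compiled only when the
+ * project's lwipopts.h enables it, e.g.
+ *
+ *   #define LWIP_IPV6_FORWARD 1
+ *
+ * and routes must come from nd6 or from the LWIP_HOOK_IP6_ROUTE hook
+ * used by ip6_route() above.)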
*/ + netif = ip6_route(IP6_ADDR_ANY6, ip6_current_dest_addr()); + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(ip6_current_dest_addr()), + IP6_ADDR_BLOCK2(ip6_current_dest_addr()), + IP6_ADDR_BLOCK3(ip6_current_dest_addr()), + IP6_ADDR_BLOCK4(ip6_current_dest_addr()), + IP6_ADDR_BLOCK5(ip6_current_dest_addr()), + IP6_ADDR_BLOCK6(ip6_current_dest_addr()), + IP6_ADDR_BLOCK7(ip6_current_dest_addr()), + IP6_ADDR_BLOCK8(ip6_current_dest_addr()))); +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_dest_unreach(p, ICMP6_DUR_NO_ROUTE); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } +#if LWIP_IPV6_SCOPES + /* Do not forward packets with a zoned (e.g., link-local) source address + * outside of their zone. We determined the zone a bit earlier, so we know + * that the address is properly zoned here, so we can safely use has_zone. + * Also skip packets with a loopback source address (link-local implied). */ + if ((ip6_addr_has_zone(ip6_current_src_addr()) && + !ip6_addr_test_zone(ip6_current_src_addr(), netif)) || + ip6_addr_isloopback(ip6_current_src_addr())) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not forwarding packet beyond its source address zone.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } +#endif /* LWIP_IPV6_SCOPES */ + /* Do not forward packets onto the same network interface on which + * they arrived. */ + if (netif == inp) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not bouncing packets back on incoming interface.\n")); + IP6_STATS_INC(ip6.rterr); + IP6_STATS_INC(ip6.drop); + return; + } + + /* decrement HL */ + IP6H_HOPLIM_SET(iphdr, IP6H_HOPLIM(iphdr) - 1); + /* send ICMP6 if HL == 0 */ + if (IP6H_HOPLIM(iphdr) == 0) { +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_time_exceeded(p, ICMP6_TE_HL); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.drop); + return; + } + + if (netif->mtu && (p->tot_len > netif->mtu)) { +#if LWIP_ICMP6 + /* Don't send ICMP messages in response to ICMP messages */ + if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { + icmp6_packet_too_big(p, netif->mtu); + } +#endif /* LWIP_ICMP6 */ + IP6_STATS_INC(ip6.drop); + return; + } + + LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: forwarding packet to %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(ip6_current_dest_addr()), + IP6_ADDR_BLOCK2(ip6_current_dest_addr()), + IP6_ADDR_BLOCK3(ip6_current_dest_addr()), + IP6_ADDR_BLOCK4(ip6_current_dest_addr()), + IP6_ADDR_BLOCK5(ip6_current_dest_addr()), + IP6_ADDR_BLOCK6(ip6_current_dest_addr()), + IP6_ADDR_BLOCK7(ip6_current_dest_addr()), + IP6_ADDR_BLOCK8(ip6_current_dest_addr()))); + + /* transmit pbuf on chosen interface */ + netif->output_ip6(netif, p, ip6_current_dest_addr()); + IP6_STATS_INC(ip6.fw); + IP6_STATS_INC(ip6.xmit); + return; +} +#endif /* LWIP_IPV6_FORWARD */ + +/** Return true if the current input packet should be accepted on this netif */ +static int +ip6_input_accept(struct netif *netif) +{ + /* interface is up? */ + if (netif_is_up(netif)) { + u8_t i; + /* unicast to this interface address? address configured? 
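+ * (Editor's note: ip6_addr_isvalid() accepts the PREFERRED and
+ * DEPRECATED states only, so addresses still in duplicate address
+ * detection are not matched here, e.g.
+ *
+ *   u8_t st = netif_ip6_addr_state(netif, i);
+ *   int usable = ip6_addr_isvalid(st);   // nonzero once DAD completed
+ * )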
*/ + /* If custom scopes are used, the destination zone will be tested as + * part of the local-address comparison, but we need to test the source + * scope as well (e.g., is this interface on the same link?). */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(ip6_current_dest_addr(), netif_ip6_addr(netif, i)) +#if IPV6_CUSTOM_SCOPES + && (!ip6_addr_has_zone(ip6_current_src_addr()) || + ip6_addr_test_zone(ip6_current_src_addr(), netif)) +#endif /* IPV6_CUSTOM_SCOPES */ + ) { + /* accept on this netif */ + return 1; + } + } + } + return 0; +} + +/** + * This function is called by the network interface device driver when + * an IPv6 packet is received. The function does the basic checks of the + * IP header such as packet size being at least larger than the header + * size etc. If the packet was not destined for us, the packet is + * forwarded (using ip6_forward). + * + * Finally, the packet is sent to the upper layer protocol input function. + * + * @param p the received IPv6 packet (p->payload points to IPv6 header) + * @param inp the netif on which this packet was received + * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't + * processed, but currently always returns ERR_OK) + */ +err_t +ip6_input(struct pbuf *p, struct netif *inp) +{ + struct ip6_hdr *ip6hdr; + struct netif *netif; + const u8_t *nexth; + u16_t hlen, hlen_tot; /* the current header length */ +#if 0 /*IP_ACCEPT_LINK_LAYER_ADDRESSING*/ + @todo + int check_ip_src=1; +#endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ +#if LWIP_RAW + raw_input_state_t raw_status; +#endif /* LWIP_RAW */ + + LWIP_ASSERT_CORE_LOCKED(); + + IP6_STATS_INC(ip6.recv); + + /* identify the IP header */ + ip6hdr = (struct ip6_hdr *)p->payload; + if (IP6H_V(ip6hdr) != 6) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IPv6 packet dropped due to bad version number %"U32_F"\n", + IP6H_V(ip6hdr))); + pbuf_free(p); + IP6_STATS_INC(ip6.err); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + +#ifdef LWIP_HOOK_IP6_INPUT + if (LWIP_HOOK_IP6_INPUT(p, inp)) { + /* the packet has been eaten */ + return ERR_OK; + } +#endif + + /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */ + if ((IP6_HLEN > p->len) || (IP6H_PLEN(ip6hdr) > (p->tot_len - IP6_HLEN))) { + if (IP6_HLEN > p->len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", + (u16_t)IP6_HLEN, p->len)); + } + if ((IP6H_PLEN(ip6hdr) + IP6_HLEN) > p->tot_len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 (plen %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", + (u16_t)(IP6H_PLEN(ip6hdr) + IP6_HLEN), p->tot_len)); + } + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + + /* Trim pbuf. This should have been done at the netif layer, + * but we'll do it anyway just to be sure that its done. */ + pbuf_realloc(p, (u16_t)(IP6_HLEN + IP6H_PLEN(ip6hdr))); + + /* copy IP addresses to aligned ip6_addr_t */ + ip_addr_copy_from_ip6_packed(ip_data.current_iphdr_dest, ip6hdr->dest); + ip_addr_copy_from_ip6_packed(ip_data.current_iphdr_src, ip6hdr->src); + + /* Don't accept virtual IPv4 mapped IPv6 addresses. + * Don't accept multicast source addresses. 
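+ * For example (editor's sketch, assuming LWIP_IPV4 so the mapped form
+ * can be parsed): an IPv4-mapped address has the form ::FFFF:a.b.c.d and
+ * is rejected by the check below:
+ *
+ *   ip6_addr_t m;
+ *   ip6addr_aton("::FFFF:192.0.2.1", &m);
+ *   int mapped = ip6_addr_isipv4mappedipv6(&m);  // nonzero: dropped here
+ *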
*/ + if (ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_dest)) || + ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_src)) || + ip6_addr_ismulticast(ip_2_ip6(&ip_data.current_iphdr_src))) { + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.err); + IP6_STATS_INC(ip6.drop); + return ERR_OK; + } + + /* Set the appropriate zone identifier on the addresses. */ + ip6_addr_assign_zone(ip_2_ip6(&ip_data.current_iphdr_dest), IP6_UNKNOWN, inp); + ip6_addr_assign_zone(ip_2_ip6(&ip_data.current_iphdr_src), IP6_UNICAST, inp); + + /* current header pointer. */ + ip_data.current_ip6_header = ip6hdr; + + /* In netif, used in case we need to send ICMPv6 packets back. */ + ip_data.current_netif = inp; + ip_data.current_input_netif = inp; + + /* match packet against an interface, i.e. is this packet for us? */ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* Always joined to multicast if-local and link-local all-nodes group. */ + if (ip6_addr_isallnodes_iflocal(ip6_current_dest_addr()) || + ip6_addr_isallnodes_linklocal(ip6_current_dest_addr())) { + netif = inp; + } +#if LWIP_IPV6_MLD + else if (mld6_lookfor_group(inp, ip6_current_dest_addr())) { + netif = inp; + } +#else /* LWIP_IPV6_MLD */ + else if (ip6_addr_issolicitednode(ip6_current_dest_addr())) { + u8_t i; + /* Filter solicited node packets when MLD is not enabled + * (for Neighbor discovery). */ + netif = NULL; + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp_solicitednode(ip6_current_dest_addr(), netif_ip6_addr(inp, i))) { + netif = inp; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: solicited node packet accepted on interface %c%c\n", + netif->name[0], netif->name[1])); + break; + } + } + } +#endif /* LWIP_IPV6_MLD */ + else { + netif = NULL; + } + } else { + /* start trying with inp. if that's not acceptable, start walking the + list of configured netifs. */ + if (ip6_input_accept(inp)) { + netif = inp; + } else { + netif = NULL; +#if !IPV6_CUSTOM_SCOPES + /* Shortcut: stop looking for other interfaces if either the source or + * the destination has a scope constrained to this interface. Custom + * scopes may break the 1:1 link/interface mapping, however. */ + if (ip6_addr_islinklocal(ip6_current_dest_addr()) || + ip6_addr_islinklocal(ip6_current_src_addr())) { + goto netif_found; + } +#endif /* !IPV6_CUSTOM_SCOPES */ +#if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF + /* The loopback address is to be considered link-local. Packets to it + * should be dropped on other interfaces, as per RFC 4291 Sec. 2.5.3. + * Its implied scope means packets *from* the loopback address should + * not be accepted on other interfaces, either. These requirements + * cannot be implemented in the case that loopback traffic is sent + * across a non-loopback interface, however. */ + if (ip6_addr_isloopback(ip6_current_dest_addr()) || + ip6_addr_isloopback(ip6_current_src_addr())) { + goto netif_found; + } +#endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */ +#if !LWIP_SINGLE_NETIF + NETIF_FOREACH(netif) { + if (netif == inp) { + /* we checked that before already */ + continue; + } + if (ip6_input_accept(netif)) { + break; + } + } +#endif /* !LWIP_SINGLE_NETIF */ + } +netif_found: + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet accepted on interface %c%c\n", + netif ? netif->name[0] : 'X', netif? netif->name[1] : 'X')); + } + + /* "::" packet source address? 
(used in duplicate address detection) */ + if (ip6_addr_isany(ip6_current_src_addr()) && + (!ip6_addr_issolicitednode(ip6_current_dest_addr()))) { + /* packet source is not valid */ + /* free (drop) packet pbufs */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with src ANY_ADDRESS dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* packet not for us? */ + if (netif == NULL) { + /* packet not for us, route or discard */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_TRACE, ("ip6_input: packet not for us.\n")); +#if LWIP_IPV6_FORWARD + /* non-multicast packet? */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* try to forward IP packet on (other) interfaces */ + ip6_forward(p, ip6hdr, inp); + } +#endif /* LWIP_IPV6_FORWARD */ + pbuf_free(p); + goto ip6_input_cleanup; + } + + /* current netif pointer. */ + ip_data.current_netif = netif; + + /* Save next header type. */ + nexth = &IP6H_NEXTH(ip6hdr); + + /* Init header length. */ + hlen = hlen_tot = IP6_HLEN; + + /* Move to payload. */ + pbuf_remove_header(p, IP6_HLEN); + + /* Process known option extension headers, if present. */ + while (*nexth != IP6_NEXTH_NONE) + { + switch (*nexth) { + case IP6_NEXTH_HOPBYHOP: + { + s32_t opt_offset; + struct ip6_hbh_hdr *hbh_hdr; + struct ip6_opt_hdr *opt_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Hop-by-Hop options header\n")); + + /* Get and check the header length, while staying in packet bounds. */ + hbh_hdr = (struct ip6_hbh_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_HBH_NEXTH(hbh_hdr); + + /* Get the header length. */ + hlen = (u16_t)(8 * (1 + hbh_hdr->_hlen)); + + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* The extended option header starts right after Hop-by-Hop header. */ + opt_offset = IP6_HBH_HLEN; + while (opt_offset < hlen) + { + s32_t opt_dlen = 0; + + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + opt_offset); + + switch (IP6_OPT_TYPE(opt_hdr)) { + /* @todo: process IPV6 Hop-by-Hop option data */ + case IP6_PAD1_OPTION: + /* PAD1 option doesn't have length and value field */ + opt_dlen = -1; + break; + case IP6_PADN_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_ROUTER_ALERT_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_JUMBO_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + default: + /* Check 2 MSB of Hop-by-Hop header type. */ + switch (IP6_OPT_TYPE_ACTION(opt_hdr)) { + case 1: + /* Discard the packet. 
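+ * (Editor's note: the two most significant bits of an option type encode
+ * how an unrecognized option is handled, per RFC 8200 section 4.2:
+ *
+ *   u8_t action = IP6_OPT_TYPE_ACTION(opt_hdr);
+ *   // 0: skip option, 1: discard packet,
+ *   // 2: discard + ICMPv6 Parameter Problem,
+ *   // 3: discard + Parameter Problem only if dest is not multicast
+ *
+ * which is exactly the mapping implemented by this switch.)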
*/ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 2: + /* Send ICMP Parameter Problem */ + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 3: + /* Send ICMP Parameter Problem if destination address is not a multicast address */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + } + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid Hop-by-Hop option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + default: + /* Skip over this option. */ + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + } + break; + } + + /* Adjust the offset to move to the next extended option header */ + opt_offset = opt_offset + IP6_OPT_HLEN + opt_dlen; + } + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_DESTOPTS: + { + s32_t opt_offset; + struct ip6_dest_hdr *dest_hdr; + struct ip6_opt_hdr *opt_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Destination options header\n")); + + dest_hdr = (struct ip6_dest_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_DEST_NEXTH(dest_hdr); + + /* Get the header length. */ + hlen = 8 * (1 + dest_hdr->_hlen); + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* The extended option header starts right after Destination header. */ + opt_offset = IP6_DEST_HLEN; + while (opt_offset < hlen) + { + s32_t opt_dlen = 0; + + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)dest_hdr + opt_offset); + + switch (IP6_OPT_TYPE(opt_hdr)) + { + /* @todo: process IPV6 Destination option data */ + case IP6_PAD1_OPTION: + /* PAD1 option deosn't have length and value field */ + opt_dlen = -1; + break; + case IP6_PADN_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_ROUTER_ALERT_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_JUMBO_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + case IP6_HOME_ADDRESS_OPTION: + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + default: + /* Check 2 MSB of Destination header type. */ + switch (IP6_OPT_TYPE_ACTION(opt_hdr)) + { + case 1: + /* Discard the packet. 
*/ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 2: + /* Send ICMP Parameter Problem */ + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + case 3: + /* Send ICMP Parameter Problem if destination address is not a multicast address */ + if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { + icmp6_param_problem(p, ICMP6_PP_OPTION, opt_hdr); + } + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid destination option type dropped.\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + default: + /* Skip over this option. */ + opt_dlen = IP6_OPT_DLEN(opt_hdr); + break; + } + break; + } + + /* Adjust the offset to move to the next extended option header */ + opt_offset = opt_offset + IP6_OPT_HLEN + opt_dlen; + } + + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_ROUTING: + { + struct ip6_rout_hdr *rout_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Routing header\n")); + + rout_hdr = (struct ip6_rout_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_ROUT_NEXTH(rout_hdr); + + /* Get the header length. */ + hlen = 8 * (1 + rout_hdr->_hlen); + + if ((p->len < 8) || (hlen > p->len)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_STATS_INC(ip6.lenerr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* Skip over this header. */ + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* if segment left value is 0 in routing header, ignore the option */ + if (IP6_ROUT_SEG_LEFT(rout_hdr)) { + /* The length field of routing option header must be even */ + if (rout_hdr->_hlen & 0x1) { + /* Discard and send parameter field error */ + icmp6_param_problem(p, ICMP6_PP_FIELD, &rout_hdr->_hlen); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid routing type dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + switch (IP6_ROUT_TYPE(rout_hdr)) + { + /* TODO: process routing by the type */ + case IP6_ROUT_TYPE2: + break; + case IP6_ROUT_RPL: + break; + default: + /* Discard unrecognized routing type and send parameter field error */ + icmp6_param_problem(p, ICMP6_PP_FIELD, &IP6_ROUT_TYPE(rout_hdr)); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid routing type dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + } + + pbuf_remove_header(p, hlen); + break; + } + case IP6_NEXTH_FRAGMENT: + { + struct ip6_frag_hdr *frag_hdr; + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header\n")); + + frag_hdr = (struct ip6_frag_hdr *)p->payload; + + /* Get next header type. */ + nexth = &IP6_FRAG_NEXTH(frag_hdr); + + /* Fragment Header length. */ + hlen = 8; + + /* Make sure this header fits in current pbuf. 
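+ * (For reference, an editor's sketch: the 16-bit _fragment_offset field
+ * packs the 13-bit offset in 8-octet units together with the M flag, and
+ * is carried in network byte order:
+ *
+ *   u16_t fo = lwip_ntohs(frag_hdr->_fragment_offset);
+ *   u16_t offset_bytes = (u16_t)(fo & IP6_FRAG_OFFSET_MASK); // multiple of 8
+ *   int more_fragments = (fo & IP6_FRAG_MORE_FLAG) != 0;
+ * )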
*/ + if (hlen > p->len) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", + hlen, p->len)); + /* free (drop) packet pbufs */ + pbuf_free(p); + IP6_FRAG_STATS_INC(ip6_frag.lenerr); + IP6_FRAG_STATS_INC(ip6_frag.drop); + goto ip6_input_cleanup; + } + + hlen_tot = (u16_t)(hlen_tot + hlen); + + /* check payload length is multiple of 8 octets when mbit is set */ + if (IP6_FRAG_MBIT(frag_hdr) && (IP6H_PLEN(ip6hdr) & 0x7)) { + /* ipv6 payload length is not multiple of 8 octets */ + icmp6_param_problem(p, ICMP6_PP_FIELD, LWIP_PACKED_CAST(const void *, &ip6hdr->_plen)); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with invalid payload length dropped\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + + /* Offset == 0 and more_fragments == 0? */ + if ((frag_hdr->_fragment_offset & + PP_HTONS(IP6_FRAG_OFFSET_MASK | IP6_FRAG_MORE_FLAG)) == 0) { + /* This is a 1-fragment packet. Skip this header and continue. */ + pbuf_remove_header(p, hlen); + } else { +#if LWIP_IPV6_REASS + /* reassemble the packet */ + ip_data.current_ip_header_tot_len = hlen_tot; + p = ip6_reass(p); + /* packet not fully reassembled yet? */ + if (p == NULL) { + goto ip6_input_cleanup; + } + + /* Returned p point to IPv6 header. + * Update all our variables and pointers and continue. */ + ip6hdr = (struct ip6_hdr *)p->payload; + nexth = &IP6H_NEXTH(ip6hdr); + hlen = hlen_tot = IP6_HLEN; + pbuf_remove_header(p, IP6_HLEN); + +#else /* LWIP_IPV6_REASS */ + /* free (drop) packet pbufs */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header dropped (with LWIP_IPV6_REASS==0)\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.opterr); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; +#endif /* LWIP_IPV6_REASS */ + } + break; + } + default: + goto options_done; + } + + if (*nexth == IP6_NEXTH_HOPBYHOP) { + /* Hop-by-Hop header comes only as a first option */ + icmp6_param_problem(p, ICMP6_PP_HEADER, nexth); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Hop-by-Hop options header dropped (only valid as a first option)\n")); + pbuf_free(p); + IP6_STATS_INC(ip6.drop); + goto ip6_input_cleanup; + } + } + +options_done: + + /* send to upper layers */ + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: \n")); + ip6_debug_print(p); + LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len)); + + ip_data.current_ip_header_tot_len = hlen_tot; + +#if LWIP_RAW + /* p points to IPv6 header again for raw_input. */ + pbuf_add_header_force(p, hlen_tot); + /* raw input did not eat the packet? */ + raw_status = raw_input(p, inp); + if (raw_status != RAW_INPUT_EATEN) + { + /* Point to payload. */ + pbuf_remove_header(p, hlen_tot); +#else /* LWIP_RAW */ + { +#endif /* LWIP_RAW */ + switch (*nexth) { + case IP6_NEXTH_NONE: + pbuf_free(p); + break; +#if LWIP_UDP + case IP6_NEXTH_UDP: +#if LWIP_UDPLITE + case IP6_NEXTH_UDPLITE: +#endif /* LWIP_UDPLITE */ + udp_input(p, inp); + break; +#endif /* LWIP_UDP */ +#if LWIP_TCP + case IP6_NEXTH_TCP: + tcp_input(p, inp); + break; +#endif /* LWIP_TCP */ +#if LWIP_ICMP6 + case IP6_NEXTH_ICMP6: + icmp6_input(p, inp); + break; +#endif /* LWIP_ICMP */ + default: +#if LWIP_RAW + if (raw_status == RAW_INPUT_DELIVERED) { + /* @todo: ipv6 mib in-delivers? */ + } else +#endif /* LWIP_RAW */ + { +#if LWIP_ICMP6 + /* p points to IPv6 header again for raw_input. 
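+ * (Editor's note: the header is restored first because
+ * icmp6_param_problem() computes the problem pointer as a byte offset
+ * from ip6_current_header():
+ *
+ *   u32_t off = (u32_t)((const u8_t *)nexth -
+ *                       (const u8_t *)ip6_current_header());
+ *
+ * so nexth must lie inside the headers that p covers again.)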
*/ + pbuf_add_header_force(p, hlen_tot); + /* send ICMP parameter problem unless it was a multicast or ICMPv6 */ + if ((!ip6_addr_ismulticast(ip6_current_dest_addr())) && + (IP6H_NEXTH(ip6hdr) != IP6_NEXTH_ICMP6)) { + icmp6_param_problem(p, ICMP6_PP_HEADER, nexth); + } +#endif /* LWIP_ICMP */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_input: Unsupported transport protocol %"U16_F"\n", (u16_t)IP6H_NEXTH(ip6hdr))); + IP6_STATS_INC(ip6.proterr); + IP6_STATS_INC(ip6.drop); + } + pbuf_free(p); + break; + } + } + +ip6_input_cleanup: + ip_data.current_netif = NULL; + ip_data.current_input_netif = NULL; + ip_data.current_ip6_header = NULL; + ip_data.current_ip_header_tot_len = 0; + ip6_addr_set_zero(ip6_current_src_addr()); + ip6_addr_set_zero(ip6_current_dest_addr()); + + return ERR_OK; +} + + +/** + * Sends an IPv6 packet on a network interface. This function constructs + * the IPv6 header. If the source IPv6 address is NULL, the IPv6 "ANY" address is + * used as source (usually during network startup). If the source IPv6 address it + * IP6_ADDR_ANY, the most appropriate IPv6 address of the outgoing network + * interface is filled in as source address. If the destination IPv6 address is + * LWIP_IP_HDRINCL, p is assumed to already include an IPv6 header and + * p->payload points to it instead of the data. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. + * if src == NULL, IP6_ADDR_ANY is used as source) (src is possibly not + * properly zoned) + * @param dest the destination IPv6 address to send the packet to (possibly not + * properly zoned) + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * @param netif the netif on which to send this packet + * @return ERR_OK if the packet was sent OK + * ERR_BUF if p doesn't have enough space for IPv6/LINK headers + * returns errors returned by netif->output_ip6 + */ +err_t +ip6_output_if(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, + u8_t nexth, struct netif *netif) +{ + const ip6_addr_t *src_used = src; + if (dest != LWIP_IP_HDRINCL) { + if (src != NULL && ip6_addr_isany(src)) { + src_used = ip_2_ip6(ip6_select_source_address(netif, dest)); + if ((src_used == NULL) || ip6_addr_isany(src_used)) { + /* No appropriate source address was found for this packet. */ + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: No suitable source address for packet.\n")); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + } + } + return ip6_output_if_src(p, src_used, dest, hl, tc, nexth, netif); +} + +/** + * Same as ip6_output_if() but 'src' address is not replaced by netif address + * when it is 'any'. + */ +err_t +ip6_output_if_src(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, + u8_t nexth, struct netif *netif) +{ + struct ip6_hdr *ip6hdr; + ip6_addr_t dest_addr; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + /* Should the IPv6 header be generated or is it already included in p? 
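+ * Typical caller pattern (editor's sketch, mirroring the ICMPv6 code in
+ * this change set; dst, data, payload_len and netif are hypothetical):
+ *
+ *   struct pbuf *q = pbuf_alloc(PBUF_IP, payload_len, PBUF_RAM);
+ *   if (q != NULL) {
+ *     memcpy(q->payload, data, payload_len);  // next-header payload
+ *     // IP6_ADDR_ANY6 as source lets ip6_output_if() pick one
+ *     ip6_output_if(q, IP6_ADDR_ANY6, &dst, LWIP_ICMP6_HL, 0,
+ *                   IP6_NEXTH_ICMP6, netif);
+ *     pbuf_free(q);
+ *   }
+ *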
*/ + if (dest != LWIP_IP_HDRINCL) { +#if LWIP_IPV6_SCOPES + /* If the destination address is scoped but lacks a zone, add a zone now, + * based on the outgoing interface. The lower layers (e.g., nd6) absolutely + * require addresses to be properly zoned for correctness. In some cases, + * earlier attempts will have been made to add a zone to the destination, + * but this function is the only one that is called in all (other) cases, + * so we must do this here. */ + if (ip6_addr_lacks_zone(dest, IP6_UNKNOWN)) { + ip6_addr_copy(dest_addr, *dest); + ip6_addr_assign_zone(&dest_addr, IP6_UNKNOWN, netif); + dest = &dest_addr; + } +#endif /* LWIP_IPV6_SCOPES */ + + /* generate IPv6 header */ + if (pbuf_add_header(p, IP6_HLEN)) { + LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: not enough room for IPv6 header in pbuf\n")); + IP6_STATS_INC(ip6.err); + return ERR_BUF; + } + + ip6hdr = (struct ip6_hdr *)p->payload; + LWIP_ASSERT("check that first pbuf can hold struct ip6_hdr", + (p->len >= sizeof(struct ip6_hdr))); + + IP6H_HOPLIM_SET(ip6hdr, hl); + IP6H_NEXTH_SET(ip6hdr, nexth); + + /* dest cannot be NULL here */ + ip6_addr_copy_to_packed(ip6hdr->dest, *dest); + + IP6H_VTCFL_SET(ip6hdr, 6, tc, 0); + IP6H_PLEN_SET(ip6hdr, (u16_t)(p->tot_len - IP6_HLEN)); + + if (src == NULL) { + src = IP6_ADDR_ANY6; + } + /* src cannot be NULL here */ + ip6_addr_copy_to_packed(ip6hdr->src, *src); + + } else { + /* IP header already included in p */ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + ip6_addr_assign_zone(&dest_addr, IP6_UNKNOWN, netif); + dest = &dest_addr; + } + + IP6_STATS_INC(ip6.xmit); + + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num)); + ip6_debug_print(p); + +#if ENABLE_LOOPBACK + { + int i; +#if !LWIP_HAVE_LOOPIF + if (ip6_addr_isloopback(dest)) { + return netif_loop_output(netif, p); + } +#endif /* !LWIP_HAVE_LOOPIF */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp(dest, netif_ip6_addr(netif, i))) { + /* Packet to self, enqueue it for loopback */ + LWIP_DEBUGF(IP6_DEBUG, ("netif_loop_output()\n")); + return netif_loop_output(netif, p); + } + } + } +#if LWIP_MULTICAST_TX_OPTIONS + if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0) { + netif_loop_output(netif, p); + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ +#endif /* ENABLE_LOOPBACK */ +#if LWIP_IPV6_FRAG + /* don't fragment if interface has mtu set to 0 [loopif] */ + if (netif_mtu6(netif) && (p->tot_len > nd6_get_destination_mtu(dest, netif))) { + return ip6_frag(p, netif, dest); + } +#endif /* LWIP_IPV6_FRAG */ + + LWIP_DEBUGF(IP6_DEBUG, ("netif->output_ip6()\n")); + return netif->output_ip6(netif, p, dest); +} + +/** + * Simple interface to ip6_output_if. It finds the outgoing network + * interface and calls upon ip6_output_if to do the actual work. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. 
+ * if src == NULL, IP6_ADDR_ANY is used as source) + * @param dest the destination IPv6 address to send the packet to + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip6_output(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth) +{ + struct netif *netif; + struct ip6_hdr *ip6hdr; + ip6_addr_t src_addr, dest_addr; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if (dest != LWIP_IP_HDRINCL) { + netif = ip6_route(src, dest); + } else { + /* IP header included in p, read addresses. */ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(src_addr, ip6hdr->src); + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + netif = ip6_route(&src_addr, &dest_addr); + } + + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(dest), + IP6_ADDR_BLOCK2(dest), + IP6_ADDR_BLOCK3(dest), + IP6_ADDR_BLOCK4(dest), + IP6_ADDR_BLOCK5(dest), + IP6_ADDR_BLOCK6(dest), + IP6_ADDR_BLOCK7(dest), + IP6_ADDR_BLOCK8(dest))); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + + return ip6_output_if(p, src, dest, hl, tc, nexth, netif); +} + + +#if LWIP_NETIF_USE_HINTS +/** Like ip6_output, but takes and addr_hint pointer that is passed on to netif->addr_hint + * before calling ip6_output_if. + * + * @param p the packet to send (p->payload points to the data, e.g. next + protocol header; if dest == LWIP_IP_HDRINCL, p already includes an + IPv6 header and p->payload points to that IPv6 header) + * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an + * IP address of the netif is selected and used as source address. + * if src == NULL, IP6_ADDR_ANY is used as source) + * @param dest the destination IPv6 address to send the packet to + * @param hl the Hop Limit value to be set in the IPv6 header + * @param tc the Traffic Class value to be set in the IPv6 header + * @param nexth the Next Header to be set in the IPv6 header + * @param netif_hint netif output hint pointer set to netif->hint before + * calling ip_output_if() + * + * @return ERR_RTE if no route is found + * see ip_output_if() for more return values + */ +err_t +ip6_output_hinted(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif_hint *netif_hint) +{ + struct netif *netif; + struct ip6_hdr *ip6hdr; + ip6_addr_t src_addr, dest_addr; + err_t err; + + LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); + + if (dest != LWIP_IP_HDRINCL) { + netif = ip6_route(src, dest); + } else { + /* IP header included in p, read addresses. 
*/ + ip6hdr = (struct ip6_hdr *)p->payload; + ip6_addr_copy_from_packed(src_addr, ip6hdr->src); + ip6_addr_copy_from_packed(dest_addr, ip6hdr->dest); + netif = ip6_route(&src_addr, &dest_addr); + } + + if (netif == NULL) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", + IP6_ADDR_BLOCK1(dest), + IP6_ADDR_BLOCK2(dest), + IP6_ADDR_BLOCK3(dest), + IP6_ADDR_BLOCK4(dest), + IP6_ADDR_BLOCK5(dest), + IP6_ADDR_BLOCK6(dest), + IP6_ADDR_BLOCK7(dest), + IP6_ADDR_BLOCK8(dest))); + IP6_STATS_INC(ip6.rterr); + return ERR_RTE; + } + + NETIF_SET_HINTS(netif, netif_hint); + err = ip6_output_if(p, src, dest, hl, tc, nexth, netif); + NETIF_RESET_HINTS(netif); + + return err; +} +#endif /* LWIP_NETIF_USE_HINTS*/ + +#if LWIP_IPV6_MLD +/** + * Add a hop-by-hop options header with a router alert option and padding. + * + * Used by MLD when sending a Multicast listener report/done message. + * + * @param p the packet to which we will prepend the options header + * @param nexth the next header protocol number (e.g. IP6_NEXTH_ICMP6) + * @param value the value of the router alert option data (e.g. IP6_ROUTER_ALERT_VALUE_MLD) + * @return ERR_OK if hop-by-hop header was added, ERR_* otherwise + */ +err_t +ip6_options_add_hbh_ra(struct pbuf *p, u8_t nexth, u8_t value) +{ + u8_t *opt_data; + u32_t offset = 0; + struct ip6_hbh_hdr *hbh_hdr; + struct ip6_opt_hdr *opt_hdr; + + /* fixed 4 bytes for router alert option and 2 bytes padding */ + const u8_t hlen = (sizeof(struct ip6_opt_hdr) * 2) + IP6_ROUTER_ALERT_DLEN; + /* Move pointer to make room for hop-by-hop options header. */ + if (pbuf_add_header(p, sizeof(struct ip6_hbh_hdr) + hlen)) { + LWIP_DEBUGF(IP6_DEBUG, ("ip6_options: no space for options header\n")); + IP6_STATS_INC(ip6.err); + return ERR_BUF; + } + + /* Set fields of Hop-by-Hop header */ + hbh_hdr = (struct ip6_hbh_hdr *)p->payload; + IP6_HBH_NEXTH(hbh_hdr) = nexth; + hbh_hdr->_hlen = 0; + offset = IP6_HBH_HLEN; + + /* Set router alert options to Hop-by-Hop extended option header */ + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + offset); + IP6_OPT_TYPE(opt_hdr) = IP6_ROUTER_ALERT_OPTION; + IP6_OPT_DLEN(opt_hdr) = IP6_ROUTER_ALERT_DLEN; + offset += IP6_OPT_HLEN; + + /* Set router alert option data */ + opt_data = (u8_t *)hbh_hdr + offset; + opt_data[0] = value; + opt_data[1] = 0; + offset += IP6_OPT_DLEN(opt_hdr); + + /* add 2 bytes padding to make 8 bytes Hop-by-Hop header length */ + opt_hdr = (struct ip6_opt_hdr *)((u8_t *)hbh_hdr + offset); + IP6_OPT_TYPE(opt_hdr) = IP6_PADN_OPTION; + IP6_OPT_DLEN(opt_hdr) = 0; + + return ERR_OK; +} +#endif /* LWIP_IPV6_MLD */ + +#if IP6_DEBUG +/* Print an IPv6 header by using LWIP_DEBUGF + * @param p an IPv6 packet, p->payload pointing to the IPv6 header + */ +void +ip6_debug_print(struct pbuf *p) +{ + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload; + + LWIP_DEBUGF(IP6_DEBUG, ("IPv6 header:\n")); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %2"U16_F" | %3"U16_F" | %7"U32_F" | (ver, class, flow)\n", + IP6H_V(ip6hdr), + IP6H_TC(ip6hdr), + IP6H_FL(ip6hdr))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %5"U16_F" | %3"U16_F" | %3"U16_F" | (plen, nexth, hopl)\n", + IP6H_PLEN(ip6hdr), + IP6H_NEXTH(ip6hdr), + IP6H_HOPLIM(ip6hdr))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (src)\n", + 
IP6_ADDR_BLOCK1(&(ip6hdr->src)), + IP6_ADDR_BLOCK2(&(ip6hdr->src)), + IP6_ADDR_BLOCK3(&(ip6hdr->src)), + IP6_ADDR_BLOCK4(&(ip6hdr->src)))); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n", + IP6_ADDR_BLOCK5(&(ip6hdr->src)), + IP6_ADDR_BLOCK6(&(ip6hdr->src)), + IP6_ADDR_BLOCK7(&(ip6hdr->src)), + IP6_ADDR_BLOCK8(&(ip6hdr->src)))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (dest)\n", + IP6_ADDR_BLOCK1(&(ip6hdr->dest)), + IP6_ADDR_BLOCK2(&(ip6hdr->dest)), + IP6_ADDR_BLOCK3(&(ip6hdr->dest)), + IP6_ADDR_BLOCK4(&(ip6hdr->dest)))); + LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n", + IP6_ADDR_BLOCK5(&(ip6hdr->dest)), + IP6_ADDR_BLOCK6(&(ip6hdr->dest)), + IP6_ADDR_BLOCK7(&(ip6hdr->dest)), + IP6_ADDR_BLOCK8(&(ip6hdr->dest)))); + LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); +} +#endif /* IP6_DEBUG */ + +#endif /* LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c new file mode 100644 index 0000000..aafba72 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_addr.c @@ -0,0 +1,343 @@ +/** + * @file + * + * IPv6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * Functions for handling IPv6 addresses. 
+ * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/def.h" + +#include + +#if LWIP_IPV4 +#include "lwip/ip4_addr.h" /* for ip6addr_aton to handle IPv4-mapped addresses */ +#endif /* LWIP_IPV4 */ + +/* used by IP6_ADDR_ANY(6) in ip6_addr.h */ +const ip_addr_t ip6_addr_any = IPADDR6_INIT(0ul, 0ul, 0ul, 0ul); + +#define lwip_xchar(i) ((char)((i) < 10 ? '0' + (i) : 'A' + (i) - 10)) + +/** + * Check whether "cp" is a valid ascii representation + * of an IPv6 address and convert to a binary address. + * Returns 1 if the address is valid, 0 if not. + * + * @param cp IPv6 address in ascii representation (e.g. "FF01::1") + * @param addr pointer to which to save the ip address in network order + * @return 1 if cp could be converted to addr, 0 on failure + */ +int +ip6addr_aton(const char *cp, ip6_addr_t *addr) +{ + u32_t addr_index, zero_blocks, current_block_index, current_block_value; + const char *s; +#if LWIP_IPV4 + int check_ipv4_mapped = 0; +#endif /* LWIP_IPV4 */ + + /* Count the number of colons, to count the number of blocks in a "::" sequence + zero_blocks may be 1 even if there are no :: sequences */ + zero_blocks = 8; + for (s = cp; *s != 0; s++) { + if (*s == ':') { + zero_blocks--; +#if LWIP_IPV4 + } else if (*s == '.') { + if ((zero_blocks == 5) ||(zero_blocks == 2)) { + check_ipv4_mapped = 1; + /* last block could be the start of an IPv4 address */ + zero_blocks--; + } else { + /* invalid format */ + return 0; + } + break; +#endif /* LWIP_IPV4 */ + } else if (!lwip_isxdigit(*s)) { + break; + } + } + + /* parse each block */ + addr_index = 0; + current_block_index = 0; + current_block_value = 0; + for (s = cp; *s != 0; s++) { + if (*s == ':') { + if (addr) { + if (current_block_index & 0x1) { + addr->addr[addr_index++] |= current_block_value; + } + else { + addr->addr[addr_index] = current_block_value << 16; + } + } + current_block_index++; +#if LWIP_IPV4 + if (check_ipv4_mapped) { + if (current_block_index == 6) { + ip4_addr_t ip4; + int ret = ip4addr_aton(s + 1, &ip4); + if (ret) { + if (addr) { + addr->addr[3] = lwip_htonl(ip4.addr); + current_block_index++; + goto fix_byte_order_and_return; + } + return 1; + } + } + } +#endif /* LWIP_IPV4 */ + current_block_value = 0; + if (current_block_index > 7) { + /* address too long! */ + return 0; + } + if (s[1] == ':') { + if (s[2] == ':') { + /* invalid format: three successive colons */ + return 0; + } + s++; + /* "::" found, set zeros */ + while (zero_blocks > 0) { + zero_blocks--; + if (current_block_index & 0x1) { + addr_index++; + } else { + if (addr) { + addr->addr[addr_index] = 0; + } + } + current_block_index++; + if (current_block_index > 7) { + /* address too long! */ + return 0; + } + } + } + } else if (lwip_isxdigit(*s)) { + /* add current digit */ + current_block_value = (current_block_value << 4) + + (lwip_isdigit(*s) ? (u32_t)(*s - '0') : + (u32_t)(10 + (lwip_islower(*s) ? *s - 'a' : *s - 'A'))); + } else { + /* unexpected digit, space? CRLF? */ + break; + } + } + + if (addr) { + if (current_block_index & 0x1) { + addr->addr[addr_index++] |= current_block_value; + } + else { + addr->addr[addr_index] = current_block_value << 16; + } +#if LWIP_IPV4 +fix_byte_order_and_return: +#endif + /* convert to network byte order. 
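+ * Each 32-bit word is assembled in host order from two 16-bit blocks and
+ * only converted here, at the end. A hedged caller-side sketch (variable
+ * names hypothetical, not part of this file):
+ *
+ *   ip6_addr_t a;
+ *   if (ip6addr_aton("2001:DB8::1", &a)) {
+ *     // a.addr[0] == lwip_htonl(0x20010DB8UL)  -> bytes 20 01 0D B8
+ *     // a.addr[1] == 0, a.addr[2] == 0
+ *     // a.addr[3] == lwip_htonl(0x00000001UL)
+ *   }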
*/ + for (addr_index = 0; addr_index < 4; addr_index++) { + addr->addr[addr_index] = lwip_htonl(addr->addr[addr_index]); + } + + ip6_addr_clear_zone(addr); + } + + if (current_block_index != 7) { + return 0; + } + + return 1; +} + +/** + * Convert numeric IPv6 address into ASCII representation. + * returns ptr to static buffer; not reentrant! + * + * @param addr ip6 address in network order to convert + * @return pointer to a global static (!) buffer that holds the ASCII + * representation of addr + */ +char * +ip6addr_ntoa(const ip6_addr_t *addr) +{ + static char str[40]; + return ip6addr_ntoa_r(addr, str, 40); +} + +/** + * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used. + * + * @param addr ip6 address in network order to convert + * @param buf target buffer where the string is stored + * @param buflen length of buf + * @return either pointer to buf which now holds the ASCII + * representation of addr or NULL if buf was too small + */ +char * +ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen) +{ + u32_t current_block_index, current_block_value, next_block_value; + s32_t i; + u8_t zero_flag, empty_block_flag; + +#if LWIP_IPV4 + if (ip6_addr_isipv4mappedipv6(addr)) { + /* This is an IPv4 mapped address */ + ip4_addr_t addr4; + char *ret; +#define IP4MAPPED_HEADER "::FFFF:" + char *buf_ip4 = buf + sizeof(IP4MAPPED_HEADER) - 1; + int buflen_ip4 = buflen - sizeof(IP4MAPPED_HEADER) + 1; + if (buflen < (int)sizeof(IP4MAPPED_HEADER)) { + return NULL; + } + memcpy(buf, IP4MAPPED_HEADER, sizeof(IP4MAPPED_HEADER)); + addr4.addr = addr->addr[3]; + ret = ip4addr_ntoa_r(&addr4, buf_ip4, buflen_ip4); + if (ret != buf_ip4) { + return NULL; + } + return buf; + } +#endif /* LWIP_IPV4 */ + i = 0; + empty_block_flag = 0; /* used to indicate a zero chain for "::' */ + + for (current_block_index = 0; current_block_index < 8; current_block_index++) { + /* get the current 16-bit block */ + current_block_value = lwip_htonl(addr->addr[current_block_index >> 1]); + if ((current_block_index & 0x1) == 0) { + current_block_value = current_block_value >> 16; + } + current_block_value &= 0xffff; + + /* Check for empty block. */ + if (current_block_value == 0) { + if (current_block_index == 7 && empty_block_flag == 1) { + /* special case, we must render a ':' for the last block. */ + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + break; + } + if (empty_block_flag == 0) { + /* generate empty block "::", but only if more than one contiguous zero block, + * according to current formatting suggestions RFC 5952. */ + next_block_value = lwip_htonl(addr->addr[(current_block_index + 1) >> 1]); + if ((current_block_index & 0x1) == 0x01) { + next_block_value = next_block_value >> 16; + } + next_block_value &= 0xffff; + if (next_block_value == 0) { + empty_block_flag = 1; + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + continue; /* move on to next block. */ + } + } else if (empty_block_flag == 1) { + /* move on to next block. */ + continue; + } + } else if (empty_block_flag == 1) { + /* Set this flag value so we don't produce multiple empty blocks. 
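+ * Only the first run of two or more zero blocks is compressed to "::";
+ * later zero runs are printed out in full. Illustrative sketch (values
+ * hypothetical):
+ *
+ *   char buf[40];
+ *   // 2001:DB8:0:0:1:0:0:1 is rendered as "2001:DB8::1:0:0:1"
+ *   ip6addr_ntoa_r(&addr, buf, sizeof(buf));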
*/ + empty_block_flag = 2; + } + + if (current_block_index > 0) { + buf[i++] = ':'; + if (i >= buflen) { + return NULL; + } + } + + if ((current_block_value & 0xf000) == 0) { + zero_flag = 1; + } else { + buf[i++] = lwip_xchar(((current_block_value & 0xf000) >> 12)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + if (((current_block_value & 0xf00) == 0) && (zero_flag)) { + /* do nothing */ + } else { + buf[i++] = lwip_xchar(((current_block_value & 0xf00) >> 8)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + if (((current_block_value & 0xf0) == 0) && (zero_flag)) { + /* do nothing */ + } + else { + buf[i++] = lwip_xchar(((current_block_value & 0xf0) >> 4)); + zero_flag = 0; + if (i >= buflen) { + return NULL; + } + } + + buf[i++] = lwip_xchar((current_block_value & 0xf)); + if (i >= buflen) { + return NULL; + } + } + + buf[i] = 0; + + return buf; +} + +#endif /* LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c new file mode 100644 index 0000000..0f277b6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/ip6_frag.c @@ -0,0 +1,862 @@ +/** + * @file + * + * IPv6 fragmentation and reassembly. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#include "lwip/opt.h" +#include "lwip/ip6_frag.h" +#include "lwip/ip6.h" +#include "lwip/icmp6.h" +#include "lwip/nd6.h" +#include "lwip/ip.h" + +#include "lwip/pbuf.h" +#include "lwip/memp.h" +#include "lwip/stats.h" + +#include + +#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */ + + +/** Setting this to 0, you can turn off checking the fragments for overlapping + * regions. The code gets a little smaller. 
Only use this if you know that + * overlapping won't occur on your network! */ +#ifndef IP_REASS_CHECK_OVERLAP +#define IP_REASS_CHECK_OVERLAP 1 +#endif /* IP_REASS_CHECK_OVERLAP */ + +/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is + * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. + * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA + * is set to 1, so one datagram can be reassembled at a time, only. */ +#ifndef IP_REASS_FREE_OLDEST +#define IP_REASS_FREE_OLDEST 1 +#endif /* IP_REASS_FREE_OLDEST */ + +#if IPV6_FRAG_COPYHEADER +/* The number of bytes we need to "borrow" from (i.e., overwrite in) the header + * that precedes the fragment header for reassembly pruposes. */ +#define IPV6_FRAG_REQROOM ((s16_t)(sizeof(struct ip6_reass_helper) - IP6_FRAG_HLEN)) +#endif + +#define IP_REASS_FLAG_LASTFRAG 0x01 + +/** This is a helper struct which holds the starting + * offset and the ending offset of this fragment to + * easily chain the fragments. + * It has the same packing requirements as the IPv6 header, since it replaces + * the Fragment Header in memory in incoming fragments to keep + * track of the various fragments. + */ +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/bpstruct.h" +#endif +PACK_STRUCT_BEGIN +struct ip6_reass_helper { + PACK_STRUCT_FIELD(struct pbuf *next_pbuf); + PACK_STRUCT_FIELD(u16_t start); + PACK_STRUCT_FIELD(u16_t end); +} PACK_STRUCT_STRUCT; +PACK_STRUCT_END +#ifdef PACK_STRUCT_USE_INCLUDES +# include "arch/epstruct.h" +#endif + +/* static variables */ +static struct ip6_reassdata *reassdatagrams; +static u16_t ip6_reass_pbufcount; + +/* Forward declarations. */ +static void ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr); +#if IP_REASS_FREE_OLDEST +static void ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed); +#endif /* IP_REASS_FREE_OLDEST */ + +void +ip6_reass_tmr(void) +{ + struct ip6_reassdata *r, *tmp; + +#if !IPV6_FRAG_COPYHEADER + LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1", + sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN); +#endif /* !IPV6_FRAG_COPYHEADER */ + + r = reassdatagrams; + while (r != NULL) { + /* Decrement the timer. Once it reaches 0, + * clean up the incomplete fragment assembly */ + if (r->timer > 0) { + r->timer--; + r = r->next; + } else { + /* reassembly timed out */ + tmp = r; + /* get the next pointer before freeing */ + r = r->next; + /* free the helper struct and all enqueued pbufs */ + ip6_reass_free_complete_datagram(tmp); + } + } +} + +/** + * Free a datagram (struct ip6_reassdata) and all its pbufs. + * Updates the total count of enqueued pbufs (ip6_reass_pbufcount), + * sends an ICMP time exceeded packet. + * + * @param ipr datagram to free + */ +static void +ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr) +{ + struct ip6_reassdata *prev; + u16_t pbufs_freed = 0; + u16_t clen; + struct pbuf *p; + struct ip6_reass_helper *iprh; + +#if LWIP_ICMP6 + iprh = (struct ip6_reass_helper *)ipr->p->payload; + if (iprh->start == 0) { + /* The first fragment was received, send ICMP time exceeded. */ + /* First, de-queue the first pbuf from r->p. */ + p = ipr->p; + ipr->p = iprh->next_pbuf; + /* Restore the part that we've overwritten with our helper structure, or we + * might send garbage (and disclose a pointer) in the ICMPv6 reply. 
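+ * Layout sketch of what this restore undoes (the common case where the
+ * helper fits inside the 8-byte Fragment Header):
+ *
+ *   on the wire : | IPv6 hdr | ext hdrs | Frag hdr         | frag data |
+ *   while queued: | IPv6 hdr | ext hdrs | ip6_reass_helper | frag data |
+ *
+ * orig_hdr holds the bytes that were overwritten when the first fragment
+ * was enqueued.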
*/ + MEMCPY(p->payload, ipr->orig_hdr, sizeof(iprh)); + /* Then, move back to the original ipv6 header (we are now pointing to Fragment header). + This cannot fail since we already checked when receiving this fragment. */ + if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)ipr->iphdr))) { + LWIP_ASSERT("ip6_reass_free: moving p->payload to ip6 header failed\n", 0); + } + else { + /* Reconstruct the zoned source and destination addresses, so that we do + * not end up sending the ICMP response over the wrong link. */ + ip6_addr_t src_addr, dest_addr; + ip6_addr_copy_from_packed(src_addr, IPV6_FRAG_SRC(ipr)); + ip6_addr_set_zone(&src_addr, ipr->src_zone); + ip6_addr_copy_from_packed(dest_addr, IPV6_FRAG_DEST(ipr)); + ip6_addr_set_zone(&dest_addr, ipr->dest_zone); + /* Send the actual ICMP response. */ + icmp6_time_exceeded_with_addrs(p, ICMP6_TE_FRAG, &src_addr, &dest_addr); + } + clen = pbuf_clen(p); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(p); + } +#endif /* LWIP_ICMP6 */ + + /* First, free all received pbufs. The individual pbufs need to be released + separately as they have not yet been chained */ + p = ipr->p; + while (p != NULL) { + struct pbuf *pcur; + iprh = (struct ip6_reass_helper *)p->payload; + pcur = p; + /* get the next pointer before freeing */ + p = iprh->next_pbuf; + clen = pbuf_clen(pcur); + LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); + pbufs_freed = (u16_t)(pbufs_freed + clen); + pbuf_free(pcur); + } + + /* Then, unchain the struct ip6_reassdata from the list and free it. */ + if (ipr == reassdatagrams) { + reassdatagrams = ipr->next; + } else { + prev = reassdatagrams; + while (prev != NULL) { + if (prev->next == ipr) { + break; + } + prev = prev->next; + } + if (prev != NULL) { + prev->next = ipr->next; + } + } + memp_free(MEMP_IP6_REASSDATA, ipr); + + /* Finally, update number of pbufs in reassembly queue */ + LWIP_ASSERT("ip_reass_pbufcount >= clen", ip6_reass_pbufcount >= pbufs_freed); + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount - pbufs_freed); +} + +#if IP_REASS_FREE_OLDEST +/** + * Free the oldest datagram to make room for enqueueing new fragments. + * The datagram ipr is not freed! + * + * @param ipr ip6_reassdata for the current fragment + * @param pbufs_needed number of pbufs needed to enqueue + * (used for freeing other datagrams if not enough space) + */ +static void +ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed) +{ + struct ip6_reassdata *r, *oldest; + + /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs, + * but don't free the current datagram! */ + do { + r = oldest = reassdatagrams; + while (r != NULL) { + if (r != ipr) { + if (r->timer <= oldest->timer) { + /* older than the previous oldest */ + oldest = r; + } + } + r = r->next; + } + if (oldest == ipr) { + /* nothing to free, ipr is the only element on the list */ + return; + } + if (oldest != NULL) { + ip6_reass_free_complete_datagram(oldest); + } + } while (((ip6_reass_pbufcount + pbufs_needed) > IP_REASS_MAX_PBUFS) && (reassdatagrams != NULL)); +} +#endif /* IP_REASS_FREE_OLDEST */ + +/** + * Reassembles incoming IPv6 fragments into an IPv6 datagram. 
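+ *
+ * Caller-side sketch (a simplified illustration of how ip6_input() is
+ * expected to use this function, not normative):
+ *
+ *   // p->payload points at the Fragment Header on entry
+ *   p = ip6_reass(p);
+ *   if (p == NULL) {
+ *     return; // incomplete (or dropped); ip6_reass took pbuf ownership
+ *   }
+ *   // complete: p->payload points at the reassembled IPv6 header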
+ * + * @param p points to the IPv6 Fragment Header + * @return NULL if reassembly is incomplete, pbuf pointing to + * IPv6 Header if reassembly is complete + */ +struct pbuf * +ip6_reass(struct pbuf *p) +{ + struct ip6_reassdata *ipr, *ipr_prev; + struct ip6_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL; + struct ip6_frag_hdr *frag_hdr; + u16_t offset, len, start, end; + ptrdiff_t hdrdiff; + u16_t clen; + u8_t valid = 1; + struct pbuf *q, *next_pbuf; + + IP6_FRAG_STATS_INC(ip6_frag.recv); + + /* ip6_frag_hdr must be in the first pbuf, not chained. Checked by caller. */ + LWIP_ASSERT("IPv6 fragment header does not fit in first pbuf", + p->len >= sizeof(struct ip6_frag_hdr)); + + frag_hdr = (struct ip6_frag_hdr *) p->payload; + + clen = pbuf_clen(p); + + offset = lwip_ntohs(frag_hdr->_fragment_offset); + + /* Calculate fragment length from IPv6 payload length. + * Adjust for headers before Fragment Header. + * And finally adjust by Fragment Header length. */ + len = lwip_ntohs(ip6_current_header()->_plen); + hdrdiff = (u8_t*)p->payload - (const u8_t*)ip6_current_header(); + LWIP_ASSERT("not a valid pbuf (ip6_input check missing?)", hdrdiff <= 0xFFFF); + LWIP_ASSERT("not a valid pbuf (ip6_input check missing?)", hdrdiff >= IP6_HLEN); + hdrdiff -= IP6_HLEN; + hdrdiff += IP6_FRAG_HLEN; + if (hdrdiff > len) { + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + len = (u16_t)(len - hdrdiff); + start = (offset & IP6_FRAG_OFFSET_MASK); + if (start > (0xFFFF - len)) { + /* u16_t overflow, cannot handle this */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + + /* Look for the datagram the fragment belongs to in the current datagram queue, + * remembering the previous in the queue for later dequeueing. */ + for (ipr = reassdatagrams, ipr_prev = NULL; ipr != NULL; ipr = ipr->next) { + /* Check if the incoming fragment matches the one currently present + in the reassembly buffer. If so, we proceed with copying the + fragment into the buffer. */ + if ((frag_hdr->_identification == ipr->identification) && + ip6_addr_cmp_packed(ip6_current_src_addr(), &(IPV6_FRAG_SRC(ipr)), ipr->src_zone) && + ip6_addr_cmp_packed(ip6_current_dest_addr(), &(IPV6_FRAG_DEST(ipr)), ipr->dest_zone)) { + IP6_FRAG_STATS_INC(ip6_frag.cachehit); + break; + } + ipr_prev = ipr; + } + + if (ipr == NULL) { + /* Enqueue a new datagram into the datagram queue */ + ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA); + if (ipr == NULL) { +#if IP_REASS_FREE_OLDEST + /* Make room and try again. */ + ip6_reass_remove_oldest_datagram(ipr, clen); + ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA); + if (ipr != NULL) { + /* re-search ipr_prev since it might have been removed */ + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } else +#endif /* IP_REASS_FREE_OLDEST */ + { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + goto nullreturn; + } + } + + memset(ipr, 0, sizeof(struct ip6_reassdata)); + ipr->timer = IPV6_REASS_MAXAGE; + + /* enqueue the new structure to the front of the list */ + ipr->next = reassdatagrams; + reassdatagrams = ipr; + + /* Use the current IPv6 header for src/dest address reference. + * Eventually, we will replace it when we get the first fragment + * (it might be this one, in any case, it is done later). 
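+ * (With IPV6_FRAG_COPYHEADER enabled, the MEMCPYs below keep a private
+ * copy of src/dest: on platforms where sizeof(void *) > 4 the helper
+ * struct is larger than the Fragment Header and also overwrites part of
+ * the preceding header, so the addresses could not be read back later.)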
*/ + /* need to use the none-const pointer here: */ + ipr->iphdr = ip_data.current_ip6_header; +#if IPV6_FRAG_COPYHEADER + MEMCPY(&ipr->src, &ip6_current_header()->src, sizeof(ipr->src)); + MEMCPY(&ipr->dest, &ip6_current_header()->dest, sizeof(ipr->dest)); +#endif /* IPV6_FRAG_COPYHEADER */ +#if LWIP_IPV6_SCOPES + /* Also store the address zone information. + * @todo It is possible that due to netif destruction and recreation, the + * stored zones end up resolving to a different interface. In that case, we + * risk sending a "time exceeded" ICMP response over the wrong link. + * Ideally, netif destruction would clean up matching pending reassembly + * structures, but custom zone mappings would make that non-trivial. */ + ipr->src_zone = ip6_addr_zone(ip6_current_src_addr()); + ipr->dest_zone = ip6_addr_zone(ip6_current_dest_addr()); +#endif /* LWIP_IPV6_SCOPES */ + /* copy the fragmented packet id. */ + ipr->identification = frag_hdr->_identification; + + /* copy the nexth field */ + ipr->nexth = frag_hdr->_nexth; + } + + /* Check if we are allowed to enqueue more datagrams. */ + if ((ip6_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) { +#if IP_REASS_FREE_OLDEST + ip6_reass_remove_oldest_datagram(ipr, clen); + if ((ip6_reass_pbufcount + clen) <= IP_REASS_MAX_PBUFS) { + /* re-search ipr_prev since it might have been removed */ + for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { + if (ipr_prev->next == ipr) { + break; + } + } + } else +#endif /* IP_REASS_FREE_OLDEST */ + { + /* @todo: send ICMPv6 time exceeded here? */ + /* drop this pbuf */ + IP6_FRAG_STATS_INC(ip6_frag.memerr); + goto nullreturn; + } + } + + /* Overwrite Fragment Header with our own helper struct. */ +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + /* Make room for struct ip6_reass_helper (only required if sizeof(void*) > 4). + This cannot fail since we already checked when receiving this fragment. */ + u8_t hdrerr = pbuf_header_force(p, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#else /* IPV6_FRAG_COPYHEADER */ + LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1", + sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN); +#endif /* IPV6_FRAG_COPYHEADER */ + + /* Prepare the pointer to the helper structure, and its initial values. + * Do not yet write to the structure itself, as we still have to make a + * backup of the original data, and we should not do that until we know for + * sure that we are going to add this packet to the list. */ + iprh = (struct ip6_reass_helper *)p->payload; + next_pbuf = NULL; + end = (u16_t)(start + len); + + /* find the right place to insert this pbuf */ + /* Iterate through until we either get to the end of the list (append), + * or we find on with a larger offset (insert). 
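+ * Invariant sketch: the queue stays sorted by start offset, e.g.
+ *
+ *   ipr->p -> [0,1232) -> [1232,2464) -> [3696,4928) -> NULL
+ *
+ * A new fragment [2464,3696) is linked in before [3696,4928); 'valid' is
+ * cleared below while any such gap remains.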
*/ + for (q = ipr->p; q != NULL;) { + iprh_tmp = (struct ip6_reass_helper*)q->payload; + if (start < iprh_tmp->start) { +#if IP_REASS_CHECK_OVERLAP + if (end > iprh_tmp->start) { + /* fragment overlaps with following, throw away */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + if (iprh_prev != NULL) { + if (start < iprh_prev->end) { + /* fragment overlaps with previous, throw away */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; + } + } +#endif /* IP_REASS_CHECK_OVERLAP */ + /* the new pbuf should be inserted before this */ + next_pbuf = q; + if (iprh_prev != NULL) { + /* not the fragment with the lowest offset */ + iprh_prev->next_pbuf = p; + } else { + /* fragment with the lowest offset */ + ipr->p = p; + } + break; + } else if (start == iprh_tmp->start) { + /* received the same datagram twice: no need to keep the datagram */ + goto nullreturn; +#if IP_REASS_CHECK_OVERLAP + } else if (start < iprh_tmp->end) { + /* overlap: no need to keep the new datagram */ + IP6_FRAG_STATS_INC(ip6_frag.proterr); + goto nullreturn; +#endif /* IP_REASS_CHECK_OVERLAP */ + } else { + /* Check if the fragments received so far have no gaps. */ + if (iprh_prev != NULL) { + if (iprh_prev->end != iprh_tmp->start) { + /* There is a fragment missing between the current + * and the previous fragment */ + valid = 0; + } + } + } + q = iprh_tmp->next_pbuf; + iprh_prev = iprh_tmp; + } + + /* If q is NULL, then we made it to the end of the list. Determine what to do now */ + if (q == NULL) { + if (iprh_prev != NULL) { + /* this is (for now), the fragment with the highest offset: + * chain it to the last fragment */ +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= start); +#endif /* IP_REASS_CHECK_OVERLAP */ + iprh_prev->next_pbuf = p; + if (iprh_prev->end != start) { + valid = 0; + } + } else { +#if IP_REASS_CHECK_OVERLAP + LWIP_ASSERT("no previous fragment, this must be the first fragment!", + ipr->p == NULL); +#endif /* IP_REASS_CHECK_OVERLAP */ + /* this is the first fragment we ever received for this ip datagram */ + ipr->p = p; + } + } + + /* Track the current number of pbufs current 'in-flight', in order to limit + the number of fragments that may be enqueued at any one time */ + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount + clen); + + /* Remember IPv6 header if this is the first fragment. */ + if (start == 0) { + /* need to use the none-const pointer here: */ + ipr->iphdr = ip_data.current_ip6_header; + /* Make a backup of the part of the packet data that we are about to + * overwrite, so that we can restore the original later. */ + MEMCPY(ipr->orig_hdr, p->payload, sizeof(*iprh)); + /* For IPV6_FRAG_COPYHEADER there is no need to copy src/dst again, as they + * will be the same as they were. With LWIP_IPV6_SCOPES, the same applies + * to the source/destination zones. */ + } + /* Only after the backup do we get to fill in the actual helper structure. */ + iprh->next_pbuf = next_pbuf; + iprh->start = start; + iprh->end = end; + + /* If this is the last fragment, calculate total packet length. */ + if ((offset & IP6_FRAG_MORE_FLAG) == 0) { + ipr->datagram_len = iprh->end; + } + + /* Additional validity tests: we have received first and last fragment. */ + iprh_tmp = (struct ip6_reass_helper*)ipr->p->payload; + if (iprh_tmp->start != 0) { + valid = 0; + } + if (ipr->datagram_len == 0) { + valid = 0; + } + + /* Final validity test: no gaps between current and last fragment. 
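+ * e.g. [0,1232) -> [1232,2464) -> [2464,2800) passes, while any chain in
+ * which some iprh_prev->end != iprh->start still has a hole and stays
+ * queued as incomplete.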
*/ + iprh_prev = iprh; + q = iprh->next_pbuf; + while ((q != NULL) && valid) { + iprh = (struct ip6_reass_helper*)q->payload; + if (iprh_prev->end != iprh->start) { + valid = 0; + break; + } + iprh_prev = iprh; + q = iprh->next_pbuf; + } + + if (valid) { + /* All fragments have been received */ + struct ip6_hdr* iphdr_ptr; + + /* chain together the pbufs contained within the ip6_reassdata list. */ + iprh = (struct ip6_reass_helper*) ipr->p->payload; + while (iprh != NULL) { + next_pbuf = iprh->next_pbuf; + if (next_pbuf != NULL) { + /* Save next helper struct (will be hidden in next step). */ + iprh_tmp = (struct ip6_reass_helper*)next_pbuf->payload; + + /* hide the fragment header for every succeeding fragment */ + pbuf_remove_header(next_pbuf, IP6_FRAG_HLEN); +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + /* hide the extra bytes borrowed from ip6_hdr for struct ip6_reass_helper */ + u8_t hdrerr = pbuf_remove_header(next_pbuf, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#endif + pbuf_cat(ipr->p, next_pbuf); + } + else { + iprh_tmp = NULL; + } + + iprh = iprh_tmp; + } + + /* Get the first pbuf. */ + p = ipr->p; + +#if IPV6_FRAG_COPYHEADER + if (IPV6_FRAG_REQROOM > 0) { + u8_t hdrerr; + /* Restore (only) the bytes that we overwrote beyond the fragment header. + * Those bytes may belong to either the IPv6 header or an extension + * header placed before the fragment header. */ + MEMCPY(p->payload, ipr->orig_hdr, IPV6_FRAG_REQROOM); + /* get back room for struct ip6_reass_helper (only required if sizeof(void*) > 4) */ + hdrerr = pbuf_remove_header(p, IPV6_FRAG_REQROOM); + LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); + } +#endif + + /* We need to get rid of the fragment header itself, which is somewhere in + * the middle of the packet (but still in the first pbuf of the chain). + * Getting rid of the header is required by RFC 2460 Sec. 4.5 and necessary + * in order to be able to reassemble packets that are close to full size + * (i.e., around 65535 bytes). We simply move up all the headers before the + * fragment header, including the IPv6 header, and adjust the payload start + * accordingly. This works because all these headers are in the first pbuf + * of the chain, and because the caller adjusts all its pointers on + * successful reassembly. */ + MEMMOVE((u8_t*)ipr->iphdr + sizeof(struct ip6_frag_hdr), ipr->iphdr, + (size_t)((u8_t*)p->payload - (u8_t*)ipr->iphdr)); + + /* This is where the IPv6 header is now. */ + iphdr_ptr = (struct ip6_hdr*)((u8_t*)ipr->iphdr + + sizeof(struct ip6_frag_hdr)); + + /* Adjust datagram length by adding header lengths. */ + ipr->datagram_len = (u16_t)(ipr->datagram_len + ((u8_t*)p->payload - (u8_t*)iphdr_ptr) + - IP6_HLEN); + + /* Set payload length in ip header. */ + iphdr_ptr->_plen = lwip_htons(ipr->datagram_len); + + /* With the fragment header gone, we now need to adjust the next-header + * field of whatever header was originally before it. Since the packet made + * it through the original header processing routines at least up to the + * fragment header, we do not need any further sanity checks here. 
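+ *
+ * Sketch: if the Fragment Header directly followed the IPv6 header, the
+ * _nexth field of the IPv6 header itself is rewritten below; otherwise
+ * the extension header chain is walked (each extension header occupies
+ * 8 * (1 + length-byte) bytes) up to the one announcing the fragment
+ * header:
+ *
+ *   | IPv6 (nexth=HOPBYHOP) | HopByHop (nexth=FRAGMENT) | payload |
+ *                                       ^-- rewritten to ipr->nexth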
*/ + if (IP6H_NEXTH(iphdr_ptr) == IP6_NEXTH_FRAGMENT) { + iphdr_ptr->_nexth = ipr->nexth; + } else { + u8_t *ptr = (u8_t *)iphdr_ptr + IP6_HLEN; + while (*ptr != IP6_NEXTH_FRAGMENT) { + ptr += 8 * (1 + ptr[1]); + } + *ptr = ipr->nexth; + } + + /* release the resources allocated for the fragment queue entry */ + if (reassdatagrams == ipr) { + /* it was the first in the list */ + reassdatagrams = ipr->next; + } else { + /* it wasn't the first, so it must have a valid 'prev' */ + LWIP_ASSERT("sanity check linked list", ipr_prev != NULL); + ipr_prev->next = ipr->next; + } + memp_free(MEMP_IP6_REASSDATA, ipr); + + /* adjust the number of pbufs currently queued for reassembly. */ + clen = pbuf_clen(p); + LWIP_ASSERT("ip6_reass_pbufcount >= clen", ip6_reass_pbufcount >= clen); + ip6_reass_pbufcount = (u16_t)(ip6_reass_pbufcount - clen); + + /* Move pbuf back to IPv6 header. This should never fail. */ + if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)iphdr_ptr))) { + LWIP_ASSERT("ip6_reass: moving p->payload to ip6 header failed\n", 0); + pbuf_free(p); + return NULL; + } + + /* Return the pbuf chain */ + return p; + } + /* the datagram is not (yet?) reassembled completely */ + return NULL; + +nullreturn: + IP6_FRAG_STATS_INC(ip6_frag.drop); + pbuf_free(p); + return NULL; +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ + +#if LWIP_IPV6 && LWIP_IPV6_FRAG + +#if !LWIP_NETIF_TX_SINGLE_PBUF +/** Allocate a new struct pbuf_custom_ref */ +static struct pbuf_custom_ref* +ip6_frag_alloc_pbuf_custom_ref(void) +{ + return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF); +} + +/** Free a struct pbuf_custom_ref */ +static void +ip6_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p) +{ + LWIP_ASSERT("p != NULL", p != NULL); + memp_free(MEMP_FRAG_PBUF, p); +} + +/** Free-callback function to free a 'struct pbuf_custom_ref', called by + * pbuf_free. */ +static void +ip6_frag_free_pbuf_custom(struct pbuf *p) +{ + struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p; + LWIP_ASSERT("pcr != NULL", pcr != NULL); + LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p); + if (pcr->original != NULL) { + pbuf_free(pcr->original); + } + ip6_frag_free_pbuf_custom_ref(pcr); +} +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +/** + * Fragment an IPv6 datagram if too large for the netif or path MTU. + * + * Chop the datagram in MTU sized chunks and send them in order + * by pointing PBUF_REFs into p + * + * @param p ipv6 packet to send + * @param netif the netif on which to send + * @param dest destination ipv6 address to which to send + * + * @return ERR_OK if sent successfully, err_t otherwise + */ +err_t +ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest) +{ + struct ip6_hdr *original_ip6hdr; + struct ip6_hdr *ip6hdr; + struct ip6_frag_hdr *frag_hdr; + struct pbuf *rambuf; +#if !LWIP_NETIF_TX_SINGLE_PBUF + struct pbuf *newpbuf; + u16_t newpbuflen = 0; + u16_t left_to_copy; +#endif + static u32_t identification; + u16_t left, cop; + const u16_t mtu = nd6_get_destination_mtu(dest, netif); + const u16_t nfb = (u16_t)((mtu - (IP6_HLEN + IP6_FRAG_HLEN)) & IP6_FRAG_OFFSET_MASK); + u16_t fragment_offset = 0; + u16_t last; + u16_t poff = IP6_HLEN; + + identification++; + + original_ip6hdr = (struct ip6_hdr *)p->payload; + + /* @todo we assume there are no options in the unfragmentable part (IPv6 header). */ + LWIP_ASSERT("p->tot_len >= IP6_HLEN", p->tot_len >= IP6_HLEN); + left = (u16_t)(p->tot_len - IP6_HLEN); + + while (left) { + last = (left <= nfb); + + /* Fill this fragment */ + cop = last ? 
left : nfb; + +#if LWIP_NETIF_TX_SINGLE_PBUF + rambuf = pbuf_alloc(PBUF_IP, cop + IP6_FRAG_HLEN, PBUF_RAM); + if (rambuf == NULL) { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL)); + poff += pbuf_copy_partial(p, (u8_t*)rambuf->payload + IP6_FRAG_HLEN, cop, poff); + /* make room for the IP header */ + if (pbuf_add_header(rambuf, IP6_HLEN)) { + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + /* fill in the IP header */ + SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN); + ip6hdr = (struct ip6_hdr *)rambuf->payload; + frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN); +#else + /* When not using a static buffer, create a chain of pbufs. + * The first will be a PBUF_RAM holding the link, IPv6, and Fragment header. + * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged, + * but limited to the size of an mtu. + */ + rambuf = pbuf_alloc(PBUF_LINK, IP6_HLEN + IP6_FRAG_HLEN, PBUF_RAM); + if (rambuf == NULL) { + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + LWIP_ASSERT("this needs a pbuf in one piece!", + (p->len >= (IP6_HLEN))); + SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN); + ip6hdr = (struct ip6_hdr *)rambuf->payload; + frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN); + + /* Can just adjust p directly for needed offset. */ + p->payload = (u8_t *)p->payload + poff; + p->len = (u16_t)(p->len - poff); + p->tot_len = (u16_t)(p->tot_len - poff); + + left_to_copy = cop; + while (left_to_copy) { + struct pbuf_custom_ref *pcr; + newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len; + /* Is this pbuf already empty? */ + if (!newpbuflen) { + p = p->next; + continue; + } + pcr = ip6_frag_alloc_pbuf_custom_ref(); + if (pcr == NULL) { + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + /* Mirror this pbuf, although we might not need all of it. */ + newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen); + if (newpbuf == NULL) { + ip6_frag_free_pbuf_custom_ref(pcr); + pbuf_free(rambuf); + IP6_FRAG_STATS_INC(ip6_frag.memerr); + return ERR_MEM; + } + pbuf_ref(p); + pcr->original = p; + pcr->pc.custom_free_function = ip6_frag_free_pbuf_custom; + + /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain + * so that it is removed when pbuf_dechain is later called on rambuf. + */ + pbuf_cat(rambuf, newpbuf); + left_to_copy = (u16_t)(left_to_copy - newpbuflen); + if (left_to_copy) { + p = p->next; + } + } + poff = newpbuflen; +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + /* Set headers */ + frag_hdr->_nexth = original_ip6hdr->_nexth; + frag_hdr->reserved = 0; + frag_hdr->_fragment_offset = lwip_htons((u16_t)((fragment_offset & IP6_FRAG_OFFSET_MASK) | (last ? 0 : IP6_FRAG_MORE_FLAG))); + frag_hdr->_identification = lwip_htonl(identification); + + IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_FRAGMENT); + IP6H_PLEN_SET(ip6hdr, (u16_t)(cop + IP6_FRAG_HLEN)); + + /* No need for separate header pbuf - we allowed room for it in rambuf + * when allocated. + */ + IP6_FRAG_STATS_INC(ip6_frag.xmit); + netif->output_ip6(netif, rambuf, dest); + + /* Unfortunately we can't reuse rambuf - the hardware may still be + * using the buffer. Instead we free it (and the ensuing chain) and + * recreate it next time round the loop. 
If we're lucky the hardware + * will have already sent the packet, the free will really free, and + * there will be zero memory penalty. + */ + + pbuf_free(rambuf); + left = (u16_t)(left - cop); + fragment_offset = (u16_t)(fragment_offset + cop); + } + return ERR_OK; +} + +#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c new file mode 100644 index 0000000..db1d09a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/mld6.c @@ -0,0 +1,626 @@ +/** + * @file + * Multicast listener discovery + * + * @defgroup mld6 MLD6 + * @ingroup ip6 + * Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710. + * No support for MLDv2.\n + * Note: The allnodes (ff01::1, ff02::1) group is assumed be received by your + * netif since it must always be received for correct IPv6 operation (e.g. SLAAC). + * Ensure the netif filters are configured accordingly!\n + * The netif flags also need NETIF_FLAG_MLD6 flag set to enable MLD6 on a + * netif ("netif->flags |= NETIF_FLAG_MLD6;").\n + * To be called from TCPIP thread. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +/* Based on igmp.c implementation of igmp v2 protocol */ + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_IPV6_MLD /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/mld6.h" +#include "lwip/prot/mld6.h" +#include "lwip/icmp6.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/ip.h" +#include "lwip/inet_chksum.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/memp.h" +#include "lwip/stats.h" + +#include + + +/* + * MLD constants + */ +#define MLD6_HL 1 +#define MLD6_JOIN_DELAYING_MEMBER_TMR_MS (500) + +#define MLD6_GROUP_NON_MEMBER 0 +#define MLD6_GROUP_DELAYING_MEMBER 1 +#define MLD6_GROUP_IDLE_MEMBER 2 + +/* Forward declarations. */ +static struct mld_group *mld6_new_group(struct netif *ifp, const ip6_addr_t *addr); +static err_t mld6_remove_group(struct netif *netif, struct mld_group *group); +static void mld6_delayed_report(struct mld_group *group, u16_t maxresp); +static void mld6_send(struct netif *netif, struct mld_group *group, u8_t type); + + +/** + * Stop MLD processing on interface + * + * @param netif network interface on which stop MLD processing + */ +err_t +mld6_stop(struct netif *netif) +{ + struct mld_group *group = netif_mld6_data(netif); + + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, NULL); + + while (group != NULL) { + struct mld_group *next = group->next; /* avoid use-after-free below */ + + /* disable the group at the MAC level */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER); + } + + /* free group */ + memp_free(MEMP_MLD6_GROUP, group); + + /* move to "next" */ + group = next; + } + return ERR_OK; +} + +/** + * Report MLD memberships for this interface + * + * @param netif network interface on which report MLD memberships + */ +void +mld6_report_groups(struct netif *netif) +{ + struct mld_group *group = netif_mld6_data(netif); + + while (group != NULL) { + mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS); + group = group->next; + } +} + +/** + * Search for a group that is joined on a netif + * + * @param ifp the network interface for which to look + * @param addr the group ipv6 address to search for + * @return a struct mld_group* if the group has been found, + * NULL if the group wasn't found. + */ +struct mld_group * +mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr) +{ + struct mld_group *group = netif_mld6_data(ifp); + + while (group != NULL) { + if (ip6_addr_cmp(&(group->group_address), addr)) { + return group; + } + group = group->next; + } + + return NULL; +} + + +/** + * create a new group + * + * @param ifp the network interface for which to create + * @param addr the new group ipv6 + * @return a struct mld_group*, + * NULL on memory error. 
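+ *
+ * The new entry is pushed at the head of the per-netif group list, so
+ * after a successful call netif_mld6_data(ifp) returns this entry.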
+ */ +static struct mld_group * +mld6_new_group(struct netif *ifp, const ip6_addr_t *addr) +{ + struct mld_group *group; + + group = (struct mld_group *)memp_malloc(MEMP_MLD6_GROUP); + if (group != NULL) { + ip6_addr_set(&(group->group_address), addr); + group->timer = 0; /* Not running */ + group->group_state = MLD6_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + group->use = 0; + group->next = netif_mld6_data(ifp); + + netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group); + } + + return group; +} + +/** + * Remove a group from the mld_group_list, but do not free it yet + * + * @param group the group to remove + * @return ERR_OK if group was removed from the list, an err_t otherwise + */ +static err_t +mld6_remove_group(struct netif *netif, struct mld_group *group) +{ + err_t err = ERR_OK; + + /* Is it the first group? */ + if (netif_mld6_data(netif) == group) { + netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group->next); + } else { + /* look for group further down the list */ + struct mld_group *tmpGroup; + for (tmpGroup = netif_mld6_data(netif); tmpGroup != NULL; tmpGroup = tmpGroup->next) { + if (tmpGroup->next == group) { + tmpGroup->next = group->next; + break; + } + } + /* Group not find group */ + if (tmpGroup == NULL) { + err = ERR_ARG; + } + } + + return err; +} + + +/** + * Process an input MLD message. Called by icmp6_input. + * + * @param p the mld packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +mld6_input(struct pbuf *p, struct netif *inp) +{ + struct mld_header *mld_hdr; + struct mld_group *group; + + MLD6_STATS_INC(mld6.recv); + + /* Check that mld header fits in packet. */ + if (p->len < sizeof(struct mld_header)) { + /* @todo debug message */ + pbuf_free(p); + MLD6_STATS_INC(mld6.lenerr); + MLD6_STATS_INC(mld6.drop); + return; + } + + mld_hdr = (struct mld_header *)p->payload; + + switch (mld_hdr->type) { + case ICMP6_TYPE_MLQ: /* Multicast listener query. */ + /* Is it a general query? */ + if (ip6_addr_isallnodes_linklocal(ip6_current_dest_addr()) && + ip6_addr_isany(&(mld_hdr->multicast_address))) { + MLD6_STATS_INC(mld6.rx_general); + /* Report all groups, except all nodes group, and if-local groups. */ + group = netif_mld6_data(inp); + while (group != NULL) { + if ((!(ip6_addr_ismulticast_iflocal(&(group->group_address)))) && + (!(ip6_addr_isallnodes_linklocal(&(group->group_address))))) { + mld6_delayed_report(group, mld_hdr->max_resp_delay); + } + group = group->next; + } + } else { + /* Have we joined this group? + * We use IP6 destination address to have a memory aligned copy. + * mld_hdr->multicast_address should be the same. */ + MLD6_STATS_INC(mld6.rx_group); + group = mld6_lookfor_group(inp, ip6_current_dest_addr()); + if (group != NULL) { + /* Schedule a report. */ + mld6_delayed_report(group, mld_hdr->max_resp_delay); + } + } + break; /* ICMP6_TYPE_MLQ */ + case ICMP6_TYPE_MLR: /* Multicast listener report. */ + /* Have we joined this group? + * We use IP6 destination address to have a memory aligned copy. + * mld_hdr->multicast_address should be the same. */ + MLD6_STATS_INC(mld6.rx_report); + group = mld6_lookfor_group(inp, ip6_current_dest_addr()); + if (group != NULL) { + /* If we are waiting to report, cancel it. 
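+ * This is the RFC 2710 report-suppression rule: another listener on the
+ * link reported the group first, so the pending delayed report is
+ * cancelled and last_reporter_flag is cleared, which also means the Done
+ * message is skipped when this host later leaves the group.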
*/ + if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) { + group->timer = 0; /* stopped */ + group->group_state = MLD6_GROUP_IDLE_MEMBER; + group->last_reporter_flag = 0; + } + } + break; /* ICMP6_TYPE_MLR */ + case ICMP6_TYPE_MLD: /* Multicast listener done. */ + /* Do nothing, router will query us. */ + break; /* ICMP6_TYPE_MLD */ + default: + MLD6_STATS_INC(mld6.proterr); + MLD6_STATS_INC(mld6.drop); + break; + } + + pbuf_free(p); +} + +/** + * @ingroup mld6 + * Join a group on one or all network interfaces. + * + * If the group is to be joined on all interfaces, the given group address must + * not have a zone set (i.e., it must have its zone index set to IP6_NO_ZONE). + * If the group is to be joined on one particular interface, the given group + * address may or may not have a zone set. + * + * @param srcaddr ipv6 address (zoned) of the network interface which should + * join a new group. If IP6_ADDR_ANY6, join on all netifs + * @param groupaddr the ipv6 address of the group to join (possibly but not + * necessarily zoned) + * @return ERR_OK if group was joined on the netif(s), an err_t otherwise + */ +err_t +mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we join this interface ? */ + if (ip6_addr_isany(srcaddr) || + netif_get_ip6_addr_match(netif, srcaddr) >= 0) { + err = mld6_joingroup_netif(netif, groupaddr); + if (err != ERR_OK) { + return err; + } + } + } + + return err; +} + +/** + * @ingroup mld6 + * Join a group on a network interface. + * + * @param netif the network interface which should join a new group. + * @param groupaddr the ipv6 address of the group to join (possibly but not + * necessarily zoned) + * @return ERR_OK if group was joined on the netif, an err_t otherwise + */ +err_t +mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr) +{ + struct mld_group *group; +#if LWIP_IPV6_SCOPES + ip6_addr_t ip6addr; + + /* If the address has a particular scope but no zone set, use the netif to + * set one now. Within the mld6 module, all addresses are properly zoned. */ + if (ip6_addr_lacks_zone(groupaddr, IP6_MULTICAST)) { + ip6_addr_set(&ip6addr, groupaddr); + ip6_addr_assign_zone(&ip6addr, IP6_MULTICAST, netif); + groupaddr = &ip6addr; + } + IP6_ADDR_ZONECHECK_NETIF(groupaddr, netif); +#endif /* LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + + /* find group or create a new one if not found */ + group = mld6_lookfor_group(netif, groupaddr); + + if (group == NULL) { + /* Joining a new group. Create a new group entry. */ + group = mld6_new_group(netif, groupaddr); + if (group == NULL) { + return ERR_MEM; + } + + /* Activate this address on the MAC layer. */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER); + } + + /* Report our membership. */ + MLD6_STATS_INC(mld6.tx_report); + mld6_send(netif, group, ICMP6_TYPE_MLR); + mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS); + } + + /* Increment group use */ + group->use++; + return ERR_OK; +} + +/** + * @ingroup mld6 + * Leave a group on a network interface. + * + * Zoning of address follows the same rules as @ref mld6_joingroup. + * + * @param srcaddr ipv6 address (zoned) of the network interface which should + * leave the group. 
If IP6_ADDR_ANY6, leave on all netifs + * @param groupaddr the ipv6 address of the group to leave (possibly, but not + * necessarily zoned) + * @return ERR_OK if group was left on the netif(s), an err_t otherwise + */ +err_t +mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr) +{ + err_t err = ERR_VAL; /* no matching interface */ + struct netif *netif; + + LWIP_ASSERT_CORE_LOCKED(); + + /* loop through netif's */ + NETIF_FOREACH(netif) { + /* Should we leave this interface ? */ + if (ip6_addr_isany(srcaddr) || + netif_get_ip6_addr_match(netif, srcaddr) >= 0) { + err_t res = mld6_leavegroup_netif(netif, groupaddr); + if (err != ERR_OK) { + /* Store this result if we have not yet gotten a success */ + err = res; + } + } + } + + return err; +} + +/** + * @ingroup mld6 + * Leave a group on a network interface. + * + * @param netif the network interface which should leave the group. + * @param groupaddr the ipv6 address of the group to leave (possibly, but not + * necessarily zoned) + * @return ERR_OK if group was left on the netif, an err_t otherwise + */ +err_t +mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr) +{ + struct mld_group *group; +#if LWIP_IPV6_SCOPES + ip6_addr_t ip6addr; + + if (ip6_addr_lacks_zone(groupaddr, IP6_MULTICAST)) { + ip6_addr_set(&ip6addr, groupaddr); + ip6_addr_assign_zone(&ip6addr, IP6_MULTICAST, netif); + groupaddr = &ip6addr; + } + IP6_ADDR_ZONECHECK_NETIF(groupaddr, netif); +#endif /* LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + + /* find group */ + group = mld6_lookfor_group(netif, groupaddr); + + if (group != NULL) { + /* Leave if there is no other use of the group */ + if (group->use <= 1) { + /* Remove the group from the list */ + mld6_remove_group(netif, group); + + /* If we are the last reporter for this group */ + if (group->last_reporter_flag) { + MLD6_STATS_INC(mld6.tx_leave); + mld6_send(netif, group, ICMP6_TYPE_MLD); + } + + /* Disable the group at the MAC level */ + if (netif->mld_mac_filter != NULL) { + netif->mld_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER); + } + + /* free group struct */ + memp_free(MEMP_MLD6_GROUP, group); + } else { + /* Decrement group use */ + group->use--; + } + + /* Left group */ + return ERR_OK; + } + + /* Group not found */ + return ERR_VAL; +} + + +/** + * Periodic timer for mld processing. Must be called every + * MLD6_TMR_INTERVAL milliseconds (100). + * + * When a delaying member expires, a membership report is sent. + */ +void +mld6_tmr(void) +{ + struct netif *netif; + + NETIF_FOREACH(netif) { + struct mld_group *group = netif_mld6_data(netif); + + while (group != NULL) { + if (group->timer > 0) { + group->timer--; + if (group->timer == 0) { + /* If the state is MLD6_GROUP_DELAYING_MEMBER then we send a report for this group */ + if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) { + MLD6_STATS_INC(mld6.tx_report); + mld6_send(netif, group, ICMP6_TYPE_MLR); + group->group_state = MLD6_GROUP_IDLE_MEMBER; + } + } + } + group = group->next; + } + } +} + +/** + * Schedule a delayed membership report for a group + * + * @param group the mld_group for which "delaying" membership report + * should be sent + * @param maxresp_in the max resp delay provided in the query + */ +static void +mld6_delayed_report(struct mld_group *group, u16_t maxresp_in) +{ + /* Convert maxresp from milliseconds to tmr ticks */ + u16_t maxresp = maxresp_in / MLD6_TMR_INTERVAL; + if (maxresp == 0) { + maxresp = 1; + } + +#ifdef LWIP_RAND + /* Randomize maxresp. 
(if LWIP_RAND is supported) */ + maxresp = (u16_t)(LWIP_RAND() % maxresp); + if (maxresp == 0) { + maxresp = 1; + } +#endif /* LWIP_RAND */ + + /* Apply timer value if no report has been scheduled already. */ + if ((group->group_state == MLD6_GROUP_IDLE_MEMBER) || + ((group->group_state == MLD6_GROUP_DELAYING_MEMBER) && + ((group->timer == 0) || (maxresp < group->timer)))) { + group->timer = maxresp; + group->group_state = MLD6_GROUP_DELAYING_MEMBER; + } +} + +/** + * Send a MLD message (report or done). + * + * An IPv6 hop-by-hop options header with a router alert option + * is prepended. + * + * @param group the group to report or quit + * @param type ICMP6_TYPE_MLR (report) or ICMP6_TYPE_MLD (done) + */ +static void +mld6_send(struct netif *netif, struct mld_group *group, u8_t type) +{ + struct mld_header *mld_hdr; + struct pbuf *p; + const ip6_addr_t *src_addr; + + /* Allocate a packet. Size is MLD header + IPv6 Hop-by-hop options header. */ + p = pbuf_alloc(PBUF_IP, sizeof(struct mld_header) + MLD6_HBH_HLEN, PBUF_RAM); + if (p == NULL) { + MLD6_STATS_INC(mld6.memerr); + return; + } + + /* Move to make room for Hop-by-hop options header. */ + if (pbuf_remove_header(p, MLD6_HBH_HLEN)) { + pbuf_free(p); + MLD6_STATS_INC(mld6.lenerr); + return; + } + + /* Select our source address. */ + if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { + /* This is a special case, when we are performing duplicate address detection. + * We must join the multicast group, but we don't have a valid address yet. */ + src_addr = IP6_ADDR_ANY6; + } else { + /* Use link-local address as source address. */ + src_addr = netif_ip6_addr(netif, 0); + } + + /* MLD message header pointer. */ + mld_hdr = (struct mld_header *)p->payload; + + /* Set fields. */ + mld_hdr->type = type; + mld_hdr->code = 0; + mld_hdr->chksum = 0; + mld_hdr->max_resp_delay = 0; + mld_hdr->reserved = 0; + ip6_addr_copy_to_packed(mld_hdr->multicast_address, group->group_address); + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + mld_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, + src_addr, &(group->group_address)); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Add hop-by-hop headers options: router alert with MLD value. */ + ip6_options_add_hbh_ra(p, IP6_NEXTH_ICMP6, IP6_ROUTER_ALERT_VALUE_MLD); + + if (type == ICMP6_TYPE_MLR) { + /* Remember we were the last to report */ + group->last_reporter_flag = 1; + } + + /* Send the packet out. */ + MLD6_STATS_INC(mld6.xmit); + ip6_output_if(p, (ip6_addr_isany(src_addr)) ? NULL : src_addr, &(group->group_address), + MLD6_HL, 0, IP6_NEXTH_HOPBYHOP, netif); + pbuf_free(p); +} + +#endif /* LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c new file mode 100644 index 0000000..5deb3f1 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/ipv6/nd6.c @@ -0,0 +1,2434 @@ +/** + * @file + * + * Neighbor discovery and stateless address autoconfiguration for IPv6. + * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 + * (Address autoconfiguration). + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Ivan Delamer <delamer@inicotech.com>
+ *
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ * <delamer@inicotech.com>
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV6  /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/nd6.h"
+#include "lwip/priv/nd6_priv.h"
+#include "lwip/prot/nd6.h"
+#include "lwip/prot/icmp6.h"
+#include "lwip/pbuf.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/netif.h"
+#include "lwip/icmp6.h"
+#include "lwip/mld6.h"
+#include "lwip/dhcp6.h"
+#include "lwip/ip.h"
+#include "lwip/stats.h"
+#include "lwip/dns.h"
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+#if LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK
+#error LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK
+#endif
+
+/* Router tables. */
+struct nd6_neighbor_cache_entry neighbor_cache[LWIP_ND6_NUM_NEIGHBORS];
+struct nd6_destination_cache_entry destination_cache[LWIP_ND6_NUM_DESTINATIONS];
+struct nd6_prefix_list_entry prefix_list[LWIP_ND6_NUM_PREFIXES];
+struct nd6_router_list_entry default_router_list[LWIP_ND6_NUM_ROUTERS];
+
+/* Default values, can be updated by a RA message. */
+u32_t reachable_time = LWIP_ND6_REACHABLE_TIME;
+u32_t retrans_timer = LWIP_ND6_RETRANS_TIMER; /* @todo implement this value in timer */
+
+/* Index for cache entries. */
+static u8_t nd6_cached_neighbor_index;
+static netif_addr_idx_t nd6_cached_destination_index;
+
+/* Multicast address holder. */
+static ip6_addr_t multicast_address;
+
+static u8_t nd6_tmr_rs_reduction;
+
+/* Static buffer to parse RA packet options */
+union ra_options {
+  struct lladdr_option lladdr;
+  struct mtu_option mtu;
+  struct prefix_option prefix;
+#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS
+  struct rdnss_option rdnss;
+#endif
+};
+static union ra_options nd6_ra_buffer;
+
+/* Forward declarations. 
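These file-local helpers cover cache lookup and
   allocation, transmission of the individual ND message types, and the
   per-neighbor pending-packet queue 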
*/ +static s8_t nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr); +static s8_t nd6_new_neighbor_cache_entry(void); +static void nd6_free_neighbor_cache_entry(s8_t i); +static s16_t nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr); +static s16_t nd6_new_destination_cache_entry(void); +static int nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif); +static s8_t nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif); +static s8_t nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif); +static s8_t nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif); +static s8_t nd6_get_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif); +static s8_t nd6_new_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif); +static s8_t nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif); +static err_t nd6_queue_packet(s8_t neighbor_index, struct pbuf *q); + +#define ND6_SEND_FLAG_MULTICAST_DEST 0x01 +#define ND6_SEND_FLAG_ALLNODES_DEST 0x02 +#define ND6_SEND_FLAG_ANY_SRC 0x04 +static void nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags); +static void nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags); +static void nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags); +#if LWIP_IPV6_SEND_ROUTER_SOLICIT +static err_t nd6_send_rs(struct netif *netif); +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +#if LWIP_ND6_QUEUEING +static void nd6_free_q(struct nd6_q_entry *q); +#else /* LWIP_ND6_QUEUEING */ +#define nd6_free_q(q) pbuf_free(q) +#endif /* LWIP_ND6_QUEUEING */ +static void nd6_send_q(s8_t i); + + +/** + * A local address has been determined to be a duplicate. Take the appropriate + * action(s) on the address and the interface as a whole. + * + * @param netif the netif that owns the address + * @param addr_idx the index of the address detected to be a duplicate + */ +static void +nd6_duplicate_addr_detected(struct netif *netif, s8_t addr_idx) +{ + + /* Mark the address as duplicate, but leave its lifetimes alone. If this was + * a manually assigned address, it will remain in existence as duplicate, and + * as such be unusable for any practical purposes until manual intervention. + * If this was an autogenerated address, the address will follow normal + * expiration rules, and thus disappear once its valid lifetime expires. */ + netif_ip6_addr_set_state(netif, addr_idx, IP6_ADDR_DUPLICATED); + +#if LWIP_IPV6_AUTOCONFIG + /* If the affected address was the link-local address that we use to generate + * all other addresses, then we should not continue to use those derived + * addresses either, so mark them as duplicate as well. For autoconfig-only + * setups, this will make the interface effectively unusable, approaching the + * intention of RFC 4862 Sec. 5.4.5. @todo implement the full requirements */ + if (addr_idx == 0) { + s8_t i; + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) && + !netif_ip6_addr_isstatic(netif, i)) { + netif_ip6_addr_set_state(netif, i, IP6_ADDR_DUPLICATED); + } + } + } +#endif /* LWIP_IPV6_AUTOCONFIG */ +} + +#if LWIP_IPV6_AUTOCONFIG +/** + * We received a router advertisement that contains a prefix with the + * autoconfiguration flag set. Add or update an associated autogenerated + * address. 
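+ * Lifetime updates follow RFC 4862 Sec. 5.5.3 point (e), including the
+ * two-hour rule that keeps an unauthenticated advertisement from expiring
+ * an existing address immediately.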
+ * + * @param netif the netif on which the router advertisement arrived + * @param prefix_opt a pointer to the prefix option data + * @param prefix_addr an aligned copy of the prefix address + */ +static void +nd6_process_autoconfig_prefix(struct netif *netif, + struct prefix_option *prefix_opt, const ip6_addr_t *prefix_addr) +{ + ip6_addr_t ip6addr; + u32_t valid_life, pref_life; + u8_t addr_state; + s8_t i, free_idx; + + /* The caller already checks RFC 4862 Sec. 5.5.3 points (a) and (b). We do + * the rest, starting with checks for (c) and (d) here. */ + valid_life = lwip_htonl(prefix_opt->valid_lifetime); + pref_life = lwip_htonl(prefix_opt->preferred_lifetime); + if (pref_life > valid_life || prefix_opt->prefix_length != 64) { + return; /* silently ignore this prefix for autoconfiguration purposes */ + } + + /* If an autogenerated address already exists for this prefix, update its + * lifetimes. An address is considered autogenerated if 1) it is not static + * (i.e., manually assigned), and 2) there is an advertised autoconfiguration + * prefix for it (the one we are processing here). This does not necessarily + * exclude the possibility that the address was actually assigned by, say, + * DHCPv6. If that distinction becomes important in the future, more state + * must be kept. As explained elsewhere we also update lifetimes of tentative + * and duplicate addresses. Skip address slot 0 (the link-local address). */ + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + addr_state = netif_ip6_addr_state(netif, i); + if (!ip6_addr_isinvalid(addr_state) && !netif_ip6_addr_isstatic(netif, i) && + ip6_addr_netcmp(prefix_addr, netif_ip6_addr(netif, i))) { + /* Update the valid lifetime, as per RFC 4862 Sec. 5.5.3 point (e). + * The valid lifetime will never drop to zero as a result of this. */ + u32_t remaining_life = netif_ip6_addr_valid_life(netif, i); + if (valid_life > ND6_2HRS || valid_life > remaining_life) { + netif_ip6_addr_set_valid_life(netif, i, valid_life); + } else if (remaining_life > ND6_2HRS) { + netif_ip6_addr_set_valid_life(netif, i, ND6_2HRS); + } + LWIP_ASSERT("bad valid lifetime", !netif_ip6_addr_isstatic(netif, i)); + /* Update the preferred lifetime. No bounds checks are needed here. In + * rare cases the advertisement may un-deprecate the address, though. + * Deprecation is left to the timer code where it is handled anyway. */ + if (pref_life > 0 && addr_state == IP6_ADDR_DEPRECATED) { + netif_ip6_addr_set_state(netif, i, IP6_ADDR_PREFERRED); + } + netif_ip6_addr_set_pref_life(netif, i, pref_life); + return; /* there should be at most one matching address */ + } + } + + /* No autogenerated address exists for this prefix yet. See if we can add a + * new one. However, if IPv6 autoconfiguration is administratively disabled, + * do not generate new addresses, but do keep updating lifetimes for existing + * addresses. Also, when adding new addresses, we must protect explicitly + * against a valid lifetime of zero, because again, we use that as a special + * value. The generated address would otherwise expire immediately anyway. + * Finally, the original link-local address must be usable at all. We start + * creating addresses even if the link-local address is still in tentative + * state though, and deal with the fallout of that upon DAD collision. 
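+ * The address constructed below keeps the advertised /64 prefix and adopts
+ * the interface identifier (lower 64 bits) of the link-local address: e.g.,
+ * link-local fe80::202:3fff:fe04:506 plus advertised prefix 2001:db8:1:1::/64
+ * gives 2001:db8:1:1:202:3fff:fe04:506.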
*/ + addr_state = netif_ip6_addr_state(netif, 0); + if (!netif->ip6_autoconfig_enabled || valid_life == IP6_ADDR_LIFE_STATIC || + ip6_addr_isinvalid(addr_state) || ip6_addr_isduplicated(addr_state)) { + return; + } + + /* Construct the new address that we intend to use, and then see if that + * address really does not exist. It might have been added manually, after + * all. As a side effect, find a free slot. Note that we cannot use + * netif_add_ip6_address() here, as it would return ERR_OK if the address + * already did exist, resulting in that address being given lifetimes. */ + IP6_ADDR(&ip6addr, prefix_addr->addr[0], prefix_addr->addr[1], + netif_ip6_addr(netif, 0)->addr[2], netif_ip6_addr(netif, 0)->addr[3]); + ip6_addr_assign_zone(&ip6addr, IP6_UNICAST, netif); + + free_idx = 0; + for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) { + if (ip6_addr_cmp(&ip6addr, netif_ip6_addr(netif, i))) { + return; /* formed address already exists */ + } + } else if (free_idx == 0) { + free_idx = i; + } + } + if (free_idx == 0) { + return; /* no address slots available, try again on next advertisement */ + } + + /* Assign the new address to the interface. */ + ip_addr_copy_from_ip6(netif->ip6_addr[free_idx], ip6addr); + netif_ip6_addr_set_valid_life(netif, free_idx, valid_life); + netif_ip6_addr_set_pref_life(netif, free_idx, pref_life); + netif_ip6_addr_set_state(netif, free_idx, IP6_ADDR_TENTATIVE); +} +#endif /* LWIP_IPV6_AUTOCONFIG */ + +/** + * Process an incoming neighbor discovery message + * + * @param p the nd packet, p->payload pointing to the icmpv6 header + * @param inp the netif on which this packet was received + */ +void +nd6_input(struct pbuf *p, struct netif *inp) +{ + u8_t msg_type; + s8_t i; + s16_t dest_idx; + + ND6_STATS_INC(nd6.recv); + + msg_type = *((u8_t *)p->payload); + switch (msg_type) { + case ICMP6_TYPE_NA: /* Neighbor Advertisement. */ + { + struct na_header *na_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t target_address; + + /* Check that na header fits in packet. */ + if (p->len < (sizeof(struct na_header))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + na_hdr = (struct na_header *)p->payload; + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, na_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 7.1.2 requirements. */ + if (IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || na_hdr->code != 0 || + ip6_addr_ismulticast(&target_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: if IP destination is multicast, Solicited flag is zero */ + /* @todo RFC MUST: all included options have a length greater than zero */ + + /* Unsolicited NA?*/ + if (ip6_addr_ismulticast(ip6_current_dest_addr())) { + /* This is an unsolicited NA. + * link-layer changed? + * part of DAD mechanism? */ + +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS + /* If the target address matches this netif, it is a DAD response. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) && + !ip6_addr_isduplicated(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + /* We are using a duplicate address. 
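Another node answered the probe for (or itself
             announced) an address we hold, so the address cannot be used 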
*/ + nd6_duplicate_addr_detected(inp, i); + + pbuf_free(p); + return; + } + } +#endif /* LWIP_IPV6_DUP_DETECT_ATTEMPTS */ + + /* Check that link-layer address option also fits in packet. */ + if (p->len < (sizeof(struct na_header) + 2)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* This is an unsolicited NA, most likely there was a LLADDR change. */ + i = nd6_find_neighbor_cache_entry(&target_address); + if (i >= 0) { + if (na_hdr->flags & ND6_FLAG_OVERRIDE) { + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + } + } + } else { + /* This is a solicited NA. + * neighbor address resolution response? + * neighbor unreachability detection response? */ + + /* Find the cache entry corresponding to this na. */ + i = nd6_find_neighbor_cache_entry(&target_address); + if (i < 0) { + /* We no longer care about this target address. drop it. */ + pbuf_free(p); + return; + } + + /* Update cache entry. */ + if ((na_hdr->flags & ND6_FLAG_OVERRIDE) || + (neighbor_cache[i].state == ND6_INCOMPLETE)) { + /* Check that link-layer address option also fits in packet. */ + if (p->len < (sizeof(struct na_header) + 2)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + } + + neighbor_cache[i].netif = inp; + neighbor_cache[i].state = ND6_REACHABLE; + neighbor_cache[i].counter.reachable_time = reachable_time; + + /* Send queued packets, if any. */ + if (neighbor_cache[i].q != NULL) { + nd6_send_q(i); + } + } + + break; /* ICMP6_TYPE_NA */ + } + case ICMP6_TYPE_NS: /* Neighbor solicitation. */ + { + struct ns_header *ns_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t target_address; + u8_t accepted; + + /* Check that ns header fits in packet. */ + if (p->len < sizeof(struct ns_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + ns_hdr = (struct ns_header *)p->payload; + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, ns_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 7.1.1 requirements. */ + if (IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || ns_hdr->code != 0 || + ip6_addr_ismulticast(&target_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: all included options have a length greater than zero */ + /* @todo RFC MUST: if IP source is 'any', destination is solicited-node multicast address */ + /* @todo RFC MUST: if IP source is 'any', there is no source LL address option */ + + /* Check if there is a link-layer address provided. Only point to it if in this buffer. 
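An option that does not fit entirely within the first pbuf is treated as
       absent rather than reassembled 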
*/ + if (p->len >= (sizeof(struct ns_header) + 2)) { + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header)); + if (p->len < (sizeof(struct ns_header) + (lladdr_opt->length << 3))) { + lladdr_opt = NULL; + } + } else { + lladdr_opt = NULL; + } + + /* Check if the target address is configured on the receiving netif. */ + accepted = 0; + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + if ((ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) || + (ip6_addr_istentative(netif_ip6_addr_state(inp, i)) && + ip6_addr_isany(ip6_current_src_addr()))) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + accepted = 1; + break; + } + } + + /* NS not for us? */ + if (!accepted) { + pbuf_free(p); + return; + } + + /* Check for ANY address in src (DAD algorithm). */ + if (ip6_addr_isany(ip6_current_src_addr())) { + /* Sender is validating this address. */ + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) && + ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { + /* Send a NA back so that the sender does not use this address. */ + nd6_send_na(inp, netif_ip6_addr(inp, i), ND6_FLAG_OVERRIDE | ND6_SEND_FLAG_ALLNODES_DEST); + if (ip6_addr_istentative(netif_ip6_addr_state(inp, i))) { + /* We shouldn't use this address either. */ + nd6_duplicate_addr_detected(inp, i); + } + } + } + } else { + /* Sender is trying to resolve our address. */ + /* Verify that they included their own link-layer address. */ + if (lladdr_opt == NULL) { + /* Not a valid message. */ + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + i = nd6_find_neighbor_cache_entry(ip6_current_src_addr()); + if (i>= 0) { + /* We already have a record for the solicitor. */ + if (neighbor_cache[i].state == ND6_INCOMPLETE) { + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + + /* Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } else { + /* Add their IPv6 address and link-layer address to neighbor cache. + * We will need it at least to send a unicast NA message, but most + * likely we will also be communicating with this node soon. */ + i = nd6_new_neighbor_cache_entry(); + if (i < 0) { + /* We couldn't assign a cache entry for this neighbor. + * we won't be able to reply. drop it. */ + pbuf_free(p); + ND6_STATS_INC(nd6.memerr); + return; + } + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + ip6_addr_set(&(neighbor_cache[i].next_hop_address), ip6_current_src_addr()); + + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + + /* Send back a NA for us. Allocate the reply pbuf. */ + nd6_send_na(inp, &target_address, ND6_FLAG_SOLICITED | ND6_FLAG_OVERRIDE); + } + + break; /* ICMP6_TYPE_NS */ + } + case ICMP6_TYPE_RA: /* Router Advertisement. */ + { + struct ra_header *ra_hdr; + u8_t *buffer; /* Used to copy options. 
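Points directly into the pbuf when it is
                    contiguous; otherwise each option is first copied into
                    the static nd6_ra_buffer 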
*/ + u16_t offset; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + /* There can be multiple RDNSS options per RA */ + u8_t rdnss_server_idx = 0; +#endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */ + + /* Check that RA header fits in packet. */ + if (p->len < sizeof(struct ra_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + ra_hdr = (struct ra_header *)p->payload; + + /* Check a subset of the other RFC 4861 Sec. 6.1.2 requirements. */ + if (!ip6_addr_islinklocal(ip6_current_src_addr()) || + IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || ra_hdr->code != 0) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: all included options have a length greater than zero */ + + /* If we are sending RS messages, stop. */ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* ensure at least one solicitation is sent (see RFC 4861, ch. 6.3.7) */ + if ((inp->rs_count < LWIP_ND6_MAX_MULTICAST_SOLICIT) || + (nd6_send_rs(inp) == ERR_OK)) { + inp->rs_count = 0; + } else { + inp->rs_count = 1; + } +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + + /* Get the matching default router entry. */ + i = nd6_get_router(ip6_current_src_addr(), inp); + if (i < 0) { + /* Create a new router entry. */ + i = nd6_new_router(ip6_current_src_addr(), inp); + } + + if (i < 0) { + /* Could not create a new router entry. */ + pbuf_free(p); + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Re-set invalidation timer. */ + default_router_list[i].invalidation_timer = lwip_htons(ra_hdr->router_lifetime); + + /* Re-set default timer values. */ +#if LWIP_ND6_ALLOW_RA_UPDATES + if (ra_hdr->retrans_timer > 0) { + retrans_timer = lwip_htonl(ra_hdr->retrans_timer); + } + if (ra_hdr->reachable_time > 0) { + reachable_time = lwip_htonl(ra_hdr->reachable_time); + } +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ + + /* @todo set default hop limit... */ + /* ra_hdr->current_hop_limit;*/ + + /* Update flags in local entry (incl. preference). */ + default_router_list[i].flags = ra_hdr->flags; + +#if LWIP_IPV6_DHCP6 + /* Trigger DHCPv6 if enabled */ + dhcp6_nd6_ra_trigger(inp, ra_hdr->flags & ND6_RA_FLAG_MANAGED_ADDR_CONFIG, + ra_hdr->flags & ND6_RA_FLAG_OTHER_CONFIG); +#endif + + /* Offset to options. */ + offset = sizeof(struct ra_header); + + /* Process each option. 
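Options are TLV-encoded (RFC 4861 Sec. 4.6): a type
       octet, a length octet counted in units of 8 octets (zero is malformed),
       then the option body 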
*/ + while ((p->tot_len - offset) >= 2) { + u8_t option_type; + u16_t option_len; + int option_len8 = pbuf_try_get_at(p, offset + 1); + if (option_len8 <= 0) { + /* read beyond end or zero length */ + goto lenerr_drop_free_return; + } + option_len = ((u8_t)option_len8) << 3; + if (option_len > p->tot_len - offset) { + /* short packet (option does not fit in) */ + goto lenerr_drop_free_return; + } + if (p->len == p->tot_len) { + /* no need to copy from contiguous pbuf */ + buffer = &((u8_t*)p->payload)[offset]; + } else { + /* check if this option fits into our buffer */ + if (option_len > sizeof(nd6_ra_buffer)) { + option_type = pbuf_get_at(p, offset); + /* invalid option length */ + if (option_type != ND6_OPTION_TYPE_RDNSS) { + goto lenerr_drop_free_return; + } + /* we allow RDNSS option to be longer - we'll just drop some servers */ + option_len = sizeof(nd6_ra_buffer); + } + buffer = (u8_t*)&nd6_ra_buffer; + option_len = pbuf_copy_partial(p, &nd6_ra_buffer, option_len, offset); + } + option_type = buffer[0]; + switch (option_type) { + case ND6_OPTION_TYPE_SOURCE_LLADDR: + { + struct lladdr_option *lladdr_opt; + if (option_len < sizeof(struct lladdr_option)) { + goto lenerr_drop_free_return; + } + lladdr_opt = (struct lladdr_option *)buffer; + if ((default_router_list[i].neighbor_entry != NULL) && + (default_router_list[i].neighbor_entry->state == ND6_INCOMPLETE)) { + SMEMCPY(default_router_list[i].neighbor_entry->lladdr, lladdr_opt->addr, inp->hwaddr_len); + default_router_list[i].neighbor_entry->state = ND6_REACHABLE; + default_router_list[i].neighbor_entry->counter.reachable_time = reachable_time; + } + break; + } + case ND6_OPTION_TYPE_MTU: + { + struct mtu_option *mtu_opt; + u32_t mtu32; + if (option_len < sizeof(struct mtu_option)) { + goto lenerr_drop_free_return; + } + mtu_opt = (struct mtu_option *)buffer; + mtu32 = lwip_htonl(mtu_opt->mtu); + if ((mtu32 >= 1280) && (mtu32 <= 0xffff)) { +#if LWIP_ND6_ALLOW_RA_UPDATES + if (inp->mtu) { + /* don't set the mtu for IPv6 higher than the netif driver supports */ + inp->mtu6 = LWIP_MIN(inp->mtu, (u16_t)mtu32); + } else { + inp->mtu6 = (u16_t)mtu32; + } +#endif /* LWIP_ND6_ALLOW_RA_UPDATES */ + } + break; + } + case ND6_OPTION_TYPE_PREFIX_INFO: + { + struct prefix_option *prefix_opt; + ip6_addr_t prefix_addr; + if (option_len < sizeof(struct prefix_option)) { + goto lenerr_drop_free_return; + } + + prefix_opt = (struct prefix_option *)buffer; + + /* Get a memory-aligned copy of the prefix. */ + ip6_addr_copy_from_packed(prefix_addr, prefix_opt->prefix); + ip6_addr_assign_zone(&prefix_addr, IP6_UNICAST, inp); + + if (!ip6_addr_islinklocal(&prefix_addr)) { + if ((prefix_opt->flags & ND6_PREFIX_FLAG_ON_LINK) && + (prefix_opt->prefix_length == 64)) { + /* Add to on-link prefix list. */ + u32_t valid_life; + s8_t prefix; + + valid_life = lwip_htonl(prefix_opt->valid_lifetime); + + /* find cache entry for this prefix. */ + prefix = nd6_get_onlink_prefix(&prefix_addr, inp); + if (prefix < 0 && valid_life > 0) { + /* Create a new cache entry. */ + prefix = nd6_new_onlink_prefix(&prefix_addr, inp); + } + if (prefix >= 0) { + prefix_list[prefix].invalidation_timer = valid_life; + } + } +#if LWIP_IPV6_AUTOCONFIG + if (prefix_opt->flags & ND6_PREFIX_FLAG_AUTONOMOUS) { + /* Perform processing for autoconfiguration. */ + nd6_process_autoconfig_prefix(inp, prefix_opt, &prefix_addr); + } +#endif /* LWIP_IPV6_AUTOCONFIG */ + } + + break; + } + case ND6_OPTION_TYPE_ROUTE_INFO: + /* @todo implement preferred routes. 
+ struct route_option * route_opt; + route_opt = (struct route_option *)buffer;*/ + + break; +#if LWIP_ND6_RDNSS_MAX_DNS_SERVERS + case ND6_OPTION_TYPE_RDNSS: + { + u8_t num, n; + u16_t copy_offset = offset + SIZEOF_RDNSS_OPTION_BASE; + struct rdnss_option * rdnss_opt; + if (option_len < SIZEOF_RDNSS_OPTION_BASE) { + goto lenerr_drop_free_return; + } + + rdnss_opt = (struct rdnss_option *)buffer; + num = (rdnss_opt->length - 1) / 2; + for (n = 0; (rdnss_server_idx < DNS_MAX_SERVERS) && (n < num); n++) { + ip_addr_t rdnss_address; + + /* Copy directly from pbuf to get an aligned, zoned copy of the prefix. */ + if (pbuf_copy_partial(p, &rdnss_address, sizeof(ip6_addr_p_t), copy_offset) == sizeof(ip6_addr_p_t)) { + IP_SET_TYPE_VAL(rdnss_address, IPADDR_TYPE_V6); + ip6_addr_assign_zone(ip_2_ip6(&rdnss_address), IP6_UNKNOWN, inp); + + if (htonl(rdnss_opt->lifetime) > 0) { + /* TODO implement Lifetime > 0 */ + dns_setserver(rdnss_server_idx++, &rdnss_address); + } else { + /* TODO implement DNS removal in dns.c */ + u8_t s; + for (s = 0; s < DNS_MAX_SERVERS; s++) { + const ip_addr_t *addr = dns_getserver(s); + if(ip_addr_cmp(addr, &rdnss_address)) { + dns_setserver(s, NULL); + } + } + } + } + } + break; + } +#endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */ + default: + /* Unrecognized option, abort. */ + ND6_STATS_INC(nd6.proterr); + break; + } + /* option length is checked earlier to be non-zero to make sure loop ends */ + offset += 8 * (u8_t)option_len8; + } + + break; /* ICMP6_TYPE_RA */ + } + case ICMP6_TYPE_RD: /* Redirect */ + { + struct redirect_header *redir_hdr; + struct lladdr_option *lladdr_opt; + ip6_addr_t destination_address, target_address; + + /* Check that Redir header fits in packet. */ + if (p->len < sizeof(struct redirect_header)) { + /* @todo debug message */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + redir_hdr = (struct redirect_header *)p->payload; + + /* Create an aligned, zoned copy of the destination address. */ + ip6_addr_copy_from_packed(destination_address, redir_hdr->destination_address); + ip6_addr_assign_zone(&destination_address, IP6_UNICAST, inp); + + /* Check a subset of the other RFC 4861 Sec. 8.1 requirements. */ + if (!ip6_addr_islinklocal(ip6_current_src_addr()) || + IP6H_HOPLIM(ip6_current_header()) != ND6_HOPLIM || + redir_hdr->code != 0 || ip6_addr_ismulticast(&destination_address)) { + pbuf_free(p); + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + return; + } + + /* @todo RFC MUST: IP source address equals first-hop router for destination_address */ + /* @todo RFC MUST: ICMP target address is either link-local address or same as destination_address */ + /* @todo RFC MUST: all included options have a length greater than zero */ + + if (p->len >= (sizeof(struct redirect_header) + 2)) { + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct redirect_header)); + if (p->len < (sizeof(struct redirect_header) + (lladdr_opt->length << 3))) { + lladdr_opt = NULL; + } + } else { + lladdr_opt = NULL; + } + + /* Find dest address in cache */ + dest_idx = nd6_find_destination_cache_entry(&destination_address); + if (dest_idx < 0) { + /* Destination not in cache, drop packet. */ + pbuf_free(p); + return; + } + + /* Create an aligned, zoned copy of the target address. */ + ip6_addr_copy_from_packed(target_address, redir_hdr->target_address); + ip6_addr_assign_zone(&target_address, IP6_UNICAST, inp); + + /* Set the new target address. 
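Subsequent packets for this destination are then
       routed via the redirect target as their next hop 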
*/ + ip6_addr_copy(destination_cache[dest_idx].next_hop_addr, target_address); + + /* If Link-layer address of other router is given, try to add to neighbor cache. */ + if (lladdr_opt != NULL) { + if (lladdr_opt->type == ND6_OPTION_TYPE_TARGET_LLADDR) { + i = nd6_find_neighbor_cache_entry(&target_address); + if (i < 0) { + i = nd6_new_neighbor_cache_entry(); + if (i >= 0) { + neighbor_cache[i].netif = inp; + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + ip6_addr_copy(neighbor_cache[i].next_hop_address, target_address); + + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } + if (i >= 0) { + if (neighbor_cache[i].state == ND6_INCOMPLETE) { + MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); + /* Receiving a message does not prove reachability: only in one direction. + * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ + neighbor_cache[i].state = ND6_DELAY; + neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; + } + } + } + } + break; /* ICMP6_TYPE_RD */ + } + case ICMP6_TYPE_PTB: /* Packet too big */ + { + struct icmp6_hdr *icmp6hdr; /* Packet too big message */ + struct ip6_hdr *ip6hdr; /* IPv6 header of the packet which caused the error */ + u32_t pmtu; + ip6_addr_t destination_address; + + /* Check that ICMPv6 header + IPv6 header fit in payload */ + if (p->len < (sizeof(struct icmp6_hdr) + IP6_HLEN)) { + /* drop short packets */ + pbuf_free(p); + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + return; + } + + icmp6hdr = (struct icmp6_hdr *)p->payload; + ip6hdr = (struct ip6_hdr *)((u8_t*)p->payload + sizeof(struct icmp6_hdr)); + + /* Create an aligned, zoned copy of the destination address. */ + ip6_addr_copy_from_packed(destination_address, ip6hdr->dest); + ip6_addr_assign_zone(&destination_address, IP6_UNKNOWN, inp); + + /* Look for entry in destination cache. */ + dest_idx = nd6_find_destination_cache_entry(&destination_address); + if (dest_idx < 0) { + /* Destination not in cache, drop packet. */ + pbuf_free(p); + return; + } + + /* Change the Path MTU. */ + pmtu = lwip_htonl(icmp6hdr->data); + destination_cache[dest_idx].pmtu = (u16_t)LWIP_MIN(pmtu, 0xFFFF); + + break; /* ICMP6_TYPE_PTB */ + } + + default: + ND6_STATS_INC(nd6.proterr); + ND6_STATS_INC(nd6.drop); + break; /* default */ + } + + pbuf_free(p); + return; +lenerr_drop_free_return: + ND6_STATS_INC(nd6.lenerr); + ND6_STATS_INC(nd6.drop); + pbuf_free(p); +} + + +/** + * Periodic timer for Neighbor discovery functions: + * + * - Update neighbor reachability states + * - Update destination cache entries age + * - Update invalidation timers of default routers and on-link prefixes + * - Update lifetimes of our addresses + * - Perform duplicate address detection (DAD) for our addresses + * - Send router solicitations + */ +void +nd6_tmr(void) +{ + s8_t i; + struct netif *netif; + + /* Process neighbor entries. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + switch (neighbor_cache[i].state) { + case ND6_INCOMPLETE: + if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) && + (!neighbor_cache[i].isrouter)) { + /* Retries exceeded. */ + nd6_free_neighbor_cache_entry(i); + } else { + /* Send a NS for this entry. 
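No link-layer address is known while the
           entry is INCOMPLETE, so the solicitation is sent to the target's
           solicited-node multicast address 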
*/ + neighbor_cache[i].counter.probes_sent++; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST); + } + break; + case ND6_REACHABLE: + /* Send queued packets, if any are left. Should have been sent already. */ + if (neighbor_cache[i].q != NULL) { + nd6_send_q(i); + } + if (neighbor_cache[i].counter.reachable_time <= ND6_TMR_INTERVAL) { + /* Change to stale state. */ + neighbor_cache[i].state = ND6_STALE; + neighbor_cache[i].counter.stale_time = 0; + } else { + neighbor_cache[i].counter.reachable_time -= ND6_TMR_INTERVAL; + } + break; + case ND6_STALE: + neighbor_cache[i].counter.stale_time++; + break; + case ND6_DELAY: + if (neighbor_cache[i].counter.delay_time <= 1) { + /* Change to PROBE state. */ + neighbor_cache[i].state = ND6_PROBE; + neighbor_cache[i].counter.probes_sent = 0; + } else { + neighbor_cache[i].counter.delay_time--; + } + break; + case ND6_PROBE: + if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) && + (!neighbor_cache[i].isrouter)) { + /* Retries exceeded. */ + nd6_free_neighbor_cache_entry(i); + } else { + /* Send a NS for this entry. */ + neighbor_cache[i].counter.probes_sent++; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], 0); + } + break; + case ND6_NO_ENTRY: + default: + /* Do nothing. */ + break; + } + } + + /* Process destination entries. */ + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + destination_cache[i].age++; + } + + /* Process router entries. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if (default_router_list[i].neighbor_entry != NULL) { + /* Active entry. */ + if (default_router_list[i].invalidation_timer <= ND6_TMR_INTERVAL / 1000) { + /* No more than 1 second remaining. Clear this entry. Also clear any of + * its destination cache entries, as per RFC 4861 Sec. 5.3 and 6.3.5. */ + s8_t j; + for (j = 0; j < LWIP_ND6_NUM_DESTINATIONS; j++) { + if (ip6_addr_cmp(&destination_cache[j].next_hop_addr, + &default_router_list[i].neighbor_entry->next_hop_address)) { + ip6_addr_set_any(&destination_cache[j].destination_addr); + } + } + default_router_list[i].neighbor_entry->isrouter = 0; + default_router_list[i].neighbor_entry = NULL; + default_router_list[i].invalidation_timer = 0; + default_router_list[i].flags = 0; + } else { + default_router_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000; + } + } + } + + /* Process prefix entries. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if (prefix_list[i].netif != NULL) { + if (prefix_list[i].invalidation_timer <= ND6_TMR_INTERVAL / 1000) { + /* Entry timed out, remove it */ + prefix_list[i].invalidation_timer = 0; + prefix_list[i].netif = NULL; + } else { + prefix_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000; + } + } + } + + /* Process our own addresses, updating address lifetimes and/or DAD state. */ + NETIF_FOREACH(netif) { + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { + u8_t addr_state; +#if LWIP_IPV6_ADDRESS_LIFETIMES + /* Step 1: update address lifetimes (valid and preferred). */ + addr_state = netif_ip6_addr_state(netif, i); + /* RFC 4862 is not entirely clear as to whether address lifetimes affect + * tentative addresses, and is even less clear as to what should happen + * with duplicate addresses. We choose to track and update lifetimes for + * both those types, although for different reasons: + * - for tentative addresses, the line of thought of Sec. 
5.7 combined + * with the potentially long period that an address may be in tentative + * state (due to the interface being down) suggests that lifetimes + * should be independent of external factors which would include DAD; + * - for duplicate addresses, retiring them early could result in a new + * but unwanted attempt at marking them as valid, while retiring them + * late/never could clog up address slots on the netif. + * As a result, we may end up expiring addresses of either type here. + */ + if (!ip6_addr_isinvalid(addr_state) && + !netif_ip6_addr_isstatic(netif, i)) { + u32_t life = netif_ip6_addr_valid_life(netif, i); + if (life <= ND6_TMR_INTERVAL / 1000) { + /* The address has expired. */ + netif_ip6_addr_set_valid_life(netif, i, 0); + netif_ip6_addr_set_pref_life(netif, i, 0); + netif_ip6_addr_set_state(netif, i, IP6_ADDR_INVALID); + } else { + if (!ip6_addr_life_isinfinite(life)) { + life -= ND6_TMR_INTERVAL / 1000; + LWIP_ASSERT("bad valid lifetime", life != IP6_ADDR_LIFE_STATIC); + netif_ip6_addr_set_valid_life(netif, i, life); + } + /* The address is still here. Update the preferred lifetime too. */ + life = netif_ip6_addr_pref_life(netif, i); + if (life <= ND6_TMR_INTERVAL / 1000) { + /* This case must also trigger if 'life' was already zero, so as to + * deal correctly with advertised preferred-lifetime reductions. */ + netif_ip6_addr_set_pref_life(netif, i, 0); + if (addr_state == IP6_ADDR_PREFERRED) + netif_ip6_addr_set_state(netif, i, IP6_ADDR_DEPRECATED); + } else if (!ip6_addr_life_isinfinite(life)) { + life -= ND6_TMR_INTERVAL / 1000; + netif_ip6_addr_set_pref_life(netif, i, life); + } + } + } + /* The address state may now have changed, so reobtain it next. */ +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + /* Step 2: update DAD state. */ + addr_state = netif_ip6_addr_state(netif, i); + if (ip6_addr_istentative(addr_state)) { + if ((addr_state & IP6_ADDR_TENTATIVE_COUNT_MASK) >= LWIP_IPV6_DUP_DETECT_ATTEMPTS) { + /* No NA received in response. Mark address as valid. For dynamic + * addresses with an expired preferred lifetime, the state is set to + * deprecated right away. That should almost never happen, though. */ + addr_state = IP6_ADDR_PREFERRED; +#if LWIP_IPV6_ADDRESS_LIFETIMES + if (!netif_ip6_addr_isstatic(netif, i) && + netif_ip6_addr_pref_life(netif, i) == 0) { + addr_state = IP6_ADDR_DEPRECATED; + } +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + netif_ip6_addr_set_state(netif, i, addr_state); + } else if (netif_is_up(netif) && netif_is_link_up(netif)) { + /* tentative: set next state by increasing by one */ + netif_ip6_addr_set_state(netif, i, addr_state + 1); + /* Send a NS for this address. Use the unspecified address as source + * address in all cases (RFC 4862 Sec. 5.4.2), not in the least + * because as it is, we only consider multicast replies for DAD. */ + nd6_send_ns(netif, netif_ip6_addr(netif, i), + ND6_SEND_FLAG_MULTICAST_DEST | ND6_SEND_FLAG_ANY_SRC); + } + } + } + } + +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* Send router solicitation messages, if necessary. 
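The nd6_tmr_rs_reduction
     countdown spaces solicitations ND6_RTR_SOLICITATION_INTERVAL apart, even
     though nd6_tmr() itself runs every ND6_TMR_INTERVAL milliseconds 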
*/
+  if (!nd6_tmr_rs_reduction) {
+    nd6_tmr_rs_reduction = (ND6_RTR_SOLICITATION_INTERVAL / ND6_TMR_INTERVAL) - 1;
+    NETIF_FOREACH(netif) {
+      if ((netif->rs_count > 0) && netif_is_up(netif) &&
+          netif_is_link_up(netif) &&
+          !ip6_addr_isinvalid(netif_ip6_addr_state(netif, 0)) &&
+          !ip6_addr_isduplicated(netif_ip6_addr_state(netif, 0))) {
+        if (nd6_send_rs(netif) == ERR_OK) {
+          netif->rs_count--;
+        }
+      }
+    }
+  } else {
+    nd6_tmr_rs_reduction--;
+  }
+#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */
+
+}
+
+/** Send a neighbor solicitation message for a specific neighbor cache entry
+ *
+ * @param entry the neighbor cache entry for which to send the message
+ * @param flags one of ND6_SEND_FLAG_*
+ */
+static void
+nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags)
+{
+  nd6_send_ns(entry->netif, &entry->next_hop_address, flags);
+}
+
+/**
+ * Send a neighbor solicitation message
+ *
+ * @param netif the netif on which to send the message
+ * @param target_addr the IPv6 target address for the ND message
+ * @param flags one of ND6_SEND_FLAG_*
+ */
+static void
+nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags)
+{
+  struct ns_header *ns_hdr;
+  struct pbuf *p;
+  const ip6_addr_t *src_addr;
+  u16_t lladdr_opt_len;
+
+  LWIP_ASSERT("target address is required", target_addr != NULL);
+
+  if (!(flags & ND6_SEND_FLAG_ANY_SRC) &&
+      ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) {
+    /* Use link-local address as source address. */
+    src_addr = netif_ip6_addr(netif, 0);
+    /* calculate option length (in 8-byte-blocks) */
+    lladdr_opt_len = ((netif->hwaddr_len + 2) + 7) >> 3;
+  } else {
+    src_addr = IP6_ADDR_ANY6;
+    /* Option "MUST NOT be included when the source IP address is the unspecified address." */
+    lladdr_opt_len = 0;
+  }
+
+  /* Allocate a packet. */
+  p = pbuf_alloc(PBUF_IP, sizeof(struct ns_header) + (lladdr_opt_len << 3), PBUF_RAM);
+  if (p == NULL) {
+    ND6_STATS_INC(nd6.memerr);
+    return;
+  }
+
+  /* Set fields. */
+  ns_hdr = (struct ns_header *)p->payload;
+
+  ns_hdr->type = ICMP6_TYPE_NS;
+  ns_hdr->code = 0;
+  ns_hdr->chksum = 0;
+  ns_hdr->reserved = 0;
+  ip6_addr_copy_to_packed(ns_hdr->target_address, *target_addr);
+
+  if (lladdr_opt_len != 0) {
+    struct lladdr_option *lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header));
+    lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR;
+    lladdr_opt->length = (u8_t)lladdr_opt_len;
+    SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len);
+  }
+
+  /* Generate the solicited node address for the target address. */
+  if (flags & ND6_SEND_FLAG_MULTICAST_DEST) {
+    ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]);
+    ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif);
+    target_addr = &multicast_address;
+  }
+
+#if CHECKSUM_GEN_ICMP6
+  IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) {
+    ns_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr,
+      target_addr);
+  }
+#endif /* CHECKSUM_GEN_ICMP6 */
+
+  /* Send the packet out. */
+  ND6_STATS_INC(nd6.xmit);
+  ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? 
NULL : src_addr, target_addr, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); +} + +/** + * Send a neighbor advertisement message + * + * @param netif the netif on which to send the message + * @param target_addr the IPv6 target address for the ND message + * @param flags one of ND6_SEND_FLAG_* + */ +static void +nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags) +{ + struct na_header *na_hdr; + struct lladdr_option *lladdr_opt; + struct pbuf *p; + const ip6_addr_t *src_addr; + const ip6_addr_t *dest_addr; + u16_t lladdr_opt_len; + + LWIP_ASSERT("target address is required", target_addr != NULL); + + /* Use link-local address as source address. */ + /* src_addr = netif_ip6_addr(netif, 0); */ + /* Use target address as source address. */ + src_addr = target_addr; + + /* Allocate a packet. */ + lladdr_opt_len = ((netif->hwaddr_len + 2) >> 3) + (((netif->hwaddr_len + 2) & 0x07) ? 1 : 0); + p = pbuf_alloc(PBUF_IP, sizeof(struct na_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return; + } + + /* Set fields. */ + na_hdr = (struct na_header *)p->payload; + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); + + na_hdr->type = ICMP6_TYPE_NA; + na_hdr->code = 0; + na_hdr->chksum = 0; + na_hdr->flags = flags & 0xf0; + na_hdr->reserved[0] = 0; + na_hdr->reserved[1] = 0; + na_hdr->reserved[2] = 0; + ip6_addr_copy_to_packed(na_hdr->target_address, *target_addr); + + lladdr_opt->type = ND6_OPTION_TYPE_TARGET_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + + /* Generate the solicited node address for the target address. */ + if (flags & ND6_SEND_FLAG_MULTICAST_DEST) { + ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + dest_addr = &multicast_address; + } else if (flags & ND6_SEND_FLAG_ALLNODES_DEST) { + ip6_addr_set_allnodes_linklocal(&multicast_address); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + dest_addr = &multicast_address; + } else { + dest_addr = ip6_current_src_addr(); + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + na_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + dest_addr); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + ip6_output_if(p, src_addr, dest_addr, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); +} + +#if LWIP_IPV6_SEND_ROUTER_SOLICIT +/** + * Send a router solicitation message + * + * @param netif the netif on which to send the message + */ +static err_t +nd6_send_rs(struct netif *netif) +{ + struct rs_header *rs_hdr; + struct lladdr_option *lladdr_opt; + struct pbuf *p; + const ip6_addr_t *src_addr; + err_t err; + u16_t lladdr_opt_len = 0; + + /* Link-local source address, or unspecified address? */ + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { + src_addr = netif_ip6_addr(netif, 0); + } else { + src_addr = IP6_ADDR_ANY6; + } + + /* Generate the all routers target address. */ + ip6_addr_set_allrouters_linklocal(&multicast_address); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + + /* Allocate a packet. */ + if (src_addr != IP6_ADDR_ANY6) { + lladdr_opt_len = ((netif->hwaddr_len + 2) >> 3) + (((netif->hwaddr_len + 2) & 0x07) ? 
1 : 0); + } + p = pbuf_alloc(PBUF_IP, sizeof(struct rs_header) + (lladdr_opt_len << 3), PBUF_RAM); + if (p == NULL) { + ND6_STATS_INC(nd6.memerr); + return ERR_BUF; + } + + /* Set fields. */ + rs_hdr = (struct rs_header *)p->payload; + + rs_hdr->type = ICMP6_TYPE_RS; + rs_hdr->code = 0; + rs_hdr->chksum = 0; + rs_hdr->reserved = 0; + + if (src_addr != IP6_ADDR_ANY6) { + /* Include our hw address. */ + lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct rs_header)); + lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR; + lladdr_opt->length = (u8_t)lladdr_opt_len; + SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); + } + +#if CHECKSUM_GEN_ICMP6 + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { + rs_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, + &multicast_address); + } +#endif /* CHECKSUM_GEN_ICMP6 */ + + /* Send the packet out. */ + ND6_STATS_INC(nd6.xmit); + + err = ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? NULL : src_addr, &multicast_address, + ND6_HOPLIM, 0, IP6_NEXTH_ICMP6, netif); + pbuf_free(p); + + return err; +} +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ + +/** + * Search for a neighbor cache entry + * + * @param ip6addr the IPv6 address of the neighbor + * @return The neighbor cache entry index that matched, -1 if no + * entry is found + */ +static s8_t +nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr) +{ + s8_t i; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (ip6_addr_cmp(ip6addr, &(neighbor_cache[i].next_hop_address))) { + return i; + } + } + return -1; +} + +/** + * Create a new neighbor cache entry. + * + * If no unused entry is found, will try to recycle an old entry + * according to ad-hoc "age" heuristic. + * + * @return The neighbor cache entry index that was created, -1 if no + * entry could be created + */ +static s8_t +nd6_new_neighbor_cache_entry(void) +{ + s8_t i; + s8_t j; + u32_t time; + + + /* First, try to find an empty entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (neighbor_cache[i].state == ND6_NO_ENTRY) { + return i; + } + } + + /* We need to recycle an entry. in general, do not recycle if it is a router. */ + + /* Next, try to find a Stale entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_STALE) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find a Probe entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_PROBE) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find a Delayed entry. */ + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_DELAY) && + (!neighbor_cache[i].isrouter)) { + nd6_free_neighbor_cache_entry(i); + return i; + } + } + + /* Next, try to find the oldest reachable entry. */ + time = 0xfffffffful; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_REACHABLE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.reachable_time < time) { + j = i; + time = neighbor_cache[i].counter.reachable_time; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* Next, find oldest incomplete entry without queued packets. 
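INCOMPLETE entries only age
     through probing, so the entry with the highest probes_sent count is taken
     to be the oldest 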
*/ + time = 0; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ( + (neighbor_cache[i].q == NULL) && + (neighbor_cache[i].state == ND6_INCOMPLETE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.probes_sent >= time) { + j = i; + time = neighbor_cache[i].counter.probes_sent; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* Next, find oldest incomplete entry with queued packets. */ + time = 0; + j = -1; + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if ((neighbor_cache[i].state == ND6_INCOMPLETE) && + (!neighbor_cache[i].isrouter)) { + if (neighbor_cache[i].counter.probes_sent >= time) { + j = i; + time = neighbor_cache[i].counter.probes_sent; + } + } + } + if (j >= 0) { + nd6_free_neighbor_cache_entry(j); + return j; + } + + /* No more entries to try. */ + return -1; +} + +/** + * Will free any resources associated with a neighbor cache + * entry, and will mark it as unused. + * + * @param i the neighbor cache entry index to free + */ +static void +nd6_free_neighbor_cache_entry(s8_t i) +{ + if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) { + return; + } + if (neighbor_cache[i].isrouter) { + /* isrouter needs to be cleared before deleting a neighbor cache entry */ + return; + } + + /* Free any queued packets. */ + if (neighbor_cache[i].q != NULL) { + nd6_free_q(neighbor_cache[i].q); + neighbor_cache[i].q = NULL; + } + + neighbor_cache[i].state = ND6_NO_ENTRY; + neighbor_cache[i].isrouter = 0; + neighbor_cache[i].netif = NULL; + neighbor_cache[i].counter.reachable_time = 0; + ip6_addr_set_zero(&(neighbor_cache[i].next_hop_address)); +} + +/** + * Search for a destination cache entry + * + * @param ip6addr the IPv6 address of the destination + * @return The destination cache entry index that matched, -1 if no + * entry is found + */ +static s16_t +nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr) +{ + s16_t i; + + IP6_ADDR_ZONECHECK(ip6addr); + + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (ip6_addr_cmp(ip6addr, &(destination_cache[i].destination_addr))) { + return i; + } + } + return -1; +} + +/** + * Create a new destination cache entry. If no unused entry is found, + * will recycle oldest entry. + * + * @return The destination cache entry index that was created, -1 if no + * entry was created + */ +static s16_t +nd6_new_destination_cache_entry(void) +{ + s16_t i, j; + u32_t age; + + /* Find an empty entry. */ + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (ip6_addr_isany(&(destination_cache[i].destination_addr))) { + return i; + } + } + + /* Find oldest entry. */ + age = 0; + j = LWIP_ND6_NUM_DESTINATIONS - 1; + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + if (destination_cache[i].age > age) { + j = i; + } + } + + return j; +} + +/** + * Clear the destination cache. + * + * This operation may be necessary for consistency in the light of changing + * local addresses and/or use of the gateway hook. + */ +void +nd6_clear_destination_cache(void) +{ + int i; + + for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { + ip6_addr_set_any(&destination_cache[i].destination_addr); + } +} + +/** + * Determine whether an address matches an on-link prefix or the subnet of a + * statically assigned address. + * + * @param ip6addr the IPv6 address to match + * @return 1 if the address is on-link, 0 otherwise + */ +static int +nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif) +{ + s8_t i; + + /* Check to see if the address matches an on-link prefix. 
*/
+  for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) {
+    if ((prefix_list[i].netif == netif) &&
+        (prefix_list[i].invalidation_timer > 0) &&
+        ip6_addr_netcmp(ip6addr, &(prefix_list[i].prefix))) {
+      return 1;
+    }
+  }
+  /* Check to see if address prefix matches a manually configured (= static)
+   * address. Static addresses have an implied /64 subnet assignment. Dynamic
+   * addresses (from autoconfiguration) have no implied subnet assignment, and
+   * are thus effectively /128 assignments. See RFC 5942 for more on this. */
+  for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+    if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) &&
+        netif_ip6_addr_isstatic(netif, i) &&
+        ip6_addr_netcmp(ip6addr, netif_ip6_addr(netif, i))) {
+      return 1;
+    }
+  }
+  return 0;
+}
+
+/**
+ * Select a default router for a destination.
+ *
+ * This function is used both for routing and for finding a next-hop target for
+ * a packet. In the former case, the given netif is NULL, and the returned
+ * router entry must be for a netif suitable for sending packets (up, link up).
+ * In the latter case, the given netif is not NULL and restricts router choice.
+ *
+ * @param ip6addr the destination address
+ * @param netif the netif for the outgoing packet, if known
+ * @return the default router entry index, or -1 if no suitable
+ *         router is found
+ */
+static s8_t
+nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif)
+{
+  struct netif *router_netif;
+  s8_t i, j, valid_router;
+  static s8_t last_router;
+
+  LWIP_UNUSED_ARG(ip6addr); /* @todo match preferred routes!! (must implement ND6_OPTION_TYPE_ROUTE_INFO) */
+
+  /* @todo: implement default router preference */
+
+  /* Look for valid routers. A reachable router is preferred. */
+  valid_router = -1;
+  for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) {
+    /* Is the router netif both set and appropriate? */
+    if (default_router_list[i].neighbor_entry != NULL) {
+      router_netif = default_router_list[i].neighbor_entry->netif;
+      if ((router_netif != NULL) && (netif != NULL ? netif == router_netif :
+          (netif_is_up(router_netif) && netif_is_link_up(router_netif)))) {
+        /* Is the router valid, i.e., reachable or probably reachable as per
+         * RFC 4861 Sec. 6.3.6? Note that we will never return a router that
+         * has no neighbor cache entry, due to the netif association tests. */
+        if (default_router_list[i].neighbor_entry->state != ND6_INCOMPLETE) {
+          /* Is the router known to be reachable? */
+          if (default_router_list[i].neighbor_entry->state == ND6_REACHABLE) {
+            return i; /* valid and reachable - done! */
+          } else if (valid_router < 0) {
+            valid_router = i; /* valid but not known to be reachable */
+          }
+        }
+      }
+    }
+  }
+  if (valid_router >= 0) {
+    return valid_router;
+  }
+
+  /* Look for any router for which we have any information at all. */
+  /* last_router is used for round-robin selection of incomplete routers, as
+   * recommended in RFC 4861 Sec. 6.3.6 point (2). Advance only when picking a
+   * route, to select the same router as next-hop target in the common case. */
+  if ((netif == NULL) && (++last_router >= LWIP_ND6_NUM_ROUTERS)) {
+    last_router = 0;
+  }
+  i = last_router;
+  for (j = 0; j < LWIP_ND6_NUM_ROUTERS; j++) {
+    if (default_router_list[i].neighbor_entry != NULL) {
+      router_netif = default_router_list[i].neighbor_entry->netif;
+      if ((router_netif != NULL) && (netif != NULL ? 
netif == router_netif : + (netif_is_up(router_netif) && netif_is_link_up(router_netif)))) { + return i; + } + } + if (++i >= LWIP_ND6_NUM_ROUTERS) { + i = 0; + } + } + + /* no suitable router found. */ + return -1; +} + +/** + * Find a router-announced route to the given destination. This route may be + * based on an on-link prefix or a default router. + * + * If a suitable route is found, the returned netif is guaranteed to be in a + * suitable state (up, link up) to be used for packet transmission. + * + * @param ip6addr the destination IPv6 address + * @return the netif to use for the destination, or NULL if none found + */ +struct netif * +nd6_find_route(const ip6_addr_t *ip6addr) +{ + struct netif *netif; + s8_t i; + + /* @todo decide if it makes sense to check the destination cache first */ + + /* Check if there is a matching on-link prefix. There may be multiple + * matches. Pick the first one that is associated with a suitable netif. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + netif = prefix_list[i].netif; + if ((netif != NULL) && ip6_addr_netcmp(&prefix_list[i].prefix, ip6addr) && + netif_is_up(netif) && netif_is_link_up(netif)) { + return netif; + } + } + + /* No on-link prefix match. Find a router that can forward the packet. */ + i = nd6_select_router(ip6addr, NULL); + if (i >= 0) { + LWIP_ASSERT("selected router must have a neighbor entry", + default_router_list[i].neighbor_entry != NULL); + return default_router_list[i].neighbor_entry->netif; + } + + return NULL; +} + +/** + * Find an entry for a default router. + * + * @param router_addr the IPv6 address of the router + * @param netif the netif on which the router is found, if known + * @return the index of the router entry, or -1 if not found + */ +static s8_t +nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif) +{ + s8_t i; + + IP6_ADDR_ZONECHECK_NETIF(router_addr, netif); + + /* Look for router. */ + for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { + if ((default_router_list[i].neighbor_entry != NULL) && + ((netif != NULL) ? netif == default_router_list[i].neighbor_entry->netif : 1) && + ip6_addr_cmp(router_addr, &(default_router_list[i].neighbor_entry->next_hop_address))) { + return i; + } + } + + /* router not found. */ + return -1; +} + +/** + * Create a new entry for a default router. + * + * @param router_addr the IPv6 address of the router + * @param netif the netif on which the router is connected, if known + * @return the index on the router table, or -1 if could not be created + */ +static s8_t +nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif) +{ + s8_t router_index; + s8_t free_router_index; + s8_t neighbor_index; + + IP6_ADDR_ZONECHECK_NETIF(router_addr, netif); + + /* Do we have a neighbor entry for this router? */ + neighbor_index = nd6_find_neighbor_cache_entry(router_addr); + if (neighbor_index < 0) { + /* Create a neighbor entry for this router. */ + neighbor_index = nd6_new_neighbor_cache_entry(); + if (neighbor_index < 0) { + /* Could not create neighbor entry for this router. */ + return -1; + } + ip6_addr_set(&(neighbor_cache[neighbor_index].next_hop_address), router_addr); + neighbor_cache[neighbor_index].netif = netif; + neighbor_cache[neighbor_index].q = NULL; + neighbor_cache[neighbor_index].state = ND6_INCOMPLETE; + neighbor_cache[neighbor_index].counter.probes_sent = 1; + nd6_send_neighbor_cache_probe(&neighbor_cache[neighbor_index], ND6_SEND_FLAG_MULTICAST_DEST); + } + + /* Mark neighbor as router. 
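Setting isrouter also protects the entry from
     being recycled by nd6_new_neighbor_cache_entry(); it is cleared again
     below if no router-list slot is free 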
*/ + neighbor_cache[neighbor_index].isrouter = 1; + + /* Look for empty entry. */ + free_router_index = LWIP_ND6_NUM_ROUTERS; + for (router_index = LWIP_ND6_NUM_ROUTERS - 1; router_index >= 0; router_index--) { + /* check if router already exists (this is a special case for 2 netifs on the same subnet + - e.g. wifi and cable) */ + if(default_router_list[router_index].neighbor_entry == &(neighbor_cache[neighbor_index])){ + return router_index; + } + if (default_router_list[router_index].neighbor_entry == NULL) { + /* remember lowest free index to create a new entry */ + free_router_index = router_index; + } + } + if (free_router_index < LWIP_ND6_NUM_ROUTERS) { + default_router_list[free_router_index].neighbor_entry = &(neighbor_cache[neighbor_index]); + return free_router_index; + } + + /* Could not create a router entry. */ + + /* Mark neighbor entry as not-router. Entry might be useful as neighbor still. */ + neighbor_cache[neighbor_index].isrouter = 0; + + /* router not found. */ + return -1; +} + +/** + * Find the cached entry for an on-link prefix. + * + * @param prefix the IPv6 prefix that is on-link + * @param netif the netif on which the prefix is on-link + * @return the index on the prefix table, or -1 if not found + */ +static s8_t +nd6_get_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif) +{ + s8_t i; + + /* Look for prefix in list. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + if ((ip6_addr_netcmp(&(prefix_list[i].prefix), prefix)) && + (prefix_list[i].netif == netif)) { + return i; + } + } + + /* Entry not available. */ + return -1; +} + +/** + * Creates a new entry for an on-link prefix. + * + * @param prefix the IPv6 prefix that is on-link + * @param netif the netif on which the prefix is on-link + * @return the index on the prefix table, or -1 if not created + */ +static s8_t +nd6_new_onlink_prefix(const ip6_addr_t *prefix, struct netif *netif) +{ + s8_t i; + + /* Create new entry. */ + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { + if ((prefix_list[i].netif == NULL) || + (prefix_list[i].invalidation_timer == 0)) { + /* Found empty prefix entry. */ + prefix_list[i].netif = netif; + ip6_addr_set(&(prefix_list[i].prefix), prefix); + return i; + } + } + + /* Entry not available. */ + return -1; +} + +/** + * Determine the next hop for a destination. Will determine if the + * destination is on-link, else a suitable on-link router is selected. + * + * The last entry index is cached for fast entry search. + * + * @param ip6addr the destination address + * @param netif the netif on which the packet will be sent + * @return the neighbor cache entry for the next hop, ERR_RTE if no + * suitable next hop was found, ERR_MEM if no cache entry + * could be created + */ +static s8_t +nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif) +{ +#ifdef LWIP_HOOK_ND6_GET_GW + const ip6_addr_t *next_hop_addr; +#endif /* LWIP_HOOK_ND6_GET_GW */ + s8_t i; + s16_t dst_idx; + + IP6_ADDR_ZONECHECK_NETIF(ip6addr, netif); + +#if LWIP_NETIF_HWADDRHINT + if (netif->hints != NULL) { + /* per-pcb cached entry was given */ + netif_addr_idx_t addr_hint = netif->hints->addr_hint; + if (addr_hint < LWIP_ND6_NUM_DESTINATIONS) { + nd6_cached_destination_index = addr_hint; + } + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* Look for ip6addr in destination cache. */ + if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) { + /* the cached entry index is the right one! */ + /* do nothing. 
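The per-pcb hint checked above, or an earlier lookup, already primed nd6_cached_destination_index for this destination.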
*/ + ND6_STATS_INC(nd6.cachehit); + } else { + /* Search destination cache. */ + dst_idx = nd6_find_destination_cache_entry(ip6addr); + if (dst_idx >= 0) { + /* found destination entry. make it our new cached index. */ + LWIP_ASSERT("type overflow", (size_t)dst_idx < NETIF_ADDR_IDX_MAX); + nd6_cached_destination_index = (netif_addr_idx_t)dst_idx; + } else { + /* Not found. Create a new destination entry. */ + dst_idx = nd6_new_destination_cache_entry(); + if (dst_idx >= 0) { + /* got new destination entry. make it our new cached index. */ + LWIP_ASSERT("type overflow", (size_t)dst_idx < NETIF_ADDR_IDX_MAX); + nd6_cached_destination_index = (netif_addr_idx_t)dst_idx; + } else { + /* Could not create a destination cache entry. */ + return ERR_MEM; + } + + /* Copy dest address to destination cache. */ + ip6_addr_set(&(destination_cache[nd6_cached_destination_index].destination_addr), ip6addr); + + /* Now find the next hop. is it a neighbor? */ + if (ip6_addr_islinklocal(ip6addr) || + nd6_is_prefix_in_netif(ip6addr, netif)) { + /* Destination in local link. */ + destination_cache[nd6_cached_destination_index].pmtu = netif_mtu6(netif); + ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, destination_cache[nd6_cached_destination_index].destination_addr); +#ifdef LWIP_HOOK_ND6_GET_GW + } else if ((next_hop_addr = LWIP_HOOK_ND6_GET_GW(netif, ip6addr)) != NULL) { + /* Next hop for destination provided by hook function. */ + destination_cache[nd6_cached_destination_index].pmtu = netif->mtu; + ip6_addr_set(&destination_cache[nd6_cached_destination_index].next_hop_addr, next_hop_addr); +#endif /* LWIP_HOOK_ND6_GET_GW */ + } else { + /* We need to select a router. */ + i = nd6_select_router(ip6addr, netif); + if (i < 0) { + /* No router found. */ + ip6_addr_set_any(&(destination_cache[nd6_cached_destination_index].destination_addr)); + return ERR_RTE; + } + destination_cache[nd6_cached_destination_index].pmtu = netif_mtu6(netif); /* Start with netif mtu, correct through ICMPv6 if necessary */ + ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, default_router_list[i].neighbor_entry->next_hop_address); + } + } + } + +#if LWIP_NETIF_HWADDRHINT + if (netif->hints != NULL) { + /* per-pcb cached entry was given */ + netif->hints->addr_hint = nd6_cached_destination_index; + } +#endif /* LWIP_NETIF_HWADDRHINT */ + + /* Look in neighbor cache for the next-hop address. */ + if (ip6_addr_cmp(&(destination_cache[nd6_cached_destination_index].next_hop_addr), + &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) { + /* Cache hit. */ + /* Do nothing. */ + ND6_STATS_INC(nd6.cachehit); + } else { + i = nd6_find_neighbor_cache_entry(&(destination_cache[nd6_cached_destination_index].next_hop_addr)); + if (i >= 0) { + /* Found a matching record, make it new cached entry. */ + nd6_cached_neighbor_index = i; + } else { + /* Neighbor not in cache. Make a new entry. */ + i = nd6_new_neighbor_cache_entry(); + if (i >= 0) { + /* got new neighbor entry. make it our new cached index. */ + nd6_cached_neighbor_index = i; + } else { + /* Could not create a neighbor cache entry. */ + return ERR_MEM; + } + + /* Initialize fields. 
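The entry starts out INCOMPLETE: a first multicast Neighbor Solicitation is sent below, and any retransmissions are then driven by the periodic ND6 timer.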
*/ + ip6_addr_copy(neighbor_cache[i].next_hop_address, + destination_cache[nd6_cached_destination_index].next_hop_addr); + neighbor_cache[i].isrouter = 0; + neighbor_cache[i].netif = netif; + neighbor_cache[i].state = ND6_INCOMPLETE; + neighbor_cache[i].counter.probes_sent = 1; + nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST); + } + } + + /* Reset this destination's age. */ + destination_cache[nd6_cached_destination_index].age = 0; + + return nd6_cached_neighbor_index; +} + +/** + * Queue a packet for a neighbor. + * + * @param neighbor_index the index in the neighbor cache table + * @param q packet to be queued + * @return ERR_OK if succeeded, ERR_MEM if out of memory + */ +static err_t +nd6_queue_packet(s8_t neighbor_index, struct pbuf *q) +{ + err_t result = ERR_MEM; + struct pbuf *p; + int copy_needed = 0; +#if LWIP_ND6_QUEUEING + struct nd6_q_entry *new_entry, *r; +#endif /* LWIP_ND6_QUEUEING */ + + if ((neighbor_index < 0) || (neighbor_index >= LWIP_ND6_NUM_NEIGHBORS)) { + return ERR_ARG; + } + + /* IF q includes a pbuf that must be copied, we have to copy the whole chain + * into a new PBUF_RAM. See the definition of PBUF_NEEDS_COPY for details. */ + p = q; + while (p) { + if (PBUF_NEEDS_COPY(p)) { + copy_needed = 1; + break; + } + p = p->next; + } + if (copy_needed) { + /* copy the whole packet into new pbufs */ + p = pbuf_clone(PBUF_LINK, PBUF_RAM, q); + while ((p == NULL) && (neighbor_cache[neighbor_index].q != NULL)) { + /* Free oldest packet (as per RFC recommendation) */ +#if LWIP_ND6_QUEUEING + r = neighbor_cache[neighbor_index].q; + neighbor_cache[neighbor_index].q = r->next; + r->next = NULL; + nd6_free_q(r); +#else /* LWIP_ND6_QUEUEING */ + pbuf_free(neighbor_cache[neighbor_index].q); + neighbor_cache[neighbor_index].q = NULL; +#endif /* LWIP_ND6_QUEUEING */ + p = pbuf_clone(PBUF_LINK, PBUF_RAM, q); + } + } else { + /* referencing the old pbuf is enough */ + p = q; + pbuf_ref(p); + } + /* packet was copied/ref'd? */ + if (p != NULL) { + /* queue packet ... */ +#if LWIP_ND6_QUEUEING + /* allocate a new nd6 queue entry */ + new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE); + if ((new_entry == NULL) && (neighbor_cache[neighbor_index].q != NULL)) { + /* Free oldest packet (as per RFC recommendation) */ + r = neighbor_cache[neighbor_index].q; + neighbor_cache[neighbor_index].q = r->next; + r->next = NULL; + nd6_free_q(r); + new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE); + } + if (new_entry != NULL) { + new_entry->next = NULL; + new_entry->p = p; + if (neighbor_cache[neighbor_index].q != NULL) { + /* queue was already existent, append the new entry to the end */ + r = neighbor_cache[neighbor_index].q; + while (r->next != NULL) { + r = r->next; + } + r->next = new_entry; + } else { + /* queue did not exist, first item in queue */ + neighbor_cache[neighbor_index].q = new_entry; + } + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index)); + result = ERR_OK; + } else { + /* the pool MEMP_ND6_QUEUE is empty */ + pbuf_free(p); + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)p)); + /* { result == ERR_MEM } through initialization */ + } +#else /* LWIP_ND6_QUEUEING */ + /* Queue a single packet. If an older packet is already queued, free it as per RFC. 
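(RFC 4861 Sec. 7.2.2 only requires buffering at least one packet, with newer arrivals replacing older ones, so this non-queueing variant keeps exactly the most recent packet.)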
*/ + if (neighbor_cache[neighbor_index].q != NULL) { + pbuf_free(neighbor_cache[neighbor_index].q); + } + neighbor_cache[neighbor_index].q = p; + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index)); + result = ERR_OK; +#endif /* LWIP_ND6_QUEUEING */ + } else { + LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)q)); + /* { result == ERR_MEM } through initialization */ + } + + return result; +} + +#if LWIP_ND6_QUEUEING +/** + * Free a complete queue of nd6 q entries + * + * @param q a queue of nd6_q_entry to free + */ +static void +nd6_free_q(struct nd6_q_entry *q) +{ + struct nd6_q_entry *r; + LWIP_ASSERT("q != NULL", q != NULL); + LWIP_ASSERT("q->p != NULL", q->p != NULL); + while (q) { + r = q; + q = q->next; + LWIP_ASSERT("r->p != NULL", (r->p != NULL)); + pbuf_free(r->p); + memp_free(MEMP_ND6_QUEUE, r); + } +} +#endif /* LWIP_ND6_QUEUEING */ + +/** + * Send queued packets for a neighbor + * + * @param i the neighbor to send packets to + */ +static void +nd6_send_q(s8_t i) +{ + struct ip6_hdr *ip6hdr; + ip6_addr_t dest; +#if LWIP_ND6_QUEUEING + struct nd6_q_entry *q; +#endif /* LWIP_ND6_QUEUEING */ + + if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) { + return; + } + +#if LWIP_ND6_QUEUEING + while (neighbor_cache[i].q != NULL) { + /* remember first in queue */ + q = neighbor_cache[i].q; + /* pop first item off the queue */ + neighbor_cache[i].q = q->next; + /* Get ipv6 header. */ + ip6hdr = (struct ip6_hdr *)(q->p->payload); + /* Create an aligned copy. */ + ip6_addr_copy_from_packed(dest, ip6hdr->dest); + /* Restore the zone, if applicable. */ + ip6_addr_assign_zone(&dest, IP6_UNKNOWN, neighbor_cache[i].netif); + /* send the queued IPv6 packet */ + (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, q->p, &dest); + /* free the queued IP packet */ + pbuf_free(q->p); + /* now queue entry can be freed */ + memp_free(MEMP_ND6_QUEUE, q); + } +#else /* LWIP_ND6_QUEUEING */ + if (neighbor_cache[i].q != NULL) { + /* Get ipv6 header. */ + ip6hdr = (struct ip6_hdr *)(neighbor_cache[i].q->payload); + /* Create an aligned copy. */ + ip6_addr_copy_from_packed(dest, ip6hdr->dest); + /* Restore the zone, if applicable. */ + ip6_addr_assign_zone(&dest, IP6_UNKNOWN, neighbor_cache[i].netif); + /* send the queued IPv6 packet */ + (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, neighbor_cache[i].q, &dest); + /* free the queued IP packet */ + pbuf_free(neighbor_cache[i].q); + neighbor_cache[i].q = NULL; + } +#endif /* LWIP_ND6_QUEUEING */ +} + +/** + * A packet is to be transmitted to a specific IPv6 destination on a specific + * interface. Check if we can find the hardware address of the next hop to use + * for the packet. If so, give the hardware address to the caller, which should + * use it to send the packet right away. Otherwise, enqueue the packet for + * later transmission while looking up the hardware address, if possible. + * + * As such, this function returns one of three different possible results: + * + * - ERR_OK with a non-NULL 'hwaddrp': the caller should send the packet now. + * - ERR_OK with a NULL 'hwaddrp': the packet has been enqueued for later. + * - not ERR_OK: something went wrong; forward the error upward in the stack. + * + * @param netif The lwIP network interface on which the IP packet will be sent. + * @param q The pbuf(s) containing the IP packet to be sent. + * @param ip6addr The destination IPv6 address of the packet. 
+ * @param hwaddrp On success, filled with a pointer to a HW address or NULL (meaning
+ *        the packet has been queued).
+ * @return
+ * - ERR_OK on success, ERR_RTE if no route was found for the packet,
+ * or ERR_MEM if low memory conditions prohibit sending the packet at all.
+ */
+err_t
+nd6_get_next_hop_addr_or_queue(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr, const u8_t **hwaddrp)
+{
+  s8_t i;
+
+  /* Get next hop record. */
+  i = nd6_get_next_hop_entry(ip6addr, netif);
+  if (i < 0) {
+    /* failed to get a next hop neighbor record. */
+    return i;
+  }
+
+  /* Now that we have a destination record, send or queue the packet. */
+  if (neighbor_cache[i].state == ND6_STALE) {
+    /* Switch to delay state. */
+    neighbor_cache[i].state = ND6_DELAY;
+    neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL;
+  }
+  /* @todo should we send or queue if PROBE? send for now, to let unicast NS pass. */
+  if ((neighbor_cache[i].state == ND6_REACHABLE) ||
+      (neighbor_cache[i].state == ND6_DELAY) ||
+      (neighbor_cache[i].state == ND6_PROBE)) {
+
+    /* Tell the caller to send out the packet now. */
+    *hwaddrp = neighbor_cache[i].lladdr;
+    return ERR_OK;
+  }
+
+  /* We should queue packet on this interface. */
+  *hwaddrp = NULL;
+  return nd6_queue_packet(i, q);
+}
+
+
+/**
+ * Get the Path MTU for a destination.
+ *
+ * @param ip6addr the destination address
+ * @param netif the netif on which the packet will be sent
+ * @return the Path MTU, if known, or the netif default MTU
+ */
+u16_t
+nd6_get_destination_mtu(const ip6_addr_t *ip6addr, struct netif *netif)
+{
+  s16_t i;
+
+  i = nd6_find_destination_cache_entry(ip6addr);
+  if (i >= 0) {
+    if (destination_cache[i].pmtu > 0) {
+      return destination_cache[i].pmtu;
+    }
+  }
+
+  if (netif != NULL) {
+    return netif_mtu6(netif);
+  }
+
+  return 1280; /* Minimum IPv6 MTU */
+}
+
+
+#if LWIP_ND6_TCP_REACHABILITY_HINTS
+/**
+ * Provide the Neighbor discovery process with a hint that a
+ * destination is reachable. Called by tcp_receive when ACKs are
+ * received or sent (as per RFC). This is useful to avoid sending
+ * NS messages every 30 seconds.
+ *
+ * @param ip6addr the destination address which is known to be reachable
+ *                by an upper layer protocol (TCP)
+ */
+void
+nd6_reachability_hint(const ip6_addr_t *ip6addr)
+{
+  s8_t i;
+  s16_t dst_idx;
+
+  /* Find destination in cache. */
+  if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) {
+    dst_idx = nd6_cached_destination_index;
+    ND6_STATS_INC(nd6.cachehit);
+  } else {
+    dst_idx = nd6_find_destination_cache_entry(ip6addr);
+  }
+  if (dst_idx < 0) {
+    return;
+  }
+
+  /* Find next hop neighbor in cache. */
+  if (ip6_addr_cmp(&(destination_cache[dst_idx].next_hop_addr), &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) {
+    i = nd6_cached_neighbor_index;
+    ND6_STATS_INC(nd6.cachehit);
+  } else {
+    i = nd6_find_neighbor_cache_entry(&(destination_cache[dst_idx].next_hop_addr));
+  }
+  if (i < 0) {
+    return;
+  }
+
+  /* For safety: don't set as reachable if we don't have a LL address yet. Misuse protection. */
+  if (neighbor_cache[i].state == ND6_INCOMPLETE || neighbor_cache[i].state == ND6_NO_ENTRY) {
+    return;
+  }
+
+  /* Set reachability state. */
+  neighbor_cache[i].state = ND6_REACHABLE;
+  neighbor_cache[i].counter.reachable_time = reachable_time;
+}
+#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */
+
+/**
+ * Remove all prefix, neighbor_cache and router entries of the specified netif.
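+ *
+ * Typically invoked when an interface is being removed. An illustrative
+ * sketch (editor's addition, not part of this patch) of a plausible call
+ * site, assuming the interface has already been taken down:
+ *
+ *   netif_set_down(netif);
+ *   nd6_cleanup_netif(netif);  => drop all ND6 state that references netif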
+ * + * @param netif points to a network interface + */ +void +nd6_cleanup_netif(struct netif *netif) +{ + u8_t i; + s8_t router_index; + for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { + if (prefix_list[i].netif == netif) { + prefix_list[i].netif = NULL; + } + } + for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { + if (neighbor_cache[i].netif == netif) { + for (router_index = 0; router_index < LWIP_ND6_NUM_ROUTERS; router_index++) { + if (default_router_list[router_index].neighbor_entry == &neighbor_cache[i]) { + default_router_list[router_index].neighbor_entry = NULL; + default_router_list[router_index].flags = 0; + } + } + neighbor_cache[i].isrouter = 0; + nd6_free_neighbor_cache_entry(i); + } + } + /* Clear the destination cache, since many entries may now have become + * invalid for one of several reasons. As destination cache entries have no + * netif association, use a sledgehammer approach (this can be improved). */ + nd6_clear_destination_cache(); +} + +#if LWIP_IPV6_MLD +/** + * The state of a local IPv6 address entry is about to change. If needed, join + * or leave the solicited-node multicast group for the address. + * + * @param netif The netif that owns the address. + * @param addr_idx The index of the address. + * @param new_state The new (IP6_ADDR_) state for the address. + */ +void +nd6_adjust_mld_membership(struct netif *netif, s8_t addr_idx, u8_t new_state) +{ + u8_t old_state, old_member, new_member; + + old_state = netif_ip6_addr_state(netif, addr_idx); + + /* Determine whether we were, and should be, a member of the solicited-node + * multicast group for this address. For tentative addresses, the group is + * not joined until the address enters the TENTATIVE_1 (or VALID) state. */ + old_member = (old_state != IP6_ADDR_INVALID && old_state != IP6_ADDR_DUPLICATED && old_state != IP6_ADDR_TENTATIVE); + new_member = (new_state != IP6_ADDR_INVALID && new_state != IP6_ADDR_DUPLICATED && new_state != IP6_ADDR_TENTATIVE); + + if (old_member != new_member) { + ip6_addr_set_solicitednode(&multicast_address, netif_ip6_addr(netif, addr_idx)->addr[3]); + ip6_addr_assign_zone(&multicast_address, IP6_MULTICAST, netif); + + if (new_member) { + mld6_joingroup_netif(netif, &multicast_address); + } else { + mld6_leavegroup_netif(netif, &multicast_address); + } + } +} +#endif /* LWIP_IPV6_MLD */ + +/** Netif was added, set up, or reconnected (link up) */ +void +nd6_restart_netif(struct netif *netif) +{ +#if LWIP_IPV6_SEND_ROUTER_SOLICIT + /* Send Router Solicitation messages (see RFC 4861, ch. 6.3.7). */ + netif->rs_count = LWIP_ND6_MAX_MULTICAST_SOLICIT; +#endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ +} + +#endif /* LWIP_IPV6 */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/mem.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/mem.c new file mode 100644 index 0000000..16fd984 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/mem.c @@ -0,0 +1,1017 @@ +/** + * @file + * Dynamic memory manager + * + * This is a lightweight replacement for the standard C library malloc(). 
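+ *
+ * Basic usage is malloc-like; a minimal sketch (editor's illustration, not
+ * part of this patch):
+ *
+ *   void *p = mem_malloc(128);
+ *   if (p != NULL) {
+ *     ... use up to 128 bytes ...
+ *     mem_free(p);
+ *   }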
+ *
+ * If you want to use the standard C library malloc() instead, define
+ * MEM_LIBC_MALLOC to 1 in your lwipopts.h
+ *
+ * To let mem_malloc() use pools (prevents fragmentation and is much faster than
+ * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
+ * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
+ * of pools like this (more pools can be added between _START and _END):
+ *
+ * Define three pools with sizes 256, 512, and 1512 bytes
+ * LWIP_MALLOC_MEMPOOL_START
+ * LWIP_MALLOC_MEMPOOL(20, 256)
+ * LWIP_MALLOC_MEMPOOL(10, 512)
+ * LWIP_MALLOC_MEMPOOL(5, 1512)
+ * LWIP_MALLOC_MEMPOOL_END
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *         Simon Goldschmidt
+ *
+ */
+
+#include "lwip/opt.h"
+#include "lwip/mem.h"
+#include "lwip/def.h"
+#include "lwip/sys.h"
+#include "lwip/stats.h"
+#include "lwip/err.h"
+
+#include <string.h>
+
+#if MEM_LIBC_MALLOC
+#include <stdlib.h> /* for malloc()/free() */
+#endif
+
+/* This is overridable for tests only... */
+#ifndef LWIP_MEM_ILLEGAL_FREE
+#define LWIP_MEM_ILLEGAL_FREE(msg) LWIP_ASSERT(msg, 0)
+#endif
+
+#define MEM_STATS_INC_LOCKED(x) SYS_ARCH_LOCKED(MEM_STATS_INC(x))
+#define MEM_STATS_INC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_INC_USED(x, y))
+#define MEM_STATS_DEC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_DEC_USED(x, y))
+
+#if MEM_OVERFLOW_CHECK
+#define MEM_SANITY_OFFSET MEM_SANITY_REGION_BEFORE_ALIGNED
+#define MEM_SANITY_OVERHEAD (MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED)
+#else
+#define MEM_SANITY_OFFSET 0
+#define MEM_SANITY_OVERHEAD 0
+#endif
+
+#if MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK
+/**
+ * Check if a mem element was victim of an overflow or underflow
+ * (e.g. the restricted area after/before it has been altered)
+ *
+ * @param p the mem element to check
+ * @param size allocated size of the element
+ * @param descr1 description of the element source shown on error
+ * @param descr2 description of the element source shown on error
+ */
+void
+mem_overflow_check_raw(void *p, size_t size, const char *descr1, const char *descr2)
+{
+#if MEM_SANITY_REGION_AFTER_ALIGNED || MEM_SANITY_REGION_BEFORE_ALIGNED
+  u16_t k;
+  u8_t *m;
+
+#if MEM_SANITY_REGION_AFTER_ALIGNED > 0
+  m = (u8_t *)p + size;
+  for (k = 0; k < MEM_SANITY_REGION_AFTER_ALIGNED; k++) {
+    if (m[k] != 0xcd) {
+      char errstr[128];
+      snprintf(errstr, sizeof(errstr), "detected mem overflow in %s%s", descr1, descr2);
+      LWIP_ASSERT(errstr, 0);
+    }
+  }
+#endif /* MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
+
+#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
+  m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
+  for (k = 0; k < MEM_SANITY_REGION_BEFORE_ALIGNED; k++) {
+    if (m[k] != 0xcd) {
+      char errstr[128];
+      snprintf(errstr, sizeof(errstr), "detected mem underflow in %s%s", descr1, descr2);
+      LWIP_ASSERT(errstr, 0);
+    }
+  }
+#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 */
+#else
+  LWIP_UNUSED_ARG(p);
+  LWIP_UNUSED_ARG(size);
+  LWIP_UNUSED_ARG(descr1);
+  LWIP_UNUSED_ARG(descr2);
+#endif
+}
+
+/**
+ * Initialize the restricted area of a mem element.
+ */
+void
+mem_overflow_init_raw(void *p, size_t size)
+{
+#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0
+  u8_t *m;
+#if MEM_SANITY_REGION_BEFORE_ALIGNED > 0
+  m = (u8_t *)p - MEM_SANITY_REGION_BEFORE_ALIGNED;
+  memset(m, 0xcd, MEM_SANITY_REGION_BEFORE_ALIGNED);
+#endif
+#if MEM_SANITY_REGION_AFTER_ALIGNED > 0
+  m = (u8_t *)p + size;
+  memset(m, 0xcd, MEM_SANITY_REGION_AFTER_ALIGNED);
+#endif
+#else /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
+  LWIP_UNUSED_ARG(p);
+  LWIP_UNUSED_ARG(size);
+#endif /* MEM_SANITY_REGION_BEFORE_ALIGNED > 0 || MEM_SANITY_REGION_AFTER_ALIGNED > 0 */
+}
+#endif /* MEM_OVERFLOW_CHECK || MEMP_OVERFLOW_CHECK */
+
+#if MEM_LIBC_MALLOC || MEM_USE_POOLS
+
+/** mem_init is not used when using pools instead of a heap or using
+ * C library malloc().
+ */
+void
+mem_init(void)
+{
+}
+
+/** mem_trim is not used when using pools instead of a heap or using
+ * C library malloc(): we cannot free part of a pool element, and the stack
+ * does not support mem_trim() returning a different pointer.
+ */
+void *
+mem_trim(void *mem, mem_size_t size)
+{
+  LWIP_UNUSED_ARG(size);
+  return mem;
+}
+#endif /* MEM_LIBC_MALLOC || MEM_USE_POOLS */
+
+#if MEM_LIBC_MALLOC
+/* lwIP heap implemented using C library malloc() */
+
+/* in case C library malloc() needs extra protection,
+ * allow these defines to be overridden.
+ */
+#ifndef mem_clib_free
+#define mem_clib_free free
+#endif
+#ifndef mem_clib_malloc
+#define mem_clib_malloc malloc
+#endif
+#ifndef mem_clib_calloc
+#define mem_clib_calloc calloc
+#endif
+
+#if LWIP_STATS && MEM_STATS
+#define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t))
+#else
+#define MEM_LIBC_STATSHELPER_SIZE 0
+#endif
+
+/**
+ * Allocate a block of memory with a minimum of 'size' bytes.
+ *
+ * @param size is the minimum size of the requested block in bytes.
+ * @return pointer to allocated memory or NULL if no free memory was found.
+ *
+ * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT).
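+ *
+ * As an editor's illustrative sketch (not part of this patch): when extra
+ * protection is wanted, the mem_clib_* hooks above can be redirected from
+ * lwipopts.h, assuming my_malloc()/my_free() exist in the application:
+ *
+ *   #define MEM_LIBC_MALLOC 1
+ *   #define mem_clib_malloc my_malloc
+ *   #define mem_clib_free   my_free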
+ */ +void * +mem_malloc(mem_size_t size) +{ + void *ret = mem_clib_malloc(size + MEM_LIBC_STATSHELPER_SIZE); + if (ret == NULL) { + MEM_STATS_INC_LOCKED(err); + } else { + LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret); +#if LWIP_STATS && MEM_STATS + *(mem_size_t *)ret = size; + ret = (u8_t *)ret + MEM_LIBC_STATSHELPER_SIZE; + MEM_STATS_INC_USED_LOCKED(used, size); +#endif + } + return ret; +} + +/** Put memory back on the heap + * + * @param rmem is the pointer as returned by a previous call to mem_malloc() + */ +void +mem_free(void *rmem) +{ + LWIP_ASSERT("rmem != NULL", (rmem != NULL)); + LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); +#if LWIP_STATS && MEM_STATS + rmem = (u8_t *)rmem - MEM_LIBC_STATSHELPER_SIZE; + MEM_STATS_DEC_USED_LOCKED(used, *(mem_size_t *)rmem); +#endif + mem_clib_free(rmem); +} + +#elif MEM_USE_POOLS + +/* lwIP heap implemented with different sized pools */ + +/** + * Allocate memory: determine the smallest pool that is big enough + * to contain an element of 'size' and get an element from that pool. + * + * @param size the size in bytes of the memory needed + * @return a pointer to the allocated memory or NULL if the pool is empty + */ +void * +mem_malloc(mem_size_t size) +{ + void *ret; + struct memp_malloc_helper *element = NULL; + memp_t poolnr; + mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); + + for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) { + /* is this pool big enough to hold an element of the required size + plus a struct memp_malloc_helper that saves the pool this element came from? */ + if (required_size <= memp_pools[poolnr]->size) { + element = (struct memp_malloc_helper *)memp_malloc(poolnr); + if (element == NULL) { + /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */ +#if MEM_USE_POOLS_TRY_BIGGER_POOL + /** Try a bigger pool if this one is empty! */ + if (poolnr < MEMP_POOL_LAST) { + continue; + } +#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */ + MEM_STATS_INC_LOCKED(err); + return NULL; + } + break; + } + } + if (poolnr > MEMP_POOL_LAST) { + LWIP_ASSERT("mem_malloc(): no pool is that big!", 0); + MEM_STATS_INC_LOCKED(err); + return NULL; + } + + /* save the pool number this element came from */ + element->poolnr = poolnr; + /* and return a pointer to the memory directly after the struct memp_malloc_helper */ + ret = (u8_t *)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); + +#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) + /* truncating to u16_t is safe because struct memp_desc::size is u16_t */ + element->size = (u16_t)size; + MEM_STATS_INC_USED_LOCKED(used, element->size); +#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */ +#if MEMP_OVERFLOW_CHECK + /* initialize unused memory (diff between requested size and selected pool's size) */ + memset((u8_t *)ret + size, 0xcd, memp_pools[poolnr]->size - size); +#endif /* MEMP_OVERFLOW_CHECK */ + return ret; +} + +/** + * Free memory previously allocated by mem_malloc. 
Loads the pool number + * and calls memp_free with that pool number to put the element back into + * its pool + * + * @param rmem the memory element to free + */ +void +mem_free(void *rmem) +{ + struct memp_malloc_helper *hmem; + + LWIP_ASSERT("rmem != NULL", (rmem != NULL)); + LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); + + /* get the original struct memp_malloc_helper */ + /* cast through void* to get rid of alignment warnings */ + hmem = (struct memp_malloc_helper *)(void *)((u8_t *)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper))); + + LWIP_ASSERT("hmem != NULL", (hmem != NULL)); + LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem))); + LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX)); + + MEM_STATS_DEC_USED_LOCKED(used, hmem->size); +#if MEMP_OVERFLOW_CHECK + { + u16_t i; + LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size", + hmem->size <= memp_pools[hmem->poolnr]->size); + /* check that unused memory remained untouched (diff between requested size and selected pool's size) */ + for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) { + u8_t data = *((u8_t *)rmem + i); + LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd); + } + } +#endif /* MEMP_OVERFLOW_CHECK */ + + /* and put it in the pool we saved earlier */ + memp_free(hmem->poolnr, hmem); +} + +#else /* MEM_USE_POOLS */ +/* lwIP replacement for your libc malloc() */ + +/** + * The heap is made up as a list of structs of this type. + * This does not have to be aligned since for getting its size, + * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns. + */ +struct mem { + /** index (-> ram[next]) of the next struct */ + mem_size_t next; + /** index (-> ram[prev]) of the previous struct */ + mem_size_t prev; + /** 1: this area is used; 0: this area is unused */ + u8_t used; +#if MEM_OVERFLOW_CHECK + /** this keeps track of the user allocation size for guard checks */ + mem_size_t user_size; +#endif +}; + +/** All allocated blocks will be MIN_SIZE bytes big, at least! + * MIN_SIZE can be overridden to suit your needs. Smaller values save space, + * larger values could prevent too small blocks to fragment the RAM too much. */ +#ifndef MIN_SIZE +#define MIN_SIZE 12 +#endif /* MIN_SIZE */ +/* some alignment macros: we define them here for better source code layout */ +#define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE) +#define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem)) +#define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE) + +/** If you want to relocate the heap to external memory, simply define + * LWIP_RAM_HEAP_POINTER as a void-pointer to that location. + * If so, make sure the memory at that location is big enough (see below on + * how that space is calculated). */ +#ifndef LWIP_RAM_HEAP_POINTER +/** the heap. we need one struct mem at the end and some room for alignment */ +LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U * SIZEOF_STRUCT_MEM)); +#define LWIP_RAM_HEAP_POINTER ram_heap +#endif /* LWIP_RAM_HEAP_POINTER */ + +/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */ +static u8_t *ram; +/** the last entry, always unused! */ +static struct mem *ram_end; + +/** concurrent access protection */ +#if !NO_SYS +static sys_mutex_t mem_mutex; +#endif + +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + +static volatile u8_t mem_free_count; + +/* Allow mem_free from other (e.g. 
interrupt) context */ +#define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free) +#define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free) +#define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free) +#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc) +#define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc) +#define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc) +#define LWIP_MEM_LFREE_VOLATILE volatile + +#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + +/* Protect the heap only by using a mutex */ +#define LWIP_MEM_FREE_DECL_PROTECT() +#define LWIP_MEM_FREE_PROTECT() sys_mutex_lock(&mem_mutex) +#define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex) +/* mem_malloc is protected using mutex AND LWIP_MEM_ALLOC_PROTECT */ +#define LWIP_MEM_ALLOC_DECL_PROTECT() +#define LWIP_MEM_ALLOC_PROTECT() +#define LWIP_MEM_ALLOC_UNPROTECT() +#define LWIP_MEM_LFREE_VOLATILE + +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + +/** pointer to the lowest free block, this is used for faster search */ +static struct mem * LWIP_MEM_LFREE_VOLATILE lfree; + +#if MEM_SANITY_CHECK +static void mem_sanity(void); +#define MEM_SANITY() mem_sanity() +#else +#define MEM_SANITY() +#endif + +#if MEM_OVERFLOW_CHECK +static void +mem_overflow_init_element(struct mem *mem, mem_size_t user_size) +{ + void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + mem->user_size = user_size; + mem_overflow_init_raw(p, user_size); +} + +static void +mem_overflow_check_element(struct mem *mem) +{ + void *p = (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + mem_overflow_check_raw(p, mem->user_size, "heap", ""); +} +#else /* MEM_OVERFLOW_CHECK */ +#define mem_overflow_init_element(mem, size) +#define mem_overflow_check_element(mem) +#endif /* MEM_OVERFLOW_CHECK */ + +static struct mem * +ptr_to_mem(mem_size_t ptr) +{ + return (struct mem *)(void *)&ram[ptr]; +} + +static mem_size_t +mem_to_ptr(void *mem) +{ + return (mem_size_t)((u8_t *)mem - ram); +} + +/** + * "Plug holes" by combining adjacent empty struct mems. + * After this function is through, there should not exist + * one empty struct mem pointing to another empty struct mem. + * + * @param mem this points to a struct mem which just has been freed + * @internal this function is only called by mem_free() and mem_trim() + * + * This assumes access to the heap is protected by the calling function + * already. 
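+ *
+ * A sketch of the two merges (editor's illustration, not part of this patch):
+ *
+ *   forward:  [mem free][nmem free][next...]  becomes  [mem free      ][next...]
+ *   backward: [pmem free][mem free][next...]  becomes  [pmem free     ][next...]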
+ */ +static void +plug_holes(struct mem *mem) +{ + struct mem *nmem; + struct mem *pmem; + + LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram); + LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end); + LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0); + + /* plug hole forward */ + LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED); + + nmem = ptr_to_mem(mem->next); + if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) { + /* if mem->next is unused and not end of ram, combine mem and mem->next */ + if (lfree == nmem) { + lfree = mem; + } + mem->next = nmem->next; + if (nmem->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(nmem->next)->prev = mem_to_ptr(mem); + } + } + + /* plug hole backward */ + pmem = ptr_to_mem(mem->prev); + if (pmem != mem && pmem->used == 0) { + /* if mem->prev is unused, combine mem and mem->prev */ + if (lfree == mem) { + lfree = pmem; + } + pmem->next = mem->next; + if (mem->next != MEM_SIZE_ALIGNED) { + ptr_to_mem(mem->next)->prev = mem_to_ptr(pmem); + } + } +} + +/** + * Zero the heap and initialize start, end and lowest-free + */ +void +mem_init(void) +{ + struct mem *mem; + + LWIP_ASSERT("Sanity check alignment", + (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT - 1)) == 0); + + /* align the heap */ + ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER); + /* initialize the start of the heap */ + mem = (struct mem *)(void *)ram; + mem->next = MEM_SIZE_ALIGNED; + mem->prev = 0; + mem->used = 0; + /* initialize the end of the heap */ + ram_end = ptr_to_mem(MEM_SIZE_ALIGNED); + ram_end->used = 1; + ram_end->next = MEM_SIZE_ALIGNED; + ram_end->prev = MEM_SIZE_ALIGNED; + MEM_SANITY(); + + /* initialize the lowest-free pointer to the start of the heap */ + lfree = (struct mem *)(void *)ram; + + MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED); + + if (sys_mutex_new(&mem_mutex) != ERR_OK) { + LWIP_ASSERT("failed to create mem_mutex", 0); + } +} + +/* Check if a struct mem is correctly linked. + * If not, double-free is a possible reason. 
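+ *
+ * The checks below encode the heap invariants: both indices stay within the
+ * heap, the prev element links forward to this one (or the element is its own
+ * prev, i.e. the first element), and the next element links back to it
+ * (unless next is ram_end).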
+ */ +static int +mem_link_valid(struct mem *mem) +{ + struct mem *nmem, *pmem; + mem_size_t rmem_idx; + rmem_idx = mem_to_ptr(mem); + nmem = ptr_to_mem(mem->next); + pmem = ptr_to_mem(mem->prev); + if ((mem->next > MEM_SIZE_ALIGNED) || (mem->prev > MEM_SIZE_ALIGNED) || + ((mem->prev != rmem_idx) && (pmem->next != rmem_idx)) || + ((nmem != ram_end) && (nmem->prev != rmem_idx))) { + return 0; + } + return 1; +} + +#if MEM_SANITY_CHECK +static void +mem_sanity(void) +{ + struct mem *mem; + u8_t last_used; + + /* begin with first element here */ + mem = (struct mem *)ram; + LWIP_ASSERT("heap element used valid", (mem->used == 0) || (mem->used == 1)); + last_used = mem->used; + LWIP_ASSERT("heap element prev ptr valid", mem->prev == 0); + LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next) == ptr_to_mem(mem->next))); + + /* check all elements before the end of the heap */ + for (mem = ptr_to_mem(mem->next); + ((u8_t *)mem > ram) && (mem < ram_end); + mem = ptr_to_mem(mem->next)) { + LWIP_ASSERT("heap element aligned", LWIP_MEM_ALIGN(mem) == mem); + LWIP_ASSERT("heap element prev ptr valid", mem->prev <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr valid", mem->next <= MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element prev ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->prev) == ptr_to_mem(mem->prev))); + LWIP_ASSERT("heap element next ptr aligned", LWIP_MEM_ALIGN(ptr_to_mem(mem->next) == ptr_to_mem(mem->next))); + + if (last_used == 0) { + /* 2 unused elements in a row? */ + LWIP_ASSERT("heap element unused?", mem->used == 1); + } else { + LWIP_ASSERT("heap element unused member", (mem->used == 0) || (mem->used == 1)); + } + + LWIP_ASSERT("heap element link valid", mem_link_valid(mem)); + + /* used/unused altering */ + last_used = mem->used; + } + LWIP_ASSERT("heap end ptr sanity", mem == ptr_to_mem(MEM_SIZE_ALIGNED)); + LWIP_ASSERT("heap element used valid", mem->used == 1); + LWIP_ASSERT("heap element prev ptr valid", mem->prev == MEM_SIZE_ALIGNED); + LWIP_ASSERT("heap element next ptr valid", mem->next == MEM_SIZE_ALIGNED); +} +#endif /* MEM_SANITY_CHECK */ + +/** + * Put a struct mem back on the heap + * + * @param rmem is the data portion of a struct mem as returned by a previous + * call to mem_malloc() + */ +void +mem_free(void *rmem) +{ + struct mem *mem; + LWIP_MEM_FREE_DECL_PROTECT(); + + if (rmem == NULL) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n")); + return; + } + if ((((mem_ptr_t)rmem) & (MEM_ALIGNMENT - 1)) != 0) { + LWIP_MEM_ILLEGAL_FREE("mem_free: sanity check alignment"); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: sanity check alignment\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } + + /* Get the corresponding struct mem: */ + /* cast through void* to get rid of alignment warnings */ + mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET)); + + if ((u8_t *)mem < ram || (u8_t *)rmem + MIN_SIZE_ALIGNED > (u8_t *)ram_end) { + LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory"); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n")); + /* protect mem stats from concurrent access */ + MEM_STATS_INC_LOCKED(illegal); + return; + } +#if MEM_OVERFLOW_CHECK + mem_overflow_check_element(mem); +#endif + /* protect the heap from concurrent access */ + LWIP_MEM_FREE_PROTECT(); + /* mem has to 
be in a used state */
+  if (!mem->used) {
+    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: double free");
+    LWIP_MEM_FREE_UNPROTECT();
+    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: double free?\n"));
+    /* protect mem stats from concurrent access */
+    MEM_STATS_INC_LOCKED(illegal);
+    return;
+  }
+
+  if (!mem_link_valid(mem)) {
+    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: non-linked: double free");
+    LWIP_MEM_FREE_UNPROTECT();
+    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: non-linked: double free?\n"));
+    /* protect mem stats from concurrent access */
+    MEM_STATS_INC_LOCKED(illegal);
+    return;
+  }
+
+  /* mem is now unused. */
+  mem->used = 0;
+
+  if (mem < lfree) {
+    /* the newly freed struct is now the lowest */
+    lfree = mem;
+  }
+
+  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));
+
+  /* finally, see if prev or next are free also */
+  plug_holes(mem);
+  MEM_SANITY();
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+  mem_free_count = 1;
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+  LWIP_MEM_FREE_UNPROTECT();
+}
+
+/**
+ * Shrink memory returned by mem_malloc().
+ *
+ * @param rmem pointer to memory allocated by mem_malloc() that is to be shrunk
+ * @param new_size required size after shrinking (needs to be smaller than or
+ *                 equal to the previous size)
+ * @return for compatibility reasons: is always == rmem, at the moment
+ *         or NULL if new_size is > old size, in which case rmem is NOT touched
+ *         or freed!
+ */
+void *
+mem_trim(void *rmem, mem_size_t new_size)
+{
+  mem_size_t size, newsize;
+  mem_size_t ptr, ptr2;
+  struct mem *mem, *mem2;
+  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
+  LWIP_MEM_FREE_DECL_PROTECT();
+
+  /* Expand the size of the allocated memory region so that we can
+     adjust for alignment. */
+  newsize = (mem_size_t)LWIP_MEM_ALIGN_SIZE(new_size);
+  if (newsize < MIN_SIZE_ALIGNED) {
+    /* every data block must be at least MIN_SIZE_ALIGNED long */
+    newsize = MIN_SIZE_ALIGNED;
+  }
+#if MEM_OVERFLOW_CHECK
+  newsize += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED;
+#endif
+  if ((newsize > MEM_SIZE_ALIGNED) || (newsize < new_size)) {
+    return NULL;
+  }
+
+  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
+              (u8_t *)rmem < (u8_t *)ram_end);
+
+  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
+    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
+    /* protect mem stats from concurrent access */
+    MEM_STATS_INC_LOCKED(illegal);
+    return rmem;
+  }
+  /* Get the corresponding struct mem ... */
+  /* cast through void* to get rid of alignment warnings */
+  mem = (struct mem *)(void *)((u8_t *)rmem - (SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET));
+#if MEM_OVERFLOW_CHECK
+  mem_overflow_check_element(mem);
+#endif
+  /* ... and its offset pointer */
+  ptr = mem_to_ptr(mem);
+
+  size = (mem_size_t)((mem_size_t)(mem->next - ptr) - (SIZEOF_STRUCT_MEM + MEM_SANITY_OVERHEAD));
+  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
+  if (newsize > size) {
+    /* not supported */
+    return NULL;
+  }
+  if (newsize == size) {
+    /* No change in size, simply return */
+    return rmem;
+  }
+
+  /* protect the heap from concurrent access */
+  LWIP_MEM_FREE_PROTECT();
+
+  mem2 = ptr_to_mem(mem->next);
+  if (mem2->used == 0) {
+    /* The next struct is unused, we can simply move it a little */
+    mem_size_t next;
+    LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED);
+    /* remember the old next pointer */
+    next = mem2->next;
+    /* create new struct mem which is moved directly after the shrunk mem */
+    ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
+    if (lfree == mem2) {
+      lfree = ptr_to_mem(ptr2);
+    }
+    mem2 = ptr_to_mem(ptr2);
+    mem2->used = 0;
+    /* restore the next pointer */
+    mem2->next = next;
+    /* link it back to mem */
+    mem2->prev = ptr;
+    /* link mem to it */
+    mem->next = ptr2;
+    /* last thing to restore linked list: as we have moved mem2,
+     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
+     * the end of the heap */
+    if (mem2->next != MEM_SIZE_ALIGNED) {
+      ptr_to_mem(mem2->next)->prev = ptr2;
+    }
+    MEM_STATS_DEC_USED(used, (size - newsize));
+    /* no need to plug holes, we've already done that */
+  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
+    /* Next struct is used but there's room for another struct mem with
+     * at least MIN_SIZE_ALIGNED of data.
+     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
+     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
+     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
+     *       region that couldn't hold data, but when mem->next gets freed,
+     *       the 2 regions would be combined, resulting in more free memory */
+    ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
+    LWIP_ASSERT("invalid next ptr", mem->next != MEM_SIZE_ALIGNED);
+    mem2 = ptr_to_mem(ptr2);
+    if (mem2 < lfree) {
+      lfree = mem2;
+    }
+    mem2->used = 0;
+    mem2->next = mem->next;
+    mem2->prev = ptr;
+    mem->next = ptr2;
+    if (mem2->next != MEM_SIZE_ALIGNED) {
+      ptr_to_mem(mem2->next)->prev = ptr2;
+    }
+    MEM_STATS_DEC_USED(used, (size - newsize));
+    /* the original mem->next is used, so no need to plug holes! */
+  }
+  /* else {
+       next struct mem is used but size between mem and mem2 is not big enough
+       to create another struct mem
+       -> don't do anything.
+       -> the remaining space stays unused since it is too small
+     } */
+#if MEM_OVERFLOW_CHECK
+  mem_overflow_init_element(mem, new_size);
+#endif
+  MEM_SANITY();
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+  mem_free_count = 1;
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+  LWIP_MEM_FREE_UNPROTECT();
+  return rmem;
+}
+
+/**
+ * Allocate a block of memory with a minimum of 'size' bytes.
+ *
+ * @param size_in is the minimum size of the requested block in bytes.
+ * @return pointer to allocated memory or NULL if no free memory was found.
+ *
+ * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
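+ *
+ * Example usage together with mem_trim() above (editor's illustrative
+ * sketch, not part of this patch):
+ *
+ *   u8_t *buf = (u8_t *)mem_malloc(256);
+ *   if (buf != NULL) {
+ *     ... later it turns out 100 bytes are enough ...
+ *     buf = (u8_t *)mem_trim(buf, 100);  => still == buf, tail returned to heap
+ *     mem_free(buf);
+ *   }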
+ */
+void *
+mem_malloc(mem_size_t size_in)
+{
+  mem_size_t ptr, ptr2, size;
+  struct mem *mem, *mem2;
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+  u8_t local_mem_free_count = 0;
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+  LWIP_MEM_ALLOC_DECL_PROTECT();
+
+  if (size_in == 0) {
+    return NULL;
+  }
+
+  /* Expand the size of the allocated memory region so that we can
+     adjust for alignment. */
+  size = (mem_size_t)LWIP_MEM_ALIGN_SIZE(size_in);
+  if (size < MIN_SIZE_ALIGNED) {
+    /* every data block must be at least MIN_SIZE_ALIGNED long */
+    size = MIN_SIZE_ALIGNED;
+  }
+#if MEM_OVERFLOW_CHECK
+  size += MEM_SANITY_REGION_BEFORE_ALIGNED + MEM_SANITY_REGION_AFTER_ALIGNED;
+#endif
+  if ((size > MEM_SIZE_ALIGNED) || (size < size_in)) {
+    return NULL;
+  }
+
+  /* protect the heap from concurrent access */
+  sys_mutex_lock(&mem_mutex);
+  LWIP_MEM_ALLOC_PROTECT();
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+  /* run as long as a mem_free disturbed mem_malloc or mem_trim */
+  do {
+    local_mem_free_count = 0;
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+
+    /* Scan through the heap searching for a free block that is big enough,
+     * beginning with the lowest free block.
+     */
+    for (ptr = mem_to_ptr(lfree); ptr < MEM_SIZE_ALIGNED - size;
+         ptr = ptr_to_mem(ptr)->next) {
+      mem = ptr_to_mem(ptr);
+#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
+      mem_free_count = 0;
+      LWIP_MEM_ALLOC_UNPROTECT();
+      /* allow mem_free or mem_trim to run */
+      LWIP_MEM_ALLOC_PROTECT();
+      if (mem_free_count != 0) {
+        /* If mem_free or mem_trim have run, we have to restart since they
+           could have altered our current struct mem. */
+        local_mem_free_count = 1;
+        break;
+      }
+#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
+
+      if ((!mem->used) &&
+          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
+        /* mem is not used and at least perfect fit is possible:
+         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
+
+        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
+          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
+           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
+           * -> split large block, create empty remainder,
+           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
+           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
+           * struct mem would fit in but no data between mem2 and mem2->next
+           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
+           *       region that couldn't hold data, but when mem->next gets freed,
+           *       the 2 regions would be combined, resulting in more free memory
+           */
+          ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + size);
+          LWIP_ASSERT("invalid next ptr", ptr2 != MEM_SIZE_ALIGNED);
+          /* create mem2 struct */
+          mem2 = ptr_to_mem(ptr2);
+          mem2->used = 0;
+          mem2->next = mem->next;
+          mem2->prev = ptr;
+          /* and insert it between mem and mem->next */
+          mem->next = ptr2;
+          mem->used = 1;
+
+          if (mem2->next != MEM_SIZE_ALIGNED) {
+            ptr_to_mem(mem2->next)->prev = ptr2;
+          }
+          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
+        } else {
+          /* (a mem2 struct does not fit into the user data space of mem, and mem->next will
+           * always be used at this point: if not, we would have 2 unused structs in a row and
+           * plug_holes should have taken care of this).
+           * -> near fit or exact fit: do not split, no mem2 creation
+           * also can't move mem->next directly behind mem, since mem->next
+           * will always be used at this point!
+ */ + mem->used = 1; + MEM_STATS_INC_USED(used, mem->next - mem_to_ptr(mem)); + } +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT +mem_malloc_adjust_lfree: +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + if (mem == lfree) { + struct mem *cur = lfree; + /* Find next free block after mem and update lowest free pointer */ + while (cur->used && cur != ram_end) { +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + mem_free_count = 0; + LWIP_MEM_ALLOC_UNPROTECT(); + /* prevent high interrupt latency... */ + LWIP_MEM_ALLOC_PROTECT(); + if (mem_free_count != 0) { + /* If mem_free or mem_trim have run, we have to restart since they + could have altered our current struct mem or lfree. */ + goto mem_malloc_adjust_lfree; + } +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + cur = ptr_to_mem(cur->next); + } + lfree = cur; + LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used))); + } + LWIP_MEM_ALLOC_UNPROTECT(); + sys_mutex_unlock(&mem_mutex); + LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.", + (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end); + LWIP_ASSERT("mem_malloc: allocated memory properly aligned.", + ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0); + LWIP_ASSERT("mem_malloc: sanity check alignment", + (((mem_ptr_t)mem) & (MEM_ALIGNMENT - 1)) == 0); + +#if MEM_OVERFLOW_CHECK + mem_overflow_init_element(mem, size_in); +#endif + MEM_SANITY(); + return (u8_t *)mem + SIZEOF_STRUCT_MEM + MEM_SANITY_OFFSET; + } + } +#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT + /* if we got interrupted by a mem_free, try again */ + } while (local_mem_free_count != 0); +#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ + MEM_STATS_INC(err); + LWIP_MEM_ALLOC_UNPROTECT(); + sys_mutex_unlock(&mem_mutex); + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size)); + return NULL; +} + +#endif /* MEM_USE_POOLS */ + +#if MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) +void * +mem_calloc(mem_size_t count, mem_size_t size) +{ + return mem_clib_calloc(count, size); +} + +#else /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */ +/** + * Contiguously allocates enough space for count objects that are size bytes + * of memory each and returns a pointer to the allocated memory. + * + * The allocated memory is filled with bytes of value zero. 
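+ *
+ * Example usage (editor's illustrative sketch; struct entry stands in for an
+ * application-defined type):
+ *
+ *   struct entry *table = (struct entry *)mem_calloc(8, sizeof(struct entry));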
+ * + * @param count number of objects to allocate + * @param size size of the objects to allocate + * @return pointer to allocated memory / NULL pointer if there is an error + */ +void * +mem_calloc(mem_size_t count, mem_size_t size) +{ + void *p; + size_t alloc_size = (size_t)count * (size_t)size; + + if ((size_t)(mem_size_t)alloc_size != alloc_size) { + LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_calloc: could not allocate %"SZT_F" bytes\n", alloc_size)); + return NULL; + } + + /* allocate 'count' objects of size 'size' */ + p = mem_malloc((mem_size_t)alloc_size); + if (p) { + /* zero the memory */ + memset(p, 0, alloc_size); + } + return p; +} +#endif /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/memp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/memp.c new file mode 100644 index 0000000..18771f6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/memp.c @@ -0,0 +1,447 @@ +/** + * @file + * Dynamic pool memory manager + * + * lwIP has dedicated pools for many structures (netconn, protocol control blocks, + * packet buffers, ...). All these pools are managed here. + * + * @defgroup mempool Memory pools + * @ingroup infrastructure + * Custom memory pools + + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#include "lwip/memp.h"
+#include "lwip/sys.h"
+#include "lwip/stats.h"
+
+#include <string.h>
+
+/* Make sure we include everything we need for size calculation required by memp_std.h */
+#include "lwip/pbuf.h"
+#include "lwip/raw.h"
+#include "lwip/udp.h"
+#include "lwip/tcp.h"
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/altcp.h"
+#include "lwip/ip4_frag.h"
+#include "lwip/netbuf.h"
+#include "lwip/api.h"
+#include "lwip/priv/tcpip_priv.h"
+#include "lwip/priv/api_msg.h"
+#include "lwip/priv/sockets_priv.h"
+#include "lwip/etharp.h"
+#include "lwip/igmp.h"
+#include "lwip/timeouts.h"
+/* needed by default MEMP_NUM_SYS_TIMEOUT */
+#include "netif/ppp/ppp_opts.h"
+#include "lwip/netdb.h"
+#include "lwip/dns.h"
+#include "lwip/priv/nd6_priv.h"
+#include "lwip/ip6_frag.h"
+#include "lwip/mld6.h"
+
+#define LWIP_MEMPOOL(name,num,size,desc) LWIP_MEMPOOL_DECLARE(name,num,size,desc)
+#include "lwip/priv/memp_std.h"
+
+const struct memp_desc *const memp_pools[MEMP_MAX] = {
+#define LWIP_MEMPOOL(name,num,size,desc) &memp_ ## name,
+#include "lwip/priv/memp_std.h"
+};
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+#if MEMP_MEM_MALLOC && MEMP_OVERFLOW_CHECK >= 2
+#undef MEMP_OVERFLOW_CHECK
+/* MEMP_OVERFLOW_CHECK >= 2 does not work with MEMP_MEM_MALLOC, use 1 instead */
+#define MEMP_OVERFLOW_CHECK 1
+#endif
+
+#if MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC
+/**
+ * Check that memp-lists don't form a circle, using "Floyd's cycle-finding algorithm".
+ */
+static int
+memp_sanity(const struct memp_desc *desc)
+{
+  struct memp *t, *h;
+
+  t = *desc->tab;
+  if (t != NULL) {
+    for (h = t->next; (t != NULL) && (h != NULL); t = t->next,
+         h = ((h->next != NULL) ? h->next->next : NULL)) {
+      if (t == h) {
+        return 0;
+      }
+    }
+  }
+
+  return 1;
+}
+#endif /* MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC */
+
+#if MEMP_OVERFLOW_CHECK
+/**
+ * Check if a memp element was victim of an overflow or underflow
+ * (e.g. the restricted area after/before it has been altered)
+ *
+ * @param p the memp element to check
+ * @param desc the pool p comes from
+ */
+static void
+memp_overflow_check_element(struct memp *p, const struct memp_desc *desc)
+{
+  mem_overflow_check_raw((u8_t *)p + MEMP_SIZE, desc->size, "pool ", desc->desc);
+}
+
+/**
+ * Initialize the restricted area of one memp element.
+ */
+static void
+memp_overflow_init_element(struct memp *p, const struct memp_desc *desc)
+{
+  mem_overflow_init_raw((u8_t *)p + MEMP_SIZE, desc->size);
+}
+
+#if MEMP_OVERFLOW_CHECK >= 2
+/**
+ * Do an overflow check for all elements in every pool.
+ *
+ * @see memp_overflow_check_element for a description of the check
+ */
+static void
+memp_overflow_check_all(void)
+{
+  u16_t i, j;
+  struct memp *p;
+  SYS_ARCH_DECL_PROTECT(old_level);
+  SYS_ARCH_PROTECT(old_level);
+
+  for (i = 0; i < MEMP_MAX; ++i) {
+    p = (struct memp *)LWIP_MEM_ALIGN(memp_pools[i]->base);
+    for (j = 0; j < memp_pools[i]->num; ++j) {
+      memp_overflow_check_element(p, memp_pools[i]);
+      p = LWIP_ALIGNMENT_CAST(struct memp *, ((u8_t *)p + MEMP_SIZE + memp_pools[i]->size + MEM_SANITY_REGION_AFTER_ALIGNED));
+    }
+  }
+  SYS_ARCH_UNPROTECT(old_level);
+}
+#endif /* MEMP_OVERFLOW_CHECK >= 2 */
+#endif /* MEMP_OVERFLOW_CHECK */
+
+/**
+ * Initialize custom memory pool.
+ * Related functions: memp_malloc_pool, memp_free_pool + * + * @param desc pool to initialize + */ +void +memp_init_pool(const struct memp_desc *desc) +{ +#if MEMP_MEM_MALLOC + LWIP_UNUSED_ARG(desc); +#else + int i; + struct memp *memp; + + *desc->tab = NULL; + memp = (struct memp *)LWIP_MEM_ALIGN(desc->base); +#if MEMP_MEM_INIT + /* force memset on pool memory */ + memset(memp, 0, (size_t)desc->num * (MEMP_SIZE + desc->size +#if MEMP_OVERFLOW_CHECK + + MEM_SANITY_REGION_AFTER_ALIGNED +#endif + )); +#endif + /* create a linked list of memp elements */ + for (i = 0; i < desc->num; ++i) { + memp->next = *desc->tab; + *desc->tab = memp; +#if MEMP_OVERFLOW_CHECK + memp_overflow_init_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + /* cast through void* to get rid of alignment warnings */ + memp = (struct memp *)(void *)((u8_t *)memp + MEMP_SIZE + desc->size +#if MEMP_OVERFLOW_CHECK + + MEM_SANITY_REGION_AFTER_ALIGNED +#endif + ); + } +#if MEMP_STATS + desc->stats->avail = desc->num; +#endif /* MEMP_STATS */ +#endif /* !MEMP_MEM_MALLOC */ + +#if MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) + desc->stats->name = desc->desc; +#endif /* MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) */ +} + +/** + * Initializes lwIP built-in pools. + * Related functions: memp_malloc, memp_free + * + * Carves out memp_memory into linked lists for each pool-type. + */ +void +memp_init(void) +{ + u16_t i; + + /* for every pool: */ + for (i = 0; i < LWIP_ARRAYSIZE(memp_pools); i++) { + memp_init_pool(memp_pools[i]); + +#if LWIP_STATS && MEMP_STATS + lwip_stats.memp[i] = memp_pools[i]->stats; +#endif + } + +#if MEMP_OVERFLOW_CHECK >= 2 + /* check everything a first time to see if it worked */ + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ +} + +static void * +#if !MEMP_OVERFLOW_CHECK +do_memp_malloc_pool(const struct memp_desc *desc) +#else +do_memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int line) +#endif +{ + struct memp *memp; + SYS_ARCH_DECL_PROTECT(old_level); + +#if MEMP_MEM_MALLOC + memp = (struct memp *)mem_malloc(MEMP_SIZE + MEMP_ALIGN_SIZE(desc->size)); + SYS_ARCH_PROTECT(old_level); +#else /* MEMP_MEM_MALLOC */ + SYS_ARCH_PROTECT(old_level); + + memp = *desc->tab; +#endif /* MEMP_MEM_MALLOC */ + + if (memp != NULL) { +#if !MEMP_MEM_MALLOC +#if MEMP_OVERFLOW_CHECK == 1 + memp_overflow_check_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + + *desc->tab = memp->next; +#if MEMP_OVERFLOW_CHECK + memp->next = NULL; +#endif /* MEMP_OVERFLOW_CHECK */ +#endif /* !MEMP_MEM_MALLOC */ +#if MEMP_OVERFLOW_CHECK + memp->file = file; + memp->line = line; +#if MEMP_MEM_MALLOC + memp_overflow_init_element(memp, desc); +#endif /* MEMP_MEM_MALLOC */ +#endif /* MEMP_OVERFLOW_CHECK */ + LWIP_ASSERT("memp_malloc: memp properly aligned", + ((mem_ptr_t)memp % MEM_ALIGNMENT) == 0); +#if MEMP_STATS + desc->stats->used++; + if (desc->stats->used > desc->stats->max) { + desc->stats->max = desc->stats->used; + } +#endif + SYS_ARCH_UNPROTECT(old_level); + /* cast through u8_t* to get rid of alignment warnings */ + return ((u8_t *)memp + MEMP_SIZE); + } else { +#if MEMP_STATS + desc->stats->err++; +#endif + SYS_ARCH_UNPROTECT(old_level); + LWIP_DEBUGF(MEMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("memp_malloc: out of memory in pool %s\n", desc->desc)); + } + + return NULL; +} + +/** + * Get an element from a custom pool. 
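+ *
+ * Continuing the custom-pool sketch from memp_init_pool() above
+ * (hypothetical names):
+ *
+ *   struct my_obj *o = (struct my_obj *)memp_malloc_pool(&memp_my_pool);
+ *   if (o != NULL) {
+ *     ...
+ *     memp_free_pool(&memp_my_pool, o);
+ *   }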
+ * + * @param desc the pool to get an element from + * + * @return a pointer to the allocated memory or a NULL pointer on error + */ +void * +#if !MEMP_OVERFLOW_CHECK +memp_malloc_pool(const struct memp_desc *desc) +#else +memp_malloc_pool_fn(const struct memp_desc *desc, const char *file, const int line) +#endif +{ + LWIP_ASSERT("invalid pool desc", desc != NULL); + if (desc == NULL) { + return NULL; + } + +#if !MEMP_OVERFLOW_CHECK + return do_memp_malloc_pool(desc); +#else + return do_memp_malloc_pool_fn(desc, file, line); +#endif +} + +/** + * Get an element from a specific pool. + * + * @param type the pool to get an element from + * + * @return a pointer to the allocated memory or a NULL pointer on error + */ +void * +#if !MEMP_OVERFLOW_CHECK +memp_malloc(memp_t type) +#else +memp_malloc_fn(memp_t type, const char *file, const int line) +#endif +{ + void *memp; + LWIP_ERROR("memp_malloc: type < MEMP_MAX", (type < MEMP_MAX), return NULL;); + +#if MEMP_OVERFLOW_CHECK >= 2 + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ + +#if !MEMP_OVERFLOW_CHECK + memp = do_memp_malloc_pool(memp_pools[type]); +#else + memp = do_memp_malloc_pool_fn(memp_pools[type], file, line); +#endif + + return memp; +} + +static void +do_memp_free_pool(const struct memp_desc *desc, void *mem) +{ + struct memp *memp; + SYS_ARCH_DECL_PROTECT(old_level); + + LWIP_ASSERT("memp_free: mem properly aligned", + ((mem_ptr_t)mem % MEM_ALIGNMENT) == 0); + + /* cast through void* to get rid of alignment warnings */ + memp = (struct memp *)(void *)((u8_t *)mem - MEMP_SIZE); + + SYS_ARCH_PROTECT(old_level); + +#if MEMP_OVERFLOW_CHECK == 1 + memp_overflow_check_element(memp, desc); +#endif /* MEMP_OVERFLOW_CHECK */ + +#if MEMP_STATS + desc->stats->used--; +#endif + +#if MEMP_MEM_MALLOC + LWIP_UNUSED_ARG(desc); + SYS_ARCH_UNPROTECT(old_level); + mem_free(memp); +#else /* MEMP_MEM_MALLOC */ + memp->next = *desc->tab; + *desc->tab = memp; + +#if MEMP_SANITY_CHECK + LWIP_ASSERT("memp sanity", memp_sanity(desc)); +#endif /* MEMP_SANITY_CHECK */ + + SYS_ARCH_UNPROTECT(old_level); +#endif /* !MEMP_MEM_MALLOC */ +} + +/** + * Put a custom pool element back into its pool. + * + * @param desc the pool where to put mem + * @param mem the memp element to free + */ +void +memp_free_pool(const struct memp_desc *desc, void *mem) +{ + LWIP_ASSERT("invalid pool desc", desc != NULL); + if ((desc == NULL) || (mem == NULL)) { + return; + } + + do_memp_free_pool(desc, mem); +} + +/** + * Put an element back into its pool. 
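+ *
+ * For the built-in pools, allocation and release pair up as in this sketch
+ * (MEMP_NETBUF is one of the standard pool types from memp_std.h, available
+ * when LWIP_NETCONN is enabled):
+ *
+ *   struct netbuf *buf = (struct netbuf *)memp_malloc(MEMP_NETBUF);
+ *   if (buf != NULL) {
+ *     ...
+ *     memp_free(MEMP_NETBUF, buf);
+ *   }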
+ * + * @param type the pool where to put mem + * @param mem the memp element to free + */ +void +memp_free(memp_t type, void *mem) +{ +#ifdef LWIP_HOOK_MEMP_AVAILABLE + struct memp *old_first; +#endif + + LWIP_ERROR("memp_free: type < MEMP_MAX", (type < MEMP_MAX), return;); + + if (mem == NULL) { + return; + } + +#if MEMP_OVERFLOW_CHECK >= 2 + memp_overflow_check_all(); +#endif /* MEMP_OVERFLOW_CHECK >= 2 */ + +#ifdef LWIP_HOOK_MEMP_AVAILABLE + old_first = *memp_pools[type]->tab; +#endif + + do_memp_free_pool(memp_pools[type], mem); + +#ifdef LWIP_HOOK_MEMP_AVAILABLE + if (old_first == NULL) { + LWIP_HOOK_MEMP_AVAILABLE(type); + } +#endif +} diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/netif.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/netif.c new file mode 100644 index 0000000..a19018c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/netif.c @@ -0,0 +1,1795 @@ +/** + * @file + * lwIP network interface abstraction + * + * @defgroup netif Network interface (NETIF) + * @ingroup callbackstyle_api + * + * @defgroup netif_ip4 IPv4 address handling + * @ingroup netif + * + * @defgroup netif_ip6 IPv6 address handling + * @ingroup netif + * + * @defgroup netif_cd Client data handling + * Store data (void*) on a netif for application usage. + * @see @ref LWIP_NUM_NETIF_CLIENT_DATA + * @ingroup netif + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ *
+ * Author: Adam Dunkels
+ */
+
+#include "lwip/opt.h"
+
+#include <string.h> /* memset */
+#include <stdlib.h> /* atoi */
+
+#include "lwip/def.h"
+#include "lwip/ip_addr.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/netif.h"
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/udp.h"
+#include "lwip/priv/raw_priv.h"
+#include "lwip/snmp.h"
+#include "lwip/igmp.h"
+#include "lwip/etharp.h"
+#include "lwip/stats.h"
+#include "lwip/sys.h"
+#include "lwip/ip.h"
+#if ENABLE_LOOPBACK
+#if LWIP_NETIF_LOOPBACK_MULTITHREADING
+#include "lwip/tcpip.h"
+#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */
+#endif /* ENABLE_LOOPBACK */
+
+#include "netif/ethernet.h"
+
+#if LWIP_AUTOIP
+#include "lwip/autoip.h"
+#endif /* LWIP_AUTOIP */
+#if LWIP_DHCP
+#include "lwip/dhcp.h"
+#endif /* LWIP_DHCP */
+#if LWIP_IPV6_DHCP6
+#include "lwip/dhcp6.h"
+#endif /* LWIP_IPV6_DHCP6 */
+#if LWIP_IPV6_MLD
+#include "lwip/mld6.h"
+#endif /* LWIP_IPV6_MLD */
+#if LWIP_IPV6
+#include "lwip/nd6.h"
+#endif
+
+#if LWIP_NETIF_STATUS_CALLBACK
+#define NETIF_STATUS_CALLBACK(n) do{ if (n->status_callback) { (n->status_callback)(n); }}while(0)
+#else
+#define NETIF_STATUS_CALLBACK(n)
+#endif /* LWIP_NETIF_STATUS_CALLBACK */
+
+#if LWIP_NETIF_LINK_CALLBACK
+#define NETIF_LINK_CALLBACK(n) do{ if (n->link_callback) { (n->link_callback)(n); }}while(0)
+#else
+#define NETIF_LINK_CALLBACK(n)
+#endif /* LWIP_NETIF_LINK_CALLBACK */
+
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+static netif_ext_callback_t *ext_callback;
+#endif
+
+#if !LWIP_SINGLE_NETIF
+struct netif *netif_list;
+#endif /* !LWIP_SINGLE_NETIF */
+struct netif *netif_default;
+
+#define netif_index_to_num(index) ((index) - 1)
+static u8_t netif_num;
+
+#if LWIP_NUM_NETIF_CLIENT_DATA > 0
+static u8_t netif_client_id;
+#endif
+
+#define NETIF_REPORT_TYPE_IPV4 0x01
+#define NETIF_REPORT_TYPE_IPV6 0x02
+static void netif_issue_reports(struct netif *netif, u8_t report_type);
+
+#if LWIP_IPV6
+static err_t netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr);
+#endif /* LWIP_IPV6 */
+#if LWIP_IPV4
+static err_t netif_null_output_ip4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr);
+#endif /* LWIP_IPV4 */
+
+#if LWIP_HAVE_LOOPIF
+#if LWIP_IPV4
+static err_t netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t *addr);
+#endif
+#if LWIP_IPV6
+static err_t netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t *addr);
+#endif
+
+
+static struct netif loop_netif;
+
+/**
+ * Initialize a lwip network interface structure for a loopback interface
+ *
+ * @param netif the lwip network interface structure for this loopif
+ * @return ERR_OK if the loopif is initialized
+ *         ERR_MEM if private data couldn't be allocated
+ */
+static err_t
+netif_loopif_init(struct netif *netif)
+{
+  LWIP_ASSERT("netif_loopif_init: invalid netif", netif != NULL);
+
+  /* initialize the snmp variables and counters inside the struct netif
+   * ifSpeed: no assumption can be made!
+ */ + MIB2_INIT_NETIF(netif, snmp_ifType_softwareLoopback, 0); + + netif->name[0] = 'l'; + netif->name[1] = 'o'; +#if LWIP_IPV4 + netif->output = netif_loop_output_ipv4; +#endif +#if LWIP_IPV6 + netif->output_ip6 = netif_loop_output_ipv6; +#endif +#if LWIP_LOOPIF_MULTICAST + netif_set_flags(netif, NETIF_FLAG_IGMP); +#endif + NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_DISABLE_ALL); + return ERR_OK; +} +#endif /* LWIP_HAVE_LOOPIF */ + +void +netif_init(void) +{ +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +#define LOOPIF_ADDRINIT &loop_ipaddr, &loop_netmask, &loop_gw, + ip4_addr_t loop_ipaddr, loop_netmask, loop_gw; + IP4_ADDR(&loop_gw, 127, 0, 0, 1); + IP4_ADDR(&loop_ipaddr, 127, 0, 0, 1); + IP4_ADDR(&loop_netmask, 255, 0, 0, 0); +#else /* LWIP_IPV4 */ +#define LOOPIF_ADDRINIT +#endif /* LWIP_IPV4 */ + +#if NO_SYS + netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, ip_input); +#else /* NO_SYS */ + netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, tcpip_input); +#endif /* NO_SYS */ + +#if LWIP_IPV6 + IP_ADDR6_HOST(loop_netif.ip6_addr, 0, 0, 0, 0x00000001UL); + loop_netif.ip6_addr_state[0] = IP6_ADDR_VALID; +#endif /* LWIP_IPV6 */ + + netif_set_link_up(&loop_netif); + netif_set_up(&loop_netif); + +#endif /* LWIP_HAVE_LOOPIF */ +} + +/** + * @ingroup lwip_nosys + * Forwards a received packet for input processing with + * ethernet_input() or ip_input() depending on netif flags. + * Don't call directly, pass to netif_add() and call + * netif->input(). + * Only works if the netif driver correctly sets + * NETIF_FLAG_ETHARP and/or NETIF_FLAG_ETHERNET flag! + */ +err_t +netif_input(struct pbuf *p, struct netif *inp) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_input: invalid pbuf", p != NULL); + LWIP_ASSERT("netif_input: invalid netif", inp != NULL); + +#if LWIP_ETHERNET + if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { + return ethernet_input(p, inp); + } else +#endif /* LWIP_ETHERNET */ + return ip_input(p, inp); +} + +/** + * @ingroup netif + * Add a network interface to the list of lwIP netifs. + * + * Same as @ref netif_add but without IPv4 addresses + */ +struct netif * +netif_add_noaddr(struct netif *netif, void *state, netif_init_fn init, netif_input_fn input) +{ + return netif_add(netif, +#if LWIP_IPV4 + NULL, NULL, NULL, +#endif /* LWIP_IPV4*/ + state, init, input); +} + +/** + * @ingroup netif + * Add a network interface to the list of lwIP netifs. + * + * @param netif a pre-allocated netif structure + * @param ipaddr IP address for the new netif + * @param netmask network mask for the new netif + * @param gw default gateway IP address for the new netif + * @param state opaque data passed to the new netif + * @param init callback function that initializes the interface + * @param input callback function that is called to pass + * ingress packets up in the protocol layer stack.\n + * It is recommended to use a function that passes the input directly + * to the stack (netif_input(), NO_SYS=1 mode) or via sending a + * message to TCPIP thread (tcpip_input(), NO_SYS=0 mode).\n + * These functions use netif flags NETIF_FLAG_ETHARP and NETIF_FLAG_ETHERNET + * to decide whether to forward to ethernet_input() or ip_input(). 
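+ *   A minimal bring-up sketch (my_netif and my_ethif_init are hypothetical
+ *   placeholders for the application's netif and driver init function):
+ *     netif_add(&my_netif, &ipaddr, &netmask, &gw, NULL, my_ethif_init, tcpip_input);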
+ * Either way, these functions only work when the netif
+ * driver is implemented correctly!\n
+ * Most members of struct netif should be initialized by the
+ * netif init function = netif driver (init parameter of this function).\n
+ * IPv6: Don't forget to call netif_create_ip6_linklocal_address() after
+ * setting the MAC address in struct netif.hwaddr
+ * (IPv6 requires a link-local address).
+ *
+ * @return netif, or NULL if failed.
+ */
+struct netif *
+netif_add(struct netif *netif,
+#if LWIP_IPV4
+          const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw,
+#endif /* LWIP_IPV4 */
+          void *state, netif_init_fn init, netif_input_fn input)
+{
+#if LWIP_IPV6
+  s8_t i;
+#endif
+
+  LWIP_ASSERT_CORE_LOCKED();
+
+#if LWIP_SINGLE_NETIF
+  if (netif_default != NULL) {
+    LWIP_ASSERT("single netif already set", 0);
+    return NULL;
+  }
+#endif
+
+  LWIP_ERROR("netif_add: invalid netif", netif != NULL, return NULL);
+  LWIP_ERROR("netif_add: No init function given", init != NULL, return NULL);
+
+#if LWIP_IPV4
+  if (ipaddr == NULL) {
+    ipaddr = ip_2_ip4(IP4_ADDR_ANY);
+  }
+  if (netmask == NULL) {
+    netmask = ip_2_ip4(IP4_ADDR_ANY);
+  }
+  if (gw == NULL) {
+    gw = ip_2_ip4(IP4_ADDR_ANY);
+  }
+
+  /* reset new interface configuration state */
+  ip_addr_set_zero_ip4(&netif->ip_addr);
+  ip_addr_set_zero_ip4(&netif->netmask);
+  ip_addr_set_zero_ip4(&netif->gw);
+  netif->output = netif_null_output_ip4;
+#endif /* LWIP_IPV4 */
+#if LWIP_IPV6
+  for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+    ip_addr_set_zero_ip6(&netif->ip6_addr[i]);
+    netif->ip6_addr_state[i] = IP6_ADDR_INVALID;
+#if LWIP_IPV6_ADDRESS_LIFETIMES
+    netif->ip6_addr_valid_life[i] = IP6_ADDR_LIFE_STATIC;
+    netif->ip6_addr_pref_life[i] = IP6_ADDR_LIFE_STATIC;
+#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */
+  }
+  netif->output_ip6 = netif_null_output_ip6;
+#endif /* LWIP_IPV6 */
+  NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_ENABLE_ALL);
+  netif->mtu = 0;
+  netif->flags = 0;
+#ifdef netif_get_client_data
+  memset(netif->client_data, 0, sizeof(netif->client_data));
+#endif /* netif_get_client_data */
+#if LWIP_IPV6
+#if LWIP_IPV6_AUTOCONFIG
+  /* IPv6 address autoconfiguration not enabled by default */
+  netif->ip6_autoconfig_enabled = 0;
+#endif /* LWIP_IPV6_AUTOCONFIG */
+  nd6_restart_netif(netif);
+#endif /* LWIP_IPV6 */
+#if LWIP_NETIF_STATUS_CALLBACK
+  netif->status_callback = NULL;
+#endif /* LWIP_NETIF_STATUS_CALLBACK */
+#if LWIP_NETIF_LINK_CALLBACK
+  netif->link_callback = NULL;
+#endif /* LWIP_NETIF_LINK_CALLBACK */
+#if LWIP_IGMP
+  netif->igmp_mac_filter = NULL;
+#endif /* LWIP_IGMP */
+#if LWIP_IPV6 && LWIP_IPV6_MLD
+  netif->mld_mac_filter = NULL;
+#endif /* LWIP_IPV6 && LWIP_IPV6_MLD */
+#if ENABLE_LOOPBACK
+  netif->loop_first = NULL;
+  netif->loop_last = NULL;
+#endif /* ENABLE_LOOPBACK */
+
+  /* remember netif specific state information data */
+  netif->state = state;
+  netif->num = netif_num;
+  netif->input = input;
+
+  NETIF_RESET_HINTS(netif);
+#if ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS
+  netif->loop_cnt_current = 0;
+#endif /* ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS */
+
+#if LWIP_IPV4
+  netif_set_addr(netif, ipaddr, netmask, gw);
+#endif /* LWIP_IPV4 */
+
+  /* call user specified initialization function for netif */
+  if (init(netif) != ERR_OK) {
+    return NULL;
+  }
+#if LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES
+  /* Initialize the MTU for IPv6 to the one set by the netif driver.
+     This can be updated later by RA. */
+  netif->mtu6 = netif->mtu;
+#endif /* LWIP_IPV6 && LWIP_ND6_ALLOW_RA_UPDATES */
+
+#if !LWIP_SINGLE_NETIF
+  /* Assign a unique netif number in the range [0..254], so that (num+1) can
+     serve as an interface index that fits in a u8_t.
+     We assume that the new netif has not yet been added to the list here.
+     This algorithm is O(n^2), but that should be OK for lwIP.
+     */
+  {
+    struct netif *netif2;
+    int num_netifs;
+    do {
+      if (netif->num == 255) {
+        netif->num = 0;
+      }
+      num_netifs = 0;
+      for (netif2 = netif_list; netif2 != NULL; netif2 = netif2->next) {
+        LWIP_ASSERT("netif already added", netif2 != netif);
+        num_netifs++;
+        LWIP_ASSERT("too many netifs, max. supported number is 255", num_netifs <= 255);
+        if (netif2->num == netif->num) {
+          netif->num++;
+          break;
+        }
+      }
+    } while (netif2 != NULL);
+  }
+  if (netif->num == 254) {
+    netif_num = 0;
+  } else {
+    netif_num = (u8_t)(netif->num + 1);
+  }
+
+  /* add this netif to the list */
+  netif->next = netif_list;
+  netif_list = netif;
+#endif /* !LWIP_SINGLE_NETIF */
+  mib2_netif_added(netif);
+
+#if LWIP_IGMP
+  /* start IGMP processing */
+  if (netif->flags & NETIF_FLAG_IGMP) {
+    igmp_start(netif);
+  }
+#endif /* LWIP_IGMP */
+
+  LWIP_DEBUGF(NETIF_DEBUG, ("netif: added interface %c%c IP",
+                            netif->name[0], netif->name[1]));
+#if LWIP_IPV4
+  LWIP_DEBUGF(NETIF_DEBUG, (" addr "));
+  ip4_addr_debug_print(NETIF_DEBUG, ipaddr);
+  LWIP_DEBUGF(NETIF_DEBUG, (" netmask "));
+  ip4_addr_debug_print(NETIF_DEBUG, netmask);
+  LWIP_DEBUGF(NETIF_DEBUG, (" gw "));
+  ip4_addr_debug_print(NETIF_DEBUG, gw);
+#endif /* LWIP_IPV4 */
+  LWIP_DEBUGF(NETIF_DEBUG, ("\n"));
+
+  netif_invoke_ext_callback(netif, LWIP_NSC_NETIF_ADDED, NULL);
+
+  return netif;
+}
+
+static void
+netif_do_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr)
+{
+#if LWIP_TCP
+  tcp_netif_ip_addr_changed(old_addr, new_addr);
+#endif /* LWIP_TCP */
+#if LWIP_UDP
+  udp_netif_ip_addr_changed(old_addr, new_addr);
+#endif /* LWIP_UDP */
+#if LWIP_RAW
+  raw_netif_ip_addr_changed(old_addr, new_addr);
+#endif /* LWIP_RAW */
+}
+
+#if LWIP_IPV4
+static int
+netif_do_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr, ip_addr_t *old_addr)
+{
+  LWIP_ASSERT("invalid pointer", ipaddr != NULL);
+  LWIP_ASSERT("invalid pointer", old_addr != NULL);
+
+  /* address is actually being changed?
*/ + if (ip4_addr_cmp(ipaddr, netif_ip4_addr(netif)) == 0) { + ip_addr_t new_addr; + *ip_2_ip4(&new_addr) = *ipaddr; + IP_SET_TYPE_VAL(new_addr, IPADDR_TYPE_V4); + + ip_addr_copy(*old_addr, *netif_ip_addr4(netif)); + + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: netif address being changed\n")); + netif_do_ip_addr_changed(old_addr, &new_addr); + + mib2_remove_ip4(netif); + mib2_remove_route_ip4(0, netif); + /* set new IP address to netif */ + ip4_addr_set(ip_2_ip4(&netif->ip_addr), ipaddr); + IP_SET_TYPE_VAL(netif->ip_addr, IPADDR_TYPE_V4); + mib2_add_ip4(netif); + mib2_add_route_ip4(0, netif); + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4); + + NETIF_STATUS_CALLBACK(netif); + return 1; /* address changed */ + } + return 0; /* address unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the IP address of a network interface + * + * @param netif the network interface to change + * @param ipaddr the new IP address + * + * @note call netif_set_addr() if you also want to change netmask and + * default gateway + */ +void +netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr) +{ + ip_addr_t old_addr; + + LWIP_ERROR("netif_set_ipaddr: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + + LWIP_ASSERT_CORE_LOCKED(); + + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_address = &old_addr; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_ADDRESS_CHANGED, &args); +#endif + } +} + +static int +netif_do_set_netmask(struct netif *netif, const ip4_addr_t *netmask, ip_addr_t *old_nm) +{ + /* address is actually being changed? 
*/ + if (ip4_addr_cmp(netmask, netif_ip4_netmask(netif)) == 0) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + LWIP_ASSERT("invalid pointer", old_nm != NULL); + ip_addr_copy(*old_nm, *netif_ip_netmask4(netif)); +#else + LWIP_UNUSED_ARG(old_nm); +#endif + mib2_remove_route_ip4(0, netif); + /* set new netmask to netif */ + ip4_addr_set(ip_2_ip4(&netif->netmask), netmask); + IP_SET_TYPE_VAL(netif->netmask, IPADDR_TYPE_V4); + mib2_add_route_ip4(0, netif); + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: netmask of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + netif->name[0], netif->name[1], + ip4_addr1_16(netif_ip4_netmask(netif)), + ip4_addr2_16(netif_ip4_netmask(netif)), + ip4_addr3_16(netif_ip4_netmask(netif)), + ip4_addr4_16(netif_ip4_netmask(netif)))); + return 1; /* netmask changed */ + } + return 0; /* netmask unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the netmask of a network interface + * + * @param netif the network interface to change + * @param netmask the new netmask + * + * @note call netif_set_addr() if you also want to change ip address and + * default gateway + */ +void +netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + ip_addr_t old_nm_val; + ip_addr_t *old_nm = &old_nm_val; +#else + ip_addr_t *old_nm = NULL; +#endif + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_netmask: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + + if (netif_do_set_netmask(netif, netmask, old_nm)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_netmask = old_nm; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_NETMASK_CHANGED, &args); +#endif + } +} + +static int +netif_do_set_gw(struct netif *netif, const ip4_addr_t *gw, ip_addr_t *old_gw) +{ + /* address is actually being changed? 
*/ + if (ip4_addr_cmp(gw, netif_ip4_gw(netif)) == 0) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + LWIP_ASSERT("invalid pointer", old_gw != NULL); + ip_addr_copy(*old_gw, *netif_ip_gw4(netif)); +#else + LWIP_UNUSED_ARG(old_gw); +#endif + + ip4_addr_set(ip_2_ip4(&netif->gw), gw); + IP_SET_TYPE_VAL(netif->gw, IPADDR_TYPE_V4); + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: GW address of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", + netif->name[0], netif->name[1], + ip4_addr1_16(netif_ip4_gw(netif)), + ip4_addr2_16(netif_ip4_gw(netif)), + ip4_addr3_16(netif_ip4_gw(netif)), + ip4_addr4_16(netif_ip4_gw(netif)))); + return 1; /* gateway changed */ + } + return 0; /* gateway unchanged */ +} + +/** + * @ingroup netif_ip4 + * Change the default gateway for a network interface + * + * @param netif the network interface to change + * @param gw the new default gateway + * + * @note call netif_set_addr() if you also want to change ip address and netmask + */ +void +netif_set_gw(struct netif *netif, const ip4_addr_t *gw) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + ip_addr_t old_gw_val; + ip_addr_t *old_gw = &old_gw_val; +#else + ip_addr_t *old_gw = NULL; +#endif + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_gw: invalid netif", netif != NULL, return); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + if (netif_do_set_gw(netif, gw, old_gw)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_ext_callback_args_t args; + args.ipv4_changed.old_gw = old_gw; + netif_invoke_ext_callback(netif, LWIP_NSC_IPV4_GATEWAY_CHANGED, &args); +#endif + } +} + +/** + * @ingroup netif_ip4 + * Change IP address configuration for a network interface (including netmask + * and default gateway). 
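+ *
+ * A typical static-configuration sketch (the addresses and my_netif are
+ * illustrative only):
+ *
+ *   ip4_addr_t ip, mask, gw;
+ *   IP4_ADDR(&ip,   192, 168,   1,  10);
+ *   IP4_ADDR(&mask, 255, 255, 255,   0);
+ *   IP4_ADDR(&gw,   192, 168,   1,   1);
+ *   netif_set_addr(&my_netif, &ip, &mask, &gw);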
+ * + * @param netif the network interface to change + * @param ipaddr the new IP address + * @param netmask the new netmask + * @param gw the new default gateway + */ +void +netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, + const ip4_addr_t *gw) +{ +#if LWIP_NETIF_EXT_STATUS_CALLBACK + netif_nsc_reason_t change_reason = LWIP_NSC_NONE; + netif_ext_callback_args_t cb_args; + ip_addr_t old_nm_val; + ip_addr_t old_gw_val; + ip_addr_t *old_nm = &old_nm_val; + ip_addr_t *old_gw = &old_gw_val; +#else + ip_addr_t *old_nm = NULL; + ip_addr_t *old_gw = NULL; +#endif + ip_addr_t old_addr; + int remove; + + LWIP_ASSERT_CORE_LOCKED(); + + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY4; + } + if (netmask == NULL) { + netmask = IP4_ADDR_ANY4; + } + if (gw == NULL) { + gw = IP4_ADDR_ANY4; + } + + remove = ip4_addr_isany(ipaddr); + if (remove) { + /* when removing an address, we have to remove it *before* changing netmask/gw + to ensure that tcp RST segment can be sent correctly */ + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_ADDRESS_CHANGED; + cb_args.ipv4_changed.old_address = &old_addr; +#endif + } + } + if (netif_do_set_netmask(netif, netmask, old_nm)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_NETMASK_CHANGED; + cb_args.ipv4_changed.old_netmask = old_nm; +#endif + } + if (netif_do_set_gw(netif, gw, old_gw)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_GATEWAY_CHANGED; + cb_args.ipv4_changed.old_gw = old_gw; +#endif + } + if (!remove) { + /* set ipaddr last to ensure netmask/gw have been set when status callback is called */ + if (netif_do_set_ipaddr(netif, ipaddr, &old_addr)) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + change_reason |= LWIP_NSC_IPV4_ADDRESS_CHANGED; + cb_args.ipv4_changed.old_address = &old_addr; +#endif + } + } + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + if (change_reason != LWIP_NSC_NONE) { + change_reason |= LWIP_NSC_IPV4_SETTINGS_CHANGED; + netif_invoke_ext_callback(netif, change_reason, &cb_args); + } +#endif +} +#endif /* LWIP_IPV4*/ + +/** + * @ingroup netif + * Remove a network interface from the list of lwIP netifs. + * + * @param netif the network interface to remove + */ +void +netif_remove(struct netif *netif) +{ +#if LWIP_IPV6 + int i; +#endif + + LWIP_ASSERT_CORE_LOCKED(); + + if (netif == NULL) { + return; + } + + netif_invoke_ext_callback(netif, LWIP_NSC_NETIF_REMOVED, NULL); + +#if LWIP_IPV4 + if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) { + netif_do_ip_addr_changed(netif_ip_addr4(netif), NULL); + } + +#if LWIP_IGMP + /* stop IGMP processing */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_stop(netif); + } +#endif /* LWIP_IGMP */ +#endif /* LWIP_IPV4*/ + +#if LWIP_IPV6 + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { + netif_do_ip_addr_changed(netif_ip_addr6(netif, i), NULL); + } + } +#if LWIP_IPV6_MLD + /* stop MLD processing */ + mld6_stop(netif); +#endif /* LWIP_IPV6_MLD */ +#endif /* LWIP_IPV6 */ + if (netif_is_up(netif)) { + /* set netif down before removing (call callback function) */ + netif_set_down(netif); + } + + mib2_remove_ip4(netif); + + /* this netif is default? */ + if (netif_default == netif) { + /* reset default netif */ + netif_set_default(NULL); + } +#if !LWIP_SINGLE_NETIF + /* is it the first netif? 
*/ + if (netif_list == netif) { + netif_list = netif->next; + } else { + /* look for netif further down the list */ + struct netif *tmp_netif; + NETIF_FOREACH(tmp_netif) { + if (tmp_netif->next == netif) { + tmp_netif->next = netif->next; + break; + } + } + if (tmp_netif == NULL) { + return; /* netif is not on the list */ + } + } +#endif /* !LWIP_SINGLE_NETIF */ + mib2_netif_removed(netif); +#if LWIP_NETIF_REMOVE_CALLBACK + if (netif->remove_callback) { + netif->remove_callback(netif); + } +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + LWIP_DEBUGF( NETIF_DEBUG, ("netif_remove: removed netif\n") ); +} + +/** + * @ingroup netif + * Set a network interface as the default network interface + * (used to output all packets for which no specific route is found) + * + * @param netif the default network interface + */ +void +netif_set_default(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif == NULL) { + /* remove default route */ + mib2_remove_route_ip4(1, netif); + } else { + /* install default route */ + mib2_add_route_ip4(1, netif); + } + netif_default = netif; + LWIP_DEBUGF(NETIF_DEBUG, ("netif: setting default interface %c%c\n", + netif ? netif->name[0] : '\'', netif ? netif->name[1] : '\'')); +} + +/** + * @ingroup netif + * Bring an interface up, available for processing + * traffic. + */ +void +netif_set_up(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_up: invalid netif", netif != NULL, return); + + if (!(netif->flags & NETIF_FLAG_UP)) { + netif_set_flags(netif, NETIF_FLAG_UP); + + MIB2_COPY_SYSUPTIME_TO(&netif->ts); + + NETIF_STATUS_CALLBACK(netif); + +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.status_changed.state = 1; + netif_invoke_ext_callback(netif, LWIP_NSC_STATUS_CHANGED, &args); + } +#endif + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4 | NETIF_REPORT_TYPE_IPV6); +#if LWIP_IPV6 + nd6_restart_netif(netif); +#endif /* LWIP_IPV6 */ + } +} + +/** Send ARP/IGMP/MLD/RS events, e.g. on link-up/netif-up or addr-change + */ +static void +netif_issue_reports(struct netif *netif, u8_t report_type) +{ + LWIP_ASSERT("netif_issue_reports: invalid netif", netif != NULL); + + /* Only send reports when both link and admin states are up */ + if (!(netif->flags & NETIF_FLAG_LINK_UP) || + !(netif->flags & NETIF_FLAG_UP)) { + return; + } + +#if LWIP_IPV4 + if ((report_type & NETIF_REPORT_TYPE_IPV4) && + !ip4_addr_isany_val(*netif_ip4_addr(netif))) { +#if LWIP_ARP + /* For Ethernet network interfaces, we would like to send a "gratuitous ARP" */ + if (netif->flags & (NETIF_FLAG_ETHARP)) { + etharp_gratuitous(netif); + } +#endif /* LWIP_ARP */ + +#if LWIP_IGMP + /* resend IGMP memberships */ + if (netif->flags & NETIF_FLAG_IGMP) { + igmp_report_groups(netif); + } +#endif /* LWIP_IGMP */ + } +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 + if (report_type & NETIF_REPORT_TYPE_IPV6) { +#if LWIP_IPV6_MLD + /* send mld memberships */ + mld6_report_groups(netif); +#endif /* LWIP_IPV6_MLD */ + } +#endif /* LWIP_IPV6 */ +} + +/** + * @ingroup netif + * Bring an interface down, disabling any traffic processing. 
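+ *
+ * One possible pattern (a sketch; my_netif and the new_* variables are
+ * illustrative) is to bracket a reconfiguration with down/up:
+ *
+ *   netif_set_down(&my_netif);
+ *   netif_set_addr(&my_netif, &new_ip, &new_mask, &new_gw);
+ *   netif_set_up(&my_netif);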
+ */ +void +netif_set_down(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_down: invalid netif", netif != NULL, return); + + if (netif->flags & NETIF_FLAG_UP) { +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.status_changed.state = 0; + netif_invoke_ext_callback(netif, LWIP_NSC_STATUS_CHANGED, &args); + } +#endif + + netif_clear_flags(netif, NETIF_FLAG_UP); + MIB2_COPY_SYSUPTIME_TO(&netif->ts); + +#if LWIP_IPV4 && LWIP_ARP + if (netif->flags & NETIF_FLAG_ETHARP) { + etharp_cleanup_netif(netif); + } +#endif /* LWIP_IPV4 && LWIP_ARP */ + +#if LWIP_IPV6 + nd6_cleanup_netif(netif); +#endif /* LWIP_IPV6 */ + + NETIF_STATUS_CALLBACK(netif); + } +} + +#if LWIP_NETIF_STATUS_CALLBACK +/** + * @ingroup netif + * Set callback to be called when interface is brought up/down or address is changed while up + */ +void +netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->status_callback = status_callback; + } +} +#endif /* LWIP_NETIF_STATUS_CALLBACK */ + +#if LWIP_NETIF_REMOVE_CALLBACK +/** + * @ingroup netif + * Set callback to be called when the interface has been removed + */ +void +netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->remove_callback = remove_callback; + } +} +#endif /* LWIP_NETIF_REMOVE_CALLBACK */ + +/** + * @ingroup netif + * Called by a driver when its link goes up + */ +void +netif_set_link_up(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_link_up: invalid netif", netif != NULL, return); + + if (!(netif->flags & NETIF_FLAG_LINK_UP)) { + netif_set_flags(netif, NETIF_FLAG_LINK_UP); + +#if LWIP_DHCP + dhcp_network_changed(netif); +#endif /* LWIP_DHCP */ + +#if LWIP_AUTOIP + autoip_network_changed(netif); +#endif /* LWIP_AUTOIP */ + + netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4 | NETIF_REPORT_TYPE_IPV6); +#if LWIP_IPV6 + nd6_restart_netif(netif); +#endif /* LWIP_IPV6 */ + + NETIF_LINK_CALLBACK(netif); +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.link_changed.state = 1; + netif_invoke_ext_callback(netif, LWIP_NSC_LINK_CHANGED, &args); + } +#endif + } +} + +/** + * @ingroup netif + * Called by a driver when its link goes down + */ +void +netif_set_link_down(struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("netif_set_link_down: invalid netif", netif != NULL, return); + + if (netif->flags & NETIF_FLAG_LINK_UP) { + netif_clear_flags(netif, NETIF_FLAG_LINK_UP); + NETIF_LINK_CALLBACK(netif); +#if LWIP_NETIF_EXT_STATUS_CALLBACK + { + netif_ext_callback_args_t args; + args.link_changed.state = 0; + netif_invoke_ext_callback(netif, LWIP_NSC_LINK_CHANGED, &args); + } +#endif + } +} + +#if LWIP_NETIF_LINK_CALLBACK +/** + * @ingroup netif + * Set callback to be called when link is brought up/down + */ +void +netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback) +{ + LWIP_ASSERT_CORE_LOCKED(); + + if (netif) { + netif->link_callback = link_callback; + } +} +#endif /* LWIP_NETIF_LINK_CALLBACK */ + +#if ENABLE_LOOPBACK +/** + * @ingroup netif + * Send an IP packet to be received on the same netif (loopif-like). + * The pbuf is simply copied and handed back to netif->input. + * In multithreaded mode, this is done directly since netif->input must put + * the packet on a queue. 
+ * In callback mode, the packet is put on an internal queue and is fed to + * netif->input by netif_poll(). + * + * @param netif the lwip network interface structure + * @param p the (IP) packet to 'send' + * @return ERR_OK if the packet has been sent + * ERR_MEM if the pbuf used to copy the packet couldn't be allocated + */ +err_t +netif_loop_output(struct netif *netif, struct pbuf *p) +{ + struct pbuf *r; + err_t err; + struct pbuf *last; +#if LWIP_LOOPBACK_MAX_PBUFS + u16_t clen = 0; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + /* If we have a loopif, SNMP counters are adjusted for it, + * if not they are adjusted for 'netif'. */ +#if MIB2_STATS +#if LWIP_HAVE_LOOPIF + struct netif *stats_if = &loop_netif; +#else /* LWIP_HAVE_LOOPIF */ + struct netif *stats_if = netif; +#endif /* LWIP_HAVE_LOOPIF */ +#endif /* MIB2_STATS */ +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + u8_t schedule_poll = 0; +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("netif_loop_output: invalid netif", netif != NULL); + LWIP_ASSERT("netif_loop_output: invalid pbuf", p != NULL); + + /* Allocate a new pbuf */ + r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM); + if (r == NULL) { + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return ERR_MEM; + } +#if LWIP_LOOPBACK_MAX_PBUFS + clen = pbuf_clen(r); + /* check for overflow or too many pbuf on queue */ + if (((netif->loop_cnt_current + clen) < netif->loop_cnt_current) || + ((netif->loop_cnt_current + clen) > LWIP_MIN(LWIP_LOOPBACK_MAX_PBUFS, 0xFFFF))) { + pbuf_free(r); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return ERR_MEM; + } + netif->loop_cnt_current = (u16_t)(netif->loop_cnt_current + clen); +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + /* Copy the whole pbuf queue p into the single pbuf r */ + if ((err = pbuf_copy(r, p)) != ERR_OK) { + pbuf_free(r); + LINK_STATS_INC(link.memerr); + LINK_STATS_INC(link.drop); + MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); + return err; + } + + /* Put the packet on a linked list which gets emptied through calling + netif_poll(). 
*/ + + /* let last point to the last pbuf in chain r */ + for (last = r; last->next != NULL; last = last->next) { + /* nothing to do here, just get to the last pbuf */ + } + + SYS_ARCH_PROTECT(lev); + if (netif->loop_first != NULL) { + LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL); + netif->loop_last->next = r; + netif->loop_last = last; + } else { + netif->loop_first = r; + netif->loop_last = last; +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + /* No existing packets queued, schedule poll */ + schedule_poll = 1; +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + } + SYS_ARCH_UNPROTECT(lev); + + LINK_STATS_INC(link.xmit); + MIB2_STATS_NETIF_ADD(stats_if, ifoutoctets, p->tot_len); + MIB2_STATS_NETIF_INC(stats_if, ifoutucastpkts); + +#if LWIP_NETIF_LOOPBACK_MULTITHREADING + /* For multithreading environment, schedule a call to netif_poll */ + if (schedule_poll) { + tcpip_try_callback((tcpip_callback_fn)netif_poll, netif); + } +#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ + + return ERR_OK; +} + +#if LWIP_HAVE_LOOPIF +#if LWIP_IPV4 +static err_t +netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t *addr) +{ + LWIP_UNUSED_ARG(addr); + return netif_loop_output(netif, p); +} +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +static err_t +netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t *addr) +{ + LWIP_UNUSED_ARG(addr); + return netif_loop_output(netif, p); +} +#endif /* LWIP_IPV6 */ +#endif /* LWIP_HAVE_LOOPIF */ + + +/** + * Call netif_poll() in the main loop of your application. This is to prevent + * reentering non-reentrant functions like tcp_input(). Packets passed to + * netif_loop_output() are put on a list that is passed to netif->input() by + * netif_poll(). + */ +void +netif_poll(struct netif *netif) +{ + /* If we have a loopif, SNMP counters are adjusted for it, + * if not they are adjusted for 'netif'. */ +#if MIB2_STATS +#if LWIP_HAVE_LOOPIF + struct netif *stats_if = &loop_netif; +#else /* LWIP_HAVE_LOOPIF */ + struct netif *stats_if = netif; +#endif /* LWIP_HAVE_LOOPIF */ +#endif /* MIB2_STATS */ + SYS_ARCH_DECL_PROTECT(lev); + + LWIP_ASSERT("netif_poll: invalid netif", netif != NULL); + + /* Get a packet from the list. With SYS_LIGHTWEIGHT_PROT=1, this is protected */ + SYS_ARCH_PROTECT(lev); + while (netif->loop_first != NULL) { + struct pbuf *in, *in_end; +#if LWIP_LOOPBACK_MAX_PBUFS + u8_t clen = 1; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + in = in_end = netif->loop_first; + while (in_end->len != in_end->tot_len) { + LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL); + in_end = in_end->next; +#if LWIP_LOOPBACK_MAX_PBUFS + clen++; +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + } +#if LWIP_LOOPBACK_MAX_PBUFS + /* adjust the number of pbufs on queue */ + LWIP_ASSERT("netif->loop_cnt_current underflow", + ((netif->loop_cnt_current - clen) < netif->loop_cnt_current)); + netif->loop_cnt_current = (u16_t)(netif->loop_cnt_current - clen); +#endif /* LWIP_LOOPBACK_MAX_PBUFS */ + + /* 'in_end' now points to the last pbuf from 'in' */ + if (in_end == netif->loop_last) { + /* this was the last pbuf in the list */ + netif->loop_first = netif->loop_last = NULL; + } else { + /* pop the pbuf off the list */ + netif->loop_first = in_end->next; + LWIP_ASSERT("should not be null since first != last!", netif->loop_first != NULL); + } + /* De-queue the pbuf from its successors on the 'loop_' list. 
+     */
+    in_end->next = NULL;
+    SYS_ARCH_UNPROTECT(lev);
+
+    in->if_idx = netif_get_index(netif);
+
+    LINK_STATS_INC(link.recv);
+    MIB2_STATS_NETIF_ADD(stats_if, ifinoctets, in->tot_len);
+    MIB2_STATS_NETIF_INC(stats_if, ifinucastpkts);
+    /* loopback packets are always IP packets! */
+    if (ip_input(in, netif) != ERR_OK) {
+      pbuf_free(in);
+    }
+    SYS_ARCH_PROTECT(lev);
+  }
+  SYS_ARCH_UNPROTECT(lev);
+}
+
+#if !LWIP_NETIF_LOOPBACK_MULTITHREADING
+/**
+ * Calls netif_poll() for every netif on the netif_list.
+ */
+void
+netif_poll_all(void)
+{
+  struct netif *netif;
+  /* loop through netifs */
+  NETIF_FOREACH(netif) {
+    netif_poll(netif);
+  }
+}
+#endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */
+#endif /* ENABLE_LOOPBACK */
+
+#if LWIP_NUM_NETIF_CLIENT_DATA > 0
+/**
+ * @ingroup netif_cd
+ * Allocate an index to store data in client_data member of struct netif.
+ * The returned value is an index into that array.
+ * @see LWIP_NUM_NETIF_CLIENT_DATA
+ */
+u8_t
+netif_alloc_client_data_id(void)
+{
+  u8_t result = netif_client_id;
+  netif_client_id++;
+
+  LWIP_ASSERT_CORE_LOCKED();
+
+#if LWIP_NUM_NETIF_CLIENT_DATA > 256
+#error LWIP_NUM_NETIF_CLIENT_DATA must be <= 256
+#endif
+  LWIP_ASSERT("Increase LWIP_NUM_NETIF_CLIENT_DATA in lwipopts.h", result < LWIP_NUM_NETIF_CLIENT_DATA);
+  return (u8_t)(result + LWIP_NETIF_CLIENT_DATA_INDEX_MAX);
+}
+#endif
+
+#if LWIP_IPV6
+/**
+ * @ingroup netif_ip6
+ * Change an IPv6 address of a network interface
+ *
+ * @param netif the network interface to change
+ * @param addr_idx index of the IPv6 address
+ * @param addr6 the new IPv6 address
+ *
+ * @note call netif_ip6_addr_set_state() to set the address valid/tentative
+ */
+void
+netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6)
+{
+  LWIP_ASSERT_CORE_LOCKED();
+
+  LWIP_ASSERT("netif_ip6_addr_set: invalid netif", netif != NULL);
+  LWIP_ASSERT("netif_ip6_addr_set: invalid addr6", addr6 != NULL);
+
+  netif_ip6_addr_set_parts(netif, addr_idx, addr6->addr[0], addr6->addr[1],
+                           addr6->addr[2], addr6->addr[3]);
+}
+
+/*
+ * Change an IPv6 address of a network interface (internal version taking 4 * u32_t)
+ *
+ * @param netif the network interface to change
+ * @param addr_idx index of the IPv6 address
+ * @param i0 word0 of the new IPv6 address
+ * @param i1 word1 of the new IPv6 address
+ * @param i2 word2 of the new IPv6 address
+ * @param i3 word3 of the new IPv6 address
+ */
+void
+netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3)
+{
+  ip_addr_t old_addr;
+  ip_addr_t new_ipaddr;
+  LWIP_ASSERT_CORE_LOCKED();
+  LWIP_ASSERT("netif != NULL", netif != NULL);
+  LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES);
+
+  ip6_addr_copy(*ip_2_ip6(&old_addr), *netif_ip6_addr(netif, addr_idx));
+  IP_SET_TYPE_VAL(old_addr, IPADDR_TYPE_V6);
+
+  /* address is actually being changed? */
+  if ((ip_2_ip6(&old_addr)->addr[0] != i0) || (ip_2_ip6(&old_addr)->addr[1] != i1) ||
+      (ip_2_ip6(&old_addr)->addr[2] != i2) || (ip_2_ip6(&old_addr)->addr[3] != i3)) {
+    LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set: netif address being changed\n"));
+
+    IP_ADDR6(&new_ipaddr, i0, i1, i2, i3);
+    ip6_addr_assign_zone(ip_2_ip6(&new_ipaddr), IP6_UNICAST, netif);
+
+    if (ip6_addr_isvalid(netif_ip6_addr_state(netif, addr_idx))) {
+      netif_do_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr);
+    }
+    /* @todo: remove/readd mib2 ip6 entries? */
+
+    ip_addr_copy(netif->ip6_addr[addr_idx], new_ipaddr);
+
+    if (ip6_addr_isvalid(netif_ip6_addr_state(netif, addr_idx))) {
+      netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6);
+      NETIF_STATUS_CALLBACK(netif);
+    }
+
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+    {
+      netif_ext_callback_args_t args;
+      args.ipv6_set.addr_index = addr_idx;
+      args.ipv6_set.old_address = &old_addr;
+      netif_invoke_ext_callback(netif, LWIP_NSC_IPV6_SET, &args);
+    }
+#endif
+  }
+
+  LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n",
+              addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)),
+              netif_ip6_addr_state(netif, addr_idx)));
+}
+
+/**
+ * @ingroup netif_ip6
+ * Change the state of an IPv6 address of a network interface
+ * (INVALID, TENTATIVE, PREFERRED, DEPRECATED, where TENTATIVE
+ * includes the number of checks done, see ip6_addr.h)
+ *
+ * @param netif the network interface to change
+ * @param addr_idx index of the IPv6 address
+ * @param state the new IPv6 address state
+ */
+void
+netif_ip6_addr_set_state(struct netif *netif, s8_t addr_idx, u8_t state)
+{
+  u8_t old_state;
+  LWIP_ASSERT_CORE_LOCKED();
+  LWIP_ASSERT("netif != NULL", netif != NULL);
+  LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES);
+
+  old_state = netif_ip6_addr_state(netif, addr_idx);
+  /* state is actually being changed? */
+  if (old_state != state) {
+    u8_t old_valid = old_state & IP6_ADDR_VALID;
+    u8_t new_valid = state & IP6_ADDR_VALID;
+    LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set_state: netif address state being changed\n"));
+
+#if LWIP_IPV6_MLD
+    /* Reevaluate solicited-node multicast group membership. */
+    if (netif->flags & NETIF_FLAG_MLD6) {
+      nd6_adjust_mld_membership(netif, addr_idx, state);
+    }
+#endif /* LWIP_IPV6_MLD */
+
+    if (old_valid && !new_valid) {
+      /* address about to be removed by setting invalid */
+      netif_do_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL);
+      /* @todo: remove mib2 ip6 entries? */
+    }
+    netif->ip6_addr_state[addr_idx] = state;
+
+    if (!old_valid && new_valid) {
+      /* address added by setting valid */
+      /* This is a good moment to check that the address is properly zoned. */
+      IP6_ADDR_ZONECHECK_NETIF(netif_ip6_addr(netif, addr_idx), netif);
+      /* @todo: add mib2 ip6 entries? */
+      netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6);
+    }
+    if ((old_state & ~IP6_ADDR_TENTATIVE_COUNT_MASK) !=
+        (state & ~IP6_ADDR_TENTATIVE_COUNT_MASK)) {
+      /* address state has changed -> call the callback function */
+      NETIF_STATUS_CALLBACK(netif);
+    }
+
+#if LWIP_NETIF_EXT_STATUS_CALLBACK
+    {
+      netif_ext_callback_args_t args;
+      args.ipv6_addr_state_changed.addr_index = addr_idx;
+      args.ipv6_addr_state_changed.old_state = old_state;
+      args.ipv6_addr_state_changed.address = netif_ip_addr6(netif, addr_idx);
+      netif_invoke_ext_callback(netif, LWIP_NSC_IPV6_ADDR_STATE_CHANGED, &args);
+    }
+#endif
+  }
+  LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n",
+              addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)),
+              netif_ip6_addr_state(netif, addr_idx)));
+}
+
+/**
+ * Checks if a specific local address is present on the netif and returns its
+ * index. Depending on its state, it may or may not be assigned to the
+ * interface (as per RFC terminology).
+ *
+ * The given address may or may not be zoned (i.e., have a zone index other
+ * than IP6_NO_ZONE).
If the address is zoned, it must have the correct zone + * for the given netif, or no match will be found. + * + * @param netif the netif to check + * @param ip6addr the IPv6 address to find + * @return >= 0: address found, this is its index + * -1: address not found on this netif + */ +s8_t +netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr) +{ + s8_t i; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_get_ip6_addr_match: invalid netif", netif != NULL); + LWIP_ASSERT("netif_get_ip6_addr_match: invalid ip6addr", ip6addr != NULL); + +#if LWIP_IPV6_SCOPES + if (ip6_addr_has_zone(ip6addr) && !ip6_addr_test_zone(ip6addr, netif)) { + return -1; /* wrong zone, no match */ + } +#endif /* LWIP_IPV6_SCOPES */ + + for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { + if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) && + ip6_addr_cmp_zoneless(netif_ip6_addr(netif, i), ip6addr)) { + return i; + } + } + return -1; +} + +/** + * @ingroup netif_ip6 + * Create a link-local IPv6 address on a netif (stored in slot 0) + * + * @param netif the netif to create the address on + * @param from_mac_48bit if != 0, assume hwadr is a 48-bit MAC address (std conversion) + * if == 0, use hwaddr directly as interface ID + */ +void +netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit) +{ + u8_t i, addr_index; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("netif_create_ip6_linklocal_address: invalid netif", netif != NULL); + + /* Link-local prefix. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[0] = PP_HTONL(0xfe800000ul); + ip_2_ip6(&netif->ip6_addr[0])->addr[1] = 0; + + /* Generate interface ID. */ + if (from_mac_48bit) { + /* Assume hwaddr is a 48-bit IEEE 802 MAC. Convert to EUI-64 address. Complement Group bit. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[2] = lwip_htonl((((u32_t)(netif->hwaddr[0] ^ 0x02)) << 24) | + ((u32_t)(netif->hwaddr[1]) << 16) | + ((u32_t)(netif->hwaddr[2]) << 8) | + (0xff)); + ip_2_ip6(&netif->ip6_addr[0])->addr[3] = lwip_htonl((u32_t)(0xfeul << 24) | + ((u32_t)(netif->hwaddr[3]) << 16) | + ((u32_t)(netif->hwaddr[4]) << 8) | + (netif->hwaddr[5])); + } else { + /* Use hwaddr directly as interface ID. */ + ip_2_ip6(&netif->ip6_addr[0])->addr[2] = 0; + ip_2_ip6(&netif->ip6_addr[0])->addr[3] = 0; + + addr_index = 3; + for (i = 0; (i < 8) && (i < netif->hwaddr_len); i++) { + if (i == 4) { + addr_index--; + } + ip_2_ip6(&netif->ip6_addr[0])->addr[addr_index] |= lwip_htonl(((u32_t)(netif->hwaddr[netif->hwaddr_len - i - 1])) << (8 * (i & 0x03))); + } + } + + /* Set a link-local zone. Even though the zone is implied by the owning + * netif, setting the zone anyway has two important conceptual advantages: + * 1) it avoids the need for a ton of exceptions in internal code, allowing + * e.g. ip6_addr_cmp() to be used on local addresses; + * 2) the properly zoned address is visible externally, e.g. when any outside + * code enumerates available addresses or uses one to bind a socket. + * Any external code unaware of address scoping is likely to just ignore the + * zone field, so this should not create any compatibility problems. */ + ip6_addr_assign_zone(ip_2_ip6(&netif->ip6_addr[0]), IP6_UNICAST, netif); + + /* Set address state. */ +#if LWIP_IPV6_DUP_DETECT_ATTEMPTS + /* Will perform duplicate address detection (DAD). */ + netif_ip6_addr_set_state(netif, 0, IP6_ADDR_TENTATIVE); +#else + /* Consider address valid. 
+   */
+  netif_ip6_addr_set_state(netif, 0, IP6_ADDR_PREFERRED);
+#endif /* LWIP_IPV6_DUP_DETECT_ATTEMPTS */
+}
+
+/**
+ * @ingroup netif_ip6
+ * This function allows for the easy addition of a new IPv6 address to an interface.
+ * It takes care of finding an empty slot and then sets the address tentative
+ * (to make sure that all the subsequent processing happens).
+ *
+ * @param netif netif to add the address on
+ * @param ip6addr address to add
+ * @param chosen_idx if != NULL, the chosen IPv6 address index will be stored here
+ */
+err_t
+netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx)
+{
+  s8_t i;
+
+  LWIP_ASSERT_CORE_LOCKED();
+
+  LWIP_ASSERT("netif_add_ip6_address: invalid netif", netif != NULL);
+  LWIP_ASSERT("netif_add_ip6_address: invalid ip6addr", ip6addr != NULL);
+
+  i = netif_get_ip6_addr_match(netif, ip6addr);
+  if (i >= 0) {
+    /* Address already added */
+    if (chosen_idx != NULL) {
+      *chosen_idx = i;
+    }
+    return ERR_OK;
+  }
+
+  /* Find a free slot. The first one is reserved for link-local addresses. */
+  for (i = ip6_addr_islinklocal(ip6addr) ? 0 : 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
+    if (ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) {
+      ip_addr_copy_from_ip6(netif->ip6_addr[i], *ip6addr);
+      ip6_addr_assign_zone(ip_2_ip6(&netif->ip6_addr[i]), IP6_UNICAST, netif);
+      netif_ip6_addr_set_state(netif, i, IP6_ADDR_TENTATIVE);
+      if (chosen_idx != NULL) {
+        *chosen_idx = i;
+      }
+      return ERR_OK;
+    }
+  }
+
+  if (chosen_idx != NULL) {
+    *chosen_idx = -1;
+  }
+  return ERR_VAL;
+}
+
+/** Dummy IPv6 output function for netifs not supporting IPv6
+ */
+static err_t
+netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr)
+{
+  LWIP_UNUSED_ARG(netif);
+  LWIP_UNUSED_ARG(p);
+  LWIP_UNUSED_ARG(ipaddr);
+
+  return ERR_IF;
+}
+#endif /* LWIP_IPV6 */
+
+#if LWIP_IPV4
+/** Dummy IPv4 output function for netifs not supporting IPv4
+ */
+static err_t
+netif_null_output_ip4(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr)
+{
+  LWIP_UNUSED_ARG(netif);
+  LWIP_UNUSED_ARG(p);
+  LWIP_UNUSED_ARG(ipaddr);
+
+  return ERR_IF;
+}
+#endif /* LWIP_IPV4 */
+
+/**
+* @ingroup netif
+* Return the interface index for the netif with name
+* or NETIF_NO_INDEX if not found/on error
+*
+* @param name the name of the netif
+*/
+u8_t
+netif_name_to_index(const char *name)
+{
+  struct netif *netif = netif_find(name);
+  if (netif != NULL) {
+    return netif_get_index(netif);
+  }
+  /* No name found, return invalid index */
+  return NETIF_NO_INDEX;
+}
+
+/**
+* @ingroup netif
+* Return the interface name for the netif matching index
+* or NULL if not found/on error
+*
+* @param idx the interface index of the netif
+* @param name char buffer of at least NETIF_NAMESIZE bytes
+*/
+char *
+netif_index_to_name(u8_t idx, char *name)
+{
+  struct netif *netif = netif_get_by_index(idx);
+
+  if (netif != NULL) {
+    name[0] = netif->name[0];
+    name[1] = netif->name[1];
+    lwip_itoa(&name[2], NETIF_NAMESIZE - 2, netif_index_to_num(idx));
+    return name;
+  }
+  return NULL;
+}
+
+/**
+* @ingroup netif
+* Return the interface for the netif index
+*
+* @param idx index of netif to find
+*/
+struct netif *
+netif_get_by_index(u8_t idx)
+{
+  struct netif *netif;
+
+  LWIP_ASSERT_CORE_LOCKED();
+
+  if (idx != NETIF_NO_INDEX) {
+    NETIF_FOREACH(netif) {
+      if (idx == netif_get_index(netif)) {
+        return netif; /* found!
*/ + } + } + } + + return NULL; +} + +/** + * @ingroup netif + * Find a network interface by searching for its name + * + * @param name the name of the netif (like netif->name) plus concatenated number + * in ascii representation (e.g. 'en0') + */ +struct netif * +netif_find(const char *name) +{ + struct netif *netif; + u8_t num; + + LWIP_ASSERT_CORE_LOCKED(); + + if (name == NULL) { + return NULL; + } + + num = (u8_t)atoi(&name[2]); + + NETIF_FOREACH(netif) { + if (num == netif->num && + name[0] == netif->name[0] && + name[1] == netif->name[1]) { + LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: found %c%c\n", name[0], name[1])); + return netif; + } + } + LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: didn't find %c%c\n", name[0], name[1])); + return NULL; +} + +#if LWIP_NETIF_EXT_STATUS_CALLBACK +/** + * @ingroup netif + * Add extended netif events listener + * @param callback pointer to listener structure + * @param fn callback function + */ +void +netif_add_ext_callback(netif_ext_callback_t *callback, netif_ext_callback_fn fn) +{ + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("callback must be != NULL", callback != NULL); + LWIP_ASSERT("fn must be != NULL", fn != NULL); + + callback->callback_fn = fn; + callback->next = ext_callback; + ext_callback = callback; +} + +/** + * @ingroup netif + * Remove extended netif events listener + * @param callback pointer to listener structure + */ +void +netif_remove_ext_callback(netif_ext_callback_t* callback) +{ + netif_ext_callback_t *last, *iter; + + LWIP_ASSERT_CORE_LOCKED(); + LWIP_ASSERT("callback must be != NULL", callback != NULL); + + if (ext_callback == NULL) { + return; + } + + if (callback == ext_callback) { + ext_callback = ext_callback->next; + } else { + last = ext_callback; + for (iter = ext_callback->next; iter != NULL; last = iter, iter = iter->next) { + if (iter == callback) { + LWIP_ASSERT("last != NULL", last != NULL); + last->next = callback->next; + callback->next = NULL; + return; + } + } + } +} + +/** + * Invoke extended netif status event + * @param netif netif that is affected by change + * @param reason change reason + * @param args depends on reason, see reason description + */ +void +netif_invoke_ext_callback(struct netif *netif, netif_nsc_reason_t reason, const netif_ext_callback_args_t *args) +{ + netif_ext_callback_t *callback = ext_callback; + + LWIP_ASSERT("netif must be != NULL", netif != NULL); + + while (callback != NULL) { + callback->callback_fn(netif, reason, args); + callback = callback->next; + } +} +#endif /* LWIP_NETIF_EXT_STATUS_CALLBACK */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/pbuf.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/pbuf.c new file mode 100644 index 0000000..7579d86 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/pbuf.c @@ -0,0 +1,1514 @@ +/** + * @file + * Packet buffer management + */ + +/** + * @defgroup pbuf Packet buffers (PBUF) + * @ingroup infrastructure + * + * Packets are built from the pbuf data structure. It supports dynamic + * memory allocation for packet contents or can reference externally + * managed packet contents both in RAM and ROM. Quick allocation for + * incoming packets is provided through pools with fixed sized pbufs. + * + * A packet may span over multiple pbufs, chained as a singly linked + * list. This is called a "pbuf chain". + * + * Multiple packets may be queued, also using this singly linked list. 
+ * This is called a "packet queue". + * + * So, a packet queue consists of one or more pbuf chains, each of + * which consist of one or more pbufs. CURRENTLY, PACKET QUEUES ARE + * NOT SUPPORTED!!! Use helper structs to queue multiple packets. + * + * The differences between a pbuf chain and a packet queue are very + * precise but subtle. + * + * The last pbuf of a packet has a ->tot_len field that equals the + * ->len field. It can be found by traversing the list. If the last + * pbuf of a packet has a ->next field other than NULL, more packets + * are on the queue. + * + * Therefore, looping through a pbuf of a single packet, has an + * loop end condition (tot_len == p->len), NOT (next == NULL). + * + * Example of custom pbuf usage: @ref zerocopyrx + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/stats.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/sys.h" +#include "lwip/netif.h" +#if LWIP_TCP && TCP_QUEUE_OOSEQ +#include "lwip/priv/tcp_priv.h" +#endif +#if LWIP_CHECKSUM_ON_COPY +#include "lwip/inet_chksum.h" +#endif + +#include + +#define SIZEOF_STRUCT_PBUF LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf)) +/* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically + aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. 
*/ +#define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE) + +static const struct pbuf * +pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset); + +#if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ +#define PBUF_POOL_IS_EMPTY() +#else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ + +#if !NO_SYS +#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL +#include "lwip/tcpip.h" +#define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL() do { \ + if (tcpip_try_callback(pbuf_free_ooseq_callback, NULL) != ERR_OK) { \ + SYS_ARCH_PROTECT(old_level); \ + pbuf_free_ooseq_pending = 0; \ + SYS_ARCH_UNPROTECT(old_level); \ + } } while(0) +#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ +#endif /* !NO_SYS */ + +volatile u8_t pbuf_free_ooseq_pending; +#define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty() + +/** + * Attempt to reclaim some memory from queued out-of-sequence TCP segments + * if we run out of pool pbufs. It's better to give priority to new packets + * if we're running out. + * + * This must be done in the correct thread context therefore this function + * can only be used with NO_SYS=0 and through tcpip_callback. + */ +#if !NO_SYS +static +#endif /* !NO_SYS */ +void +pbuf_free_ooseq(void) +{ + struct tcp_pcb *pcb; + SYS_ARCH_SET(pbuf_free_ooseq_pending, 0); + + for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) { + if (pcb->ooseq != NULL) { + /** Free the ooseq pbufs of one PCB only */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n")); + tcp_free_ooseq(pcb); + return; + } + } +} + +#if !NO_SYS +/** + * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq(). + */ +static void +pbuf_free_ooseq_callback(void *arg) +{ + LWIP_UNUSED_ARG(arg); + pbuf_free_ooseq(); +} +#endif /* !NO_SYS */ + +/** Queue a call to pbuf_free_ooseq if not already queued. */ +static void +pbuf_pool_is_empty(void) +{ +#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL + SYS_ARCH_SET(pbuf_free_ooseq_pending, 1); +#else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ + u8_t queued; + SYS_ARCH_DECL_PROTECT(old_level); + SYS_ARCH_PROTECT(old_level); + queued = pbuf_free_ooseq_pending; + pbuf_free_ooseq_pending = 1; + SYS_ARCH_UNPROTECT(old_level); + + if (!queued) { + /* queue a call to pbuf_free_ooseq if not already queued */ + PBUF_POOL_FREE_OOSEQ_QUEUE_CALL(); + } +#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ +} +#endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ + +/* Initialize members of struct pbuf after allocation */ +static void +pbuf_init_alloced_pbuf(struct pbuf *p, void *payload, u16_t tot_len, u16_t len, pbuf_type type, u8_t flags) +{ + p->next = NULL; + p->payload = payload; + p->tot_len = tot_len; + p->len = len; + p->type_internal = (u8_t)type; + p->flags = flags; + p->ref = 1; + p->if_idx = NETIF_NO_INDEX; +} + +/** + * @ingroup pbuf + * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type). + * + * The actual memory allocated for the pbuf is determined by the + * layer at which the pbuf is allocated and the requested size + * (from the size parameter). + * + * @param layer header size + * @param length size of the pbuf's payload + * @param type this parameter decides how and where the pbuf + * should be allocated as follows: + * + * - PBUF_RAM: buffer memory for pbuf is allocated as one large + * chunk. This includes protocol headers as well. + * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for + * protocol headers. 
Additional headers must be prepended
+ *                 by allocating another pbuf and chaining it to the front of
+ *                 the ROM pbuf. It is assumed that the memory used is really
+ *                 similar to ROM in that it is immutable and will not be
+ *                 changed. Memory which is dynamic should generally not
+ *                 be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
+ * - PBUF_REF: no buffer memory is allocated for the pbuf, even for
+ *             protocol headers. It is assumed that the pbuf is only
+ *             being used in a single thread. If the pbuf gets queued,
+ *             then pbuf_take should be called to copy the buffer.
+ * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
+ *              the pbuf pool that is allocated during pbuf_init().
+ *
+ * @return the allocated pbuf. If multiple pbufs were allocated, this
+ *         is the first pbuf of a pbuf chain.
+ */
+struct pbuf *
+pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
+{
+  struct pbuf *p;
+  u16_t offset = (u16_t)layer;
+  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length));
+
+  switch (type) {
+    case PBUF_REF: /* fall through */
+    case PBUF_ROM:
+      p = pbuf_alloc_reference(NULL, length, type);
+      break;
+    case PBUF_POOL: {
+      struct pbuf *q, *last;
+      u16_t rem_len; /* remaining length */
+      p = NULL;
+      last = NULL;
+      rem_len = length;
+      do {
+        u16_t qlen;
+        q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL);
+        if (q == NULL) {
+          PBUF_POOL_IS_EMPTY();
+          /* free chain so far allocated */
+          if (p) {
+            pbuf_free(p);
+          }
+          /* bail out unsuccessfully */
+          return NULL;
+        }
+        qlen = LWIP_MIN(rem_len, (u16_t)(PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)));
+        pbuf_init_alloced_pbuf(q, LWIP_MEM_ALIGN((void *)((u8_t *)q + SIZEOF_STRUCT_PBUF + offset)),
+                               rem_len, qlen, type, 0);
+        LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
+                    ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0);
+        LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
+                    (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 );
+        if (p == NULL) {
+          /* allocated head of pbuf chain (into p) */
+          p = q;
+        } else {
+          /* make previous pbuf point to this pbuf */
+          last->next = q;
+        }
+        last = q;
+        rem_len = (u16_t)(rem_len - qlen);
+        offset = 0;
+      } while (rem_len > 0);
+      break;
+    }
+    case PBUF_RAM: {
+      u16_t payload_len = (u16_t)(LWIP_MEM_ALIGN_SIZE(offset) + LWIP_MEM_ALIGN_SIZE(length));
+      mem_size_t alloc_len = (mem_size_t)(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF) + payload_len);
+
+      /* bug #50040: Check for integer overflow when calculating alloc_len */
+      if ((payload_len < LWIP_MEM_ALIGN_SIZE(length)) ||
+          (alloc_len < LWIP_MEM_ALIGN_SIZE(length))) {
+        return NULL;
+      }
+
+      /* If pbuf is to be allocated in RAM, allocate memory for it. */
+      p = (struct pbuf *)mem_malloc(alloc_len);
+      if (p == NULL) {
+        return NULL;
+      }
+      pbuf_init_alloced_pbuf(p, LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset)),
+                             length, length, type, 0);
+      LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
+                  ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
+      break;
+    }
+    default:
+      LWIP_ASSERT("pbuf_alloc: erroneous type", 0);
+      return NULL;
+  }
+  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p));
+  return p;
+}
+
+/**
+ * @ingroup pbuf
+ * Allocates a pbuf for referenced data.
+ * Referenced data can be volatile (PBUF_REF) or long-lived (PBUF_ROM).
+ *
+ * The actual memory allocated for the pbuf is determined by the
+ * layer at which the pbuf is allocated and the requested size
+ * (from the size parameter).
+ * + * @param payload referenced payload + * @param length size of the pbuf's payload + * @param type this parameter decides how and where the pbuf + * should be allocated as follows: + * + * - PBUF_ROM: It is assumed that the memory used is really + * similar to ROM in that it is immutable and will not be + * changed. Memory which is dynamic should generally not + * be attached to PBUF_ROM pbufs. Use PBUF_REF instead. + * - PBUF_REF: It is assumed that the pbuf is only + * being used in a single thread. If the pbuf gets queued, + * then pbuf_take should be called to copy the buffer. + * + * @return the allocated pbuf. + */ +struct pbuf * +pbuf_alloc_reference(void *payload, u16_t length, pbuf_type type) +{ + struct pbuf *p; + LWIP_ASSERT("invalid pbuf_type", (type == PBUF_REF) || (type == PBUF_ROM)); + /* only allocate memory for the pbuf structure */ + p = (struct pbuf *)memp_malloc(MEMP_PBUF); + if (p == NULL) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("pbuf_alloc_reference: Could not allocate MEMP_PBUF for PBUF_%s.\n", + (type == PBUF_ROM) ? "ROM" : "REF")); + return NULL; + } + pbuf_init_alloced_pbuf(p, payload, length, length, type, 0); + return p; +} + + +#if LWIP_SUPPORT_CUSTOM_PBUF +/** + * @ingroup pbuf + * Initialize a custom pbuf (already allocated). + * Example of custom pbuf usage: @ref zerocopyrx + * + * @param l header size + * @param length size of the pbuf's payload + * @param type type of the pbuf (only used to treat the pbuf accordingly, as + * this function allocates no memory) + * @param p pointer to the custom pbuf to initialize (already allocated) + * @param payload_mem pointer to the buffer that is used for payload and headers, + * must be at least big enough to hold 'length' plus the header size, + * may be NULL if set later. + * ATTENTION: The caller is responsible for correct alignment of this buffer!! + * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least + * big enough to hold 'length' plus the header size + */ +struct pbuf * +pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p, + void *payload_mem, u16_t payload_mem_len) +{ + u16_t offset = (u16_t)l; + void *payload; + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length)); + + if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length)); + return NULL; + } + + if (payload_mem != NULL) { + payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset); + } else { + payload = NULL; + } + pbuf_init_alloced_pbuf(&p->pbuf, payload, length, length, type, PBUF_FLAG_IS_CUSTOM); + return &p->pbuf; +} +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + +/** + * @ingroup pbuf + * Shrink a pbuf chain to a desired length. + * + * @param p pbuf to shrink. + * @param new_len desired new length of pbuf chain + * + * Depending on the desired length, the first few pbufs in a chain might + * be skipped and left unchanged. The new last pbuf in the chain will be + * resized, and any remaining pbufs will be freed. + * + * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted. + * @note May not be called on a packet queue. + * + * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain). 
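The allocation and shrink semantics documented above fit together as in this short sketch (editor's addition; the helper name `alloc_and_trim_example` is illustrative): allocate a pool chain and a ROM-referencing pbuf, trim the chain with pbuf_realloc(), and release both.

#include "lwip/pbuf.h"

static void alloc_and_trim_example(void)
{
  static const char greeting[] = "hello";   /* immutable, ROM-like data */
  /* possibly a chain of fixed-size pool pbufs, 1500 bytes in total */
  struct pbuf *p = pbuf_alloc(PBUF_RAW, 1500, PBUF_POOL);
  /* no payload memory is allocated; the pbuf only references 'greeting' */
  struct pbuf *rom = pbuf_alloc_reference(LWIP_CONST_CAST(void *, greeting),
                                          sizeof(greeting) - 1, PBUF_ROM);

  if (p != NULL) {
    pbuf_realloc(p, 100);   /* keep 100 bytes; surplus pool pbufs are freed */
    pbuf_free(p);
  }
  if (rom != NULL) {
    pbuf_free(rom);
  }
}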
+ */ +void +pbuf_realloc(struct pbuf *p, u16_t new_len) +{ + struct pbuf *q; + u16_t rem_len; /* remaining length */ + u16_t shrink; + + LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL); + + /* desired length larger than current length? */ + if (new_len >= p->tot_len) { + /* enlarging not yet supported */ + return; + } + + /* the pbuf chain grows by (new_len - p->tot_len) bytes + * (which may be negative in case of shrinking) */ + shrink = (u16_t)(p->tot_len - new_len); + + /* first, step over any pbufs that should remain in the chain */ + rem_len = new_len; + q = p; + /* should this pbuf be kept? */ + while (rem_len > q->len) { + /* decrease remaining length by pbuf length */ + rem_len = (u16_t)(rem_len - q->len); + /* decrease total length indicator */ + q->tot_len = (u16_t)(q->tot_len - shrink); + /* proceed to next pbuf in chain */ + q = q->next; + LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL); + } + /* we have now reached the new last pbuf (in q) */ + /* rem_len == desired length for pbuf q */ + + /* shrink allocated memory for PBUF_RAM */ + /* (other types merely adjust their length fields */ + if (pbuf_match_allocsrc(q, PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) && (rem_len != q->len) +#if LWIP_SUPPORT_CUSTOM_PBUF + && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0) +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + ) { + /* reallocate and adjust the length of the pbuf that will be split */ + q = (struct pbuf *)mem_trim(q, (mem_size_t)(((u8_t *)q->payload - (u8_t *)q) + rem_len)); + LWIP_ASSERT("mem_trim returned q == NULL", q != NULL); + } + /* adjust length fields for new last pbuf */ + q->len = rem_len; + q->tot_len = q->len; + + /* any remaining pbufs in chain? */ + if (q->next != NULL) { + /* free remaining pbufs in chain */ + pbuf_free(q->next); + } + /* q is last packet in chain */ + q->next = NULL; + +} + +/** + * Adjusts the payload pointer to reveal headers in the payload. + * @see pbuf_add_header. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size. + * @param force Allow 'header_size_increment > 0' for PBUF_REF/PBUF_ROM types + * + * @return non-zero on failure, zero on success. + * + */ +static u8_t +pbuf_add_header_impl(struct pbuf *p, size_t header_size_increment, u8_t force) +{ + u16_t type_internal; + void *payload; + u16_t increment_magnitude; + + LWIP_ASSERT("p != NULL", p != NULL); + if ((p == NULL) || (header_size_increment > 0xFFFF)) { + return 1; + } + if (header_size_increment == 0) { + return 0; + } + + increment_magnitude = (u16_t)header_size_increment; + /* Do not allow tot_len to wrap as a result. */ + if ((u16_t)(increment_magnitude + p->tot_len) < increment_magnitude) { + return 1; + } + + type_internal = p->type_internal; + + /* pbuf types containing payloads? */ + if (type_internal & PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS) { + /* set new payload pointer */ + payload = (u8_t *)p->payload - header_size_increment; + /* boundary check fails? */ + if ((u8_t *)payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) { + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, + ("pbuf_add_header: failed as %p < %p (not enough space for new header size)\n", + (void *)payload, (void *)((u8_t *)p + SIZEOF_STRUCT_PBUF))); + /* bail out unsuccessfully */ + return 1; + } + /* pbuf types referring to external payloads? */ + } else { + /* hide a header in the payload? */ + if (force) { + payload = (u8_t *)p->payload - header_size_increment; + } else { + /* cannot expand payload to front (yet!) 
+ * bail out unsuccessfully */ + return 1; + } + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_add_header: old %p new %p (%"U16_F")\n", + (void *)p->payload, (void *)payload, increment_magnitude)); + + /* modify pbuf fields */ + p->payload = payload; + p->len = (u16_t)(p->len + increment_magnitude); + p->tot_len = (u16_t)(p->tot_len + increment_magnitude); + + + return 0; +} + +/** + * Adjusts the payload pointer to reveal headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * appears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size which + * increases the size of the pbuf. New space is on the front. + * If header_size_increment is 0, this function does nothing and returns successful. + * + * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so + * the call will fail. A check is made that the increase in header size does + * not move the payload pointer in front of the start of the buffer. + * + * @return non-zero on failure, zero on success. + * + */ +u8_t +pbuf_add_header(struct pbuf *p, size_t header_size_increment) +{ + return pbuf_add_header_impl(p, header_size_increment, 0); +} + +/** + * Same as @ref pbuf_add_header but does not check if 'header_size > 0' is allowed. + * This is used internally only, to allow PBUF_REF for RX. + */ +u8_t +pbuf_add_header_force(struct pbuf *p, size_t header_size_increment) +{ + return pbuf_add_header_impl(p, header_size_increment, 1); +} + +/** + * Adjusts the payload pointer to hide headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * disappears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_decrement Number of bytes to decrement header size which + * decreases the size of the pbuf. + * If header_size_decrement is 0, this function does nothing and returns successful. + * @return non-zero on failure, zero on success. 
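As a sketch of the header add/remove pair documented here (editor's addition; `header_example` is a hypothetical name): reveal header space in front of the payload, then hide it again.

static void header_example(void)
{
  /* PBUF_TRANSPORT reserves room in front for transport/IP/link headers */
  struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, 32, PBUF_RAM);

  if (p != NULL) {
    if (pbuf_add_header(p, 8) == 0) {   /* reveal 8 bytes, e.g. a UDP header */
      /* ... fill the header at p->payload ... */
      pbuf_remove_header(p, 8);         /* hide it again; len/tot_len shrink */
    }
    pbuf_free(p);
  }
}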
+ * + */ +u8_t +pbuf_remove_header(struct pbuf *p, size_t header_size_decrement) +{ + void *payload; + u16_t increment_magnitude; + + LWIP_ASSERT("p != NULL", p != NULL); + if ((p == NULL) || (header_size_decrement > 0xFFFF)) { + return 1; + } + if (header_size_decrement == 0) { + return 0; + } + + increment_magnitude = (u16_t)header_size_decrement; + /* Check that we aren't going to move off the end of the pbuf */ + LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;); + + /* remember current payload pointer */ + payload = p->payload; + LWIP_UNUSED_ARG(payload); /* only used in LWIP_DEBUGF below */ + + /* increase payload pointer (guarded by length check above) */ + p->payload = (u8_t *)p->payload + header_size_decrement; + /* modify pbuf length fields */ + p->len = (u16_t)(p->len - increment_magnitude); + p->tot_len = (u16_t)(p->tot_len - increment_magnitude); + + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_remove_header: old %p new %p (%"U16_F")\n", + (void *)payload, (void *)p->payload, increment_magnitude)); + + return 0; +} + +static u8_t +pbuf_header_impl(struct pbuf *p, s16_t header_size_increment, u8_t force) +{ + if (header_size_increment < 0) { + return pbuf_remove_header(p, (size_t) - header_size_increment); + } else { + return pbuf_add_header_impl(p, (size_t)header_size_increment, force); + } +} + +/** + * Adjusts the payload pointer to hide or reveal headers in the payload. + * + * Adjusts the ->payload pointer so that space for a header + * (dis)appears in the pbuf payload. + * + * The ->payload, ->tot_len and ->len fields are adjusted. + * + * @param p pbuf to change the header size. + * @param header_size_increment Number of bytes to increment header size which + * increases the size of the pbuf. New space is on the front. + * (Using a negative value decreases the header size.) + * If header_size_increment is 0, this function does nothing and returns successful. + * + * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so + * the call will fail. A check is made that the increase in header size does + * not move the payload pointer in front of the start of the buffer. + * @return non-zero on failure, zero on success. + * + */ +u8_t +pbuf_header(struct pbuf *p, s16_t header_size_increment) +{ + return pbuf_header_impl(p, header_size_increment, 0); +} + +/** + * Same as pbuf_header but does not check if 'header_size > 0' is allowed. + * This is used internally only, to allow PBUF_REF for RX. + */ +u8_t +pbuf_header_force(struct pbuf *p, s16_t header_size_increment) +{ + return pbuf_header_impl(p, header_size_increment, 1); +} + +/** Similar to pbuf_header(-size) but de-refs header pbufs for (size >= p->len) + * + * @param q pbufs to operate on + * @param size The number of bytes to remove from the beginning of the pbuf list. + * While size >= p->len, pbufs are freed. + * ATTENTION: this is the opposite direction as @ref pbuf_header, but + * takes an u16_t not s16_t! + * @return the new head pbuf + */ +struct pbuf * +pbuf_free_header(struct pbuf *q, u16_t size) +{ + struct pbuf *p = q; + u16_t free_left = size; + while (free_left && p) { + if (free_left >= p->len) { + struct pbuf *f = p; + free_left = (u16_t)(free_left - p->len); + p = p->next; + f->next = 0; + pbuf_free(f); + } else { + pbuf_remove_header(p, free_left); + free_left = 0; + } + } + return p; +} + +/** + * @ingroup pbuf + * Dereference a pbuf chain or queue and deallocate any no-longer-used + * pbufs at the head of this chain or queue. 
+ * + * Decrements the pbuf reference count. If it reaches zero, the pbuf is + * deallocated. + * + * For a pbuf chain, this is repeated for each pbuf in the chain, + * up to the first pbuf which has a non-zero reference count after + * decrementing. So, when all reference counts are one, the whole + * chain is free'd. + * + * @param p The pbuf (chain) to be dereferenced. + * + * @return the number of pbufs that were de-allocated + * from the head of the chain. + * + * @note MUST NOT be called on a packet queue (Not verified to work yet). + * @note the reference counter of a pbuf equals the number of pointers + * that refer to the pbuf (or into the pbuf). + * + * @internal examples: + * + * Assuming existing chains a->b->c with the following reference + * counts, calling pbuf_free(a) results in: + * + * 1->2->3 becomes ...1->3 + * 3->3->3 becomes 2->3->3 + * 1->1->2 becomes ......1 + * 2->1->1 becomes 1->1->1 + * 1->1->1 becomes ....... + * + */ +u8_t +pbuf_free(struct pbuf *p) +{ + u8_t alloc_src; + struct pbuf *q; + u8_t count; + + if (p == NULL) { + LWIP_ASSERT("p != NULL", p != NULL); + /* if assertions are disabled, proceed with debug output */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("pbuf_free(p == NULL) was called.\n")); + return 0; + } + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p)); + + PERF_START; + + count = 0; + /* de-allocate all consecutive pbufs from the head of the chain that + * obtain a zero reference count after decrementing*/ + while (p != NULL) { + LWIP_PBUF_REF_T ref; + SYS_ARCH_DECL_PROTECT(old_level); + /* Since decrementing ref cannot be guaranteed to be a single machine operation + * we must protect it. We put the new ref into a local variable to prevent + * further protection. */ + SYS_ARCH_PROTECT(old_level); + /* all pbufs in a chain are referenced at least once */ + LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0); + /* decrease reference count (number of pointers to pbuf) */ + ref = --(p->ref); + SYS_ARCH_UNPROTECT(old_level); + /* this pbuf is no longer referenced to? */ + if (ref == 0) { + /* remember next pbuf in chain for next iteration */ + q = p->next; + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p)); + alloc_src = pbuf_get_allocsrc(p); +#if LWIP_SUPPORT_CUSTOM_PBUF + /* is this a custom pbuf? */ + if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) { + struct pbuf_custom *pc = (struct pbuf_custom *)p; + LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL); + pc->custom_free_function(p); + } else +#endif /* LWIP_SUPPORT_CUSTOM_PBUF */ + { + /* is this a pbuf from the pool? */ + if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL) { + memp_free(MEMP_PBUF_POOL, p); + /* is this a ROM or RAM referencing pbuf? 
*/ + } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF) { + memp_free(MEMP_PBUF, p); + /* type == PBUF_RAM */ + } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) { + mem_free(p); + } else { + /* @todo: support freeing other types */ + LWIP_ASSERT("invalid pbuf type", 0); + } + } + count++; + /* proceed to next pbuf */ + p = q; + /* p->ref > 0, this pbuf is still referenced to */ + /* (and so the remaining pbufs in chain as well) */ + } else { + LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, (u16_t)ref)); + /* stop walking through the chain */ + p = NULL; + } + } + PERF_STOP("pbuf_free"); + /* return number of de-allocated pbufs */ + return count; +} + +/** + * Count number of pbufs in a chain + * + * @param p first pbuf of chain + * @return the number of pbufs in a chain + */ +u16_t +pbuf_clen(const struct pbuf *p) +{ + u16_t len; + + len = 0; + while (p != NULL) { + ++len; + p = p->next; + } + return len; +} + +/** + * @ingroup pbuf + * Increment the reference count of the pbuf. + * + * @param p pbuf to increase reference counter of + * + */ +void +pbuf_ref(struct pbuf *p) +{ + /* pbuf given? */ + if (p != NULL) { + SYS_ARCH_SET(p->ref, (LWIP_PBUF_REF_T)(p->ref + 1)); + LWIP_ASSERT("pbuf ref overflow", p->ref > 0); + } +} + +/** + * @ingroup pbuf + * Concatenate two pbufs (each may be a pbuf chain) and take over + * the caller's reference of the tail pbuf. + * + * @note The caller MAY NOT reference the tail pbuf afterwards. + * Use pbuf_chain() for that purpose. + * + * This function explicitly does not check for tot_len overflow to prevent + * failing to queue too long pbufs. This can produce invalid pbufs, so + * handle with care! + * + * @see pbuf_chain() + */ +void +pbuf_cat(struct pbuf *h, struct pbuf *t) +{ + struct pbuf *p; + + LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)", + ((h != NULL) && (t != NULL)), return;); + + /* proceed to last pbuf of chain */ + for (p = h; p->next != NULL; p = p->next) { + /* add total length of second chain to all totals of first chain */ + p->tot_len = (u16_t)(p->tot_len + t->tot_len); + } + /* { p is last pbuf of first h chain, p->next == NULL } */ + LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len); + LWIP_ASSERT("p->next == NULL", p->next == NULL); + /* add total length of second chain to last pbuf total of first chain */ + p->tot_len = (u16_t)(p->tot_len + t->tot_len); + /* chain last pbuf of head (p) with first of tail (t) */ + p->next = t; + /* p->next now references t, but the caller will drop its reference to t, + * so netto there is no change to the reference count of t. + */ +} + +/** + * @ingroup pbuf + * Chain two pbufs (or pbuf chains) together. + * + * The caller MUST call pbuf_free(t) once it has stopped + * using it. Use pbuf_cat() instead if you no longer use t. + * + * @param h head pbuf (chain) + * @param t tail pbuf (chain) + * @note The pbufs MUST belong to the same packet. + * @note MAY NOT be called on a packet queue. + * + * The ->tot_len fields of all pbufs of the head chain are adjusted. + * The ->next field of the last pbuf of the head chain is adjusted. + * The ->ref field of the first pbuf of the tail chain is adjusted. 
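The ownership contract of pbuf_chain() versus pbuf_cat() is easiest to see side by side. A minimal sketch (editor's addition; `chain_example` is a hypothetical name), following the documented reference-count rules:

static void chain_example(struct pbuf *h, struct pbuf *t)
{
  pbuf_chain(h, t);   /* h references t; the caller keeps its own reference */
  /* ... use the combined chain ... */
  pbuf_free(t);       /* drop the caller's reference; t lives on inside h */
  pbuf_free(h);       /* frees the whole chain, including t */

  /* with pbuf_cat(h, t) the caller's reference to t is taken over instead,
   * so the caller must not free or reference t afterwards */
}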
+ * + */ +void +pbuf_chain(struct pbuf *h, struct pbuf *t) +{ + pbuf_cat(h, t); + /* t is now referenced by h */ + pbuf_ref(t); + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t)); +} + +/** + * Dechains the first pbuf from its succeeding pbufs in the chain. + * + * Makes p->tot_len field equal to p->len. + * @param p pbuf to dechain + * @return remainder of the pbuf chain, or NULL if it was de-allocated. + * @note May not be called on a packet queue. + */ +struct pbuf * +pbuf_dechain(struct pbuf *p) +{ + struct pbuf *q; + u8_t tail_gone = 1; + /* tail */ + q = p->next; + /* pbuf has successor in chain? */ + if (q != NULL) { + /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ + LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len); + /* enforce invariant if assertion is disabled */ + q->tot_len = (u16_t)(p->tot_len - p->len); + /* decouple pbuf from remainder */ + p->next = NULL; + /* total length of pbuf p is its own length only */ + p->tot_len = p->len; + /* q is no longer referenced by p, free it */ + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q)); + tail_gone = pbuf_free(q); + if (tail_gone > 0) { + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, + ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q)); + } + /* return remaining tail or NULL if deallocated */ + } + /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ + LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len); + return ((tail_gone > 0) ? NULL : q); +} + +/** + * @ingroup pbuf + * Create PBUF_RAM copies of pbufs. + * + * Used to queue packets on behalf of the lwIP stack, such as + * ARP based queueing. + * + * @note You MUST explicitly use p = pbuf_take(p); + * + * @note Only one packet is copied, no packet queue! + * + * @param p_to pbuf destination of the copy + * @param p_from pbuf source of the copy + * + * @return ERR_OK if pbuf was copied + * ERR_ARG if one of the pbufs is NULL or p_to is not big + * enough to hold p_from + */ +err_t +pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from) +{ + size_t offset_to = 0, offset_from = 0, len; + + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n", + (const void *)p_to, (const void *)p_from)); + + /* is the target big enough to hold the source? 
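A sketch of pbuf_copy() in use (editor's addition; `duplicate_packet` is a hypothetical name). This is essentially what pbuf_clone() further below does: allocate a target of the same total length, then copy.

static struct pbuf *duplicate_packet(const struct pbuf *src)
{
  struct pbuf *dup = pbuf_alloc(PBUF_RAW, src->tot_len, PBUF_RAM);

  if ((dup != NULL) && (pbuf_copy(dup, src) != ERR_OK)) {
    pbuf_free(dup);
    dup = NULL;
  }
  return dup;  /* contiguous copy, or NULL on allocation/size failure */
}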
*/ + LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) && + (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;); + + /* iterate through pbuf chain */ + do { + /* copy one part of the original chain */ + if ((p_to->len - offset_to) >= (p_from->len - offset_from)) { + /* complete current p_from fits into current p_to */ + len = p_from->len - offset_from; + } else { + /* current p_from does not fit into current p_to */ + len = p_to->len - offset_to; + } + MEMCPY((u8_t *)p_to->payload + offset_to, (u8_t *)p_from->payload + offset_from, len); + offset_to += len; + offset_from += len; + LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len); + LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len); + if (offset_from >= p_from->len) { + /* on to next p_from (if any) */ + offset_from = 0; + p_from = p_from->next; + } + if (offset_to == p_to->len) { + /* on to next p_to (if any) */ + offset_to = 0; + p_to = p_to->next; + LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL), return ERR_ARG;); + } + + if ((p_from != NULL) && (p_from->len == p_from->tot_len)) { + /* don't copy more than one packet! */ + LWIP_ERROR("pbuf_copy() does not allow packet queues!", + (p_from->next == NULL), return ERR_VAL;); + } + if ((p_to != NULL) && (p_to->len == p_to->tot_len)) { + /* don't copy more than one packet! */ + LWIP_ERROR("pbuf_copy() does not allow packet queues!", + (p_to->next == NULL), return ERR_VAL;); + } + } while (p_from); + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n")); + return ERR_OK; +} + +/** + * @ingroup pbuf + * Copy (part of) the contents of a packet buffer + * to an application supplied buffer. + * + * @param buf the pbuf from which to copy data + * @param dataptr the application supplied buffer + * @param len length of data to copy (dataptr must be big enough). No more + * than buf->tot_len will be copied, irrespective of len + * @param offset offset into the packet buffer from where to begin copying len bytes + * @return the number of bytes copied, or 0 on failure + */ +u16_t +pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset) +{ + const struct pbuf *p; + u16_t left = 0; + u16_t buf_copy_len; + u16_t copied_total = 0; + + LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;); + LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;); + + /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */ + for (p = buf; len != 0 && p != NULL; p = p->next) { + if ((offset != 0) && (offset >= p->len)) { + /* don't copy from this buffer -> on to the next */ + offset = (u16_t)(offset - p->len); + } else { + /* copy from this buffer. maybe only partially. */ + buf_copy_len = (u16_t)(p->len - offset); + if (buf_copy_len > len) { + buf_copy_len = len; + } + /* copy the necessary parts of the buffer */ + MEMCPY(&((char *)dataptr)[left], &((char *)p->payload)[offset], buf_copy_len); + copied_total = (u16_t)(copied_total + buf_copy_len); + left = (u16_t)(left + buf_copy_len); + len = (u16_t)(len - buf_copy_len); + offset = 0; + } + } + return copied_total; +} + +/** + * @ingroup pbuf + * Get part of a pbuf's payload as contiguous memory. The returned memory is + * either a pointer into the pbuf's payload or, if split over multiple pbufs, + * a copy into the user-supplied buffer. 
+ * + * @param p the pbuf from which to copy data + * @param buffer the application supplied buffer + * @param bufsize size of the application supplied buffer + * @param len length of data to copy (dataptr must be big enough). No more + * than buf->tot_len will be copied, irrespective of len + * @param offset offset into the packet buffer from where to begin copying len bytes + * @return the number of bytes copied, or 0 on failure + */ +void * +pbuf_get_contiguous(const struct pbuf *p, void *buffer, size_t bufsize, u16_t len, u16_t offset) +{ + const struct pbuf *q; + u16_t out_offset; + + LWIP_ERROR("pbuf_get_contiguous: invalid buf", (p != NULL), return NULL;); + LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (buffer != NULL), return NULL;); + LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (bufsize >= len), return NULL;); + + q = pbuf_skip_const(p, offset, &out_offset); + if (q != NULL) { + if (q->len >= (out_offset + len)) { + /* all data in this pbuf, return zero-copy */ + return (u8_t *)q->payload + out_offset; + } + /* need to copy */ + if (pbuf_copy_partial(q, buffer, len, out_offset) != len) { + /* copying failed: pbuf is too short */ + return NULL; + } + return buffer; + } + /* pbuf is too short (offset does not fit in) */ + return NULL; +} + +#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE +/** + * This method modifies a 'pbuf chain', so that its total length is + * smaller than 64K. The remainder of the original pbuf chain is stored + * in *rest. + * This function never creates new pbufs, but splits an existing chain + * in two parts. The tot_len of the modified packet queue will likely be + * smaller than 64K. + * 'packet queues' are not supported by this function. + * + * @param p the pbuf queue to be split + * @param rest pointer to store the remainder (after the first 64K) + */ +void pbuf_split_64k(struct pbuf *p, struct pbuf **rest) +{ + *rest = NULL; + if ((p != NULL) && (p->next != NULL)) { + u16_t tot_len_front = p->len; + struct pbuf *i = p; + struct pbuf *r = p->next; + + /* continue until the total length (summed up as u16_t) overflows */ + while ((r != NULL) && ((u16_t)(tot_len_front + r->len) >= tot_len_front)) { + tot_len_front = (u16_t)(tot_len_front + r->len); + i = r; + r = r->next; + } + /* i now points to last packet of the first segment. Set next + pointer to NULL */ + i->next = NULL; + + if (r != NULL) { + /* Update the tot_len field in the first part */ + for (i = p; i != NULL; i = i->next) { + i->tot_len = (u16_t)(i->tot_len - r->tot_len); + LWIP_ASSERT("tot_len/len mismatch in last pbuf", + (i->next != NULL) || (i->tot_len == i->len)); + } + if (p->flags & PBUF_FLAG_TCP_FIN) { + r->flags |= PBUF_FLAG_TCP_FIN; + } + + /* tot_len field in rest does not need modifications */ + /* reference counters do not need modifications */ + *rest = r; + } + } +} +#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + +/* Actual implementation of pbuf_skip() but returning const pointer... 
*/ +static const struct pbuf * +pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset) +{ + u16_t offset_left = in_offset; + const struct pbuf *q = in; + + /* get the correct pbuf */ + while ((q != NULL) && (q->len <= offset_left)) { + offset_left = (u16_t)(offset_left - q->len); + q = q->next; + } + if (out_offset != NULL) { + *out_offset = offset_left; + } + return q; +} + +/** + * @ingroup pbuf + * Skip a number of bytes at the start of a pbuf + * + * @param in input pbuf + * @param in_offset offset to skip + * @param out_offset resulting offset in the returned pbuf + * @return the pbuf in the queue where the offset is + */ +struct pbuf * +pbuf_skip(struct pbuf *in, u16_t in_offset, u16_t *out_offset) +{ + const struct pbuf *out = pbuf_skip_const(in, in_offset, out_offset); + return LWIP_CONST_CAST(struct pbuf *, out); +} + +/** + * @ingroup pbuf + * Copy application supplied data into a pbuf. + * This function can only be used to copy the equivalent of buf->tot_len data. + * + * @param buf pbuf to fill with data + * @param dataptr application supplied data buffer + * @param len length of the application supplied data buffer + * + * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough + */ +err_t +pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len) +{ + struct pbuf *p; + size_t buf_copy_len; + size_t total_copy_len = len; + size_t copied_total = 0; + + LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;); + LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;); + LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;); + + if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) { + return ERR_ARG; + } + + /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. 
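A typical pbuf_take() idiom, as a sketch (editor's addition; `packet_from_buffer` is a hypothetical name): allocate a pbuf (chain) and fill it from application memory in one step.

static struct pbuf *packet_from_buffer(const void *data, u16_t len)
{
  struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_POOL);

  if ((p != NULL) && (pbuf_take(p, data, len) != ERR_OK)) {
    pbuf_free(p);
    p = NULL;
  }
  return p;
}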
*/ + for (p = buf; total_copy_len != 0; p = p->next) { + LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL); + buf_copy_len = total_copy_len; + if (buf_copy_len > p->len) { + /* this pbuf cannot hold all remaining data */ + buf_copy_len = p->len; + } + /* copy the necessary parts of the buffer */ + MEMCPY(p->payload, &((const char *)dataptr)[copied_total], buf_copy_len); + total_copy_len -= buf_copy_len; + copied_total += buf_copy_len; + } + LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len); + return ERR_OK; +} + +/** + * @ingroup pbuf + * Same as pbuf_take() but puts data at an offset + * + * @param buf pbuf to fill with data + * @param dataptr application supplied data buffer + * @param len length of the application supplied data buffer + * @param offset offset in pbuf where to copy dataptr to + * + * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough + */ +err_t +pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset) +{ + u16_t target_offset; + struct pbuf *q = pbuf_skip(buf, offset, &target_offset); + + /* return requested data if pbuf is OK */ + if ((q != NULL) && (q->tot_len >= target_offset + len)) { + u16_t remaining_len = len; + const u8_t *src_ptr = (const u8_t *)dataptr; + /* copy the part that goes into the first pbuf */ + u16_t first_copy_len; + LWIP_ASSERT("check pbuf_skip result", target_offset < q->len); + first_copy_len = (u16_t)LWIP_MIN(q->len - target_offset, len); + MEMCPY(((u8_t *)q->payload) + target_offset, dataptr, first_copy_len); + remaining_len = (u16_t)(remaining_len - first_copy_len); + src_ptr += first_copy_len; + if (remaining_len > 0) { + return pbuf_take(q->next, src_ptr, remaining_len); + } + return ERR_OK; + } + return ERR_MEM; +} + +/** + * @ingroup pbuf + * Creates a single pbuf out of a queue of pbufs. + * + * @remark: Either the source pbuf 'p' is freed by this function or the original + * pbuf 'p' is returned, therefore the caller has to check the result! + * + * @param p the source pbuf + * @param layer pbuf_layer of the new pbuf + * + * @return a new, single pbuf (p->next is NULL) + * or the old pbuf if allocation fails + */ +struct pbuf * +pbuf_coalesce(struct pbuf *p, pbuf_layer layer) +{ + struct pbuf *q; + if (p->next == NULL) { + return p; + } + q = pbuf_clone(layer, PBUF_RAM, p); + if (q == NULL) { + /* @todo: what do we do now? */ + return p; + } + pbuf_free(p); + return q; +} + +/** + * @ingroup pbuf + * Allocates a new pbuf of same length (via pbuf_alloc()) and copies the source + * pbuf into this new pbuf (using pbuf_copy()). + * + * @param layer pbuf_layer of the new pbuf + * @param type this parameter decides how and where the pbuf should be allocated + * (@see pbuf_alloc()) + * @param p the source pbuf + * + * @return a new pbuf or NULL if allocation fails + */ +struct pbuf * +pbuf_clone(pbuf_layer layer, pbuf_type type, struct pbuf *p) +{ + struct pbuf *q; + err_t err; + q = pbuf_alloc(layer, p->tot_len, type); + if (q == NULL) { + return NULL; + } + err = pbuf_copy(q, p); + LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */ + LWIP_ASSERT("pbuf_copy failed", err == ERR_OK); + return q; +} + +#if LWIP_CHECKSUM_ON_COPY +/** + * Copies data into a single pbuf (*not* into a pbuf queue!) 
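A sketch of pbuf_coalesce() in use (editor's addition; `flatten` is a hypothetical name), showing the re-assignment the remark above requires:

static struct pbuf *flatten(struct pbuf *p)
{
  p = pbuf_coalesce(p, PBUF_RAW);  /* returns the old chain on alloc failure */
  if (p->next != NULL) {
    /* coalescing failed; fall back to chain-aware processing */
  }
  return p;  /* the caller must use this result, as documented above */
}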
and updates + * the checksum while copying + * + * @param p the pbuf to copy data into + * @param start_offset offset of p->payload where to copy the data to + * @param dataptr data to copy into the pbuf + * @param len length of data to copy into the pbuf + * @param chksum pointer to the checksum which is updated + * @return ERR_OK if successful, another error if the data does not fit + * within the (first) pbuf (no pbuf queues!) + */ +err_t +pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr, + u16_t len, u16_t *chksum) +{ + u32_t acc; + u16_t copy_chksum; + char *dst_ptr; + LWIP_ASSERT("p != NULL", p != NULL); + LWIP_ASSERT("dataptr != NULL", dataptr != NULL); + LWIP_ASSERT("chksum != NULL", chksum != NULL); + LWIP_ASSERT("len != 0", len != 0); + + if ((start_offset >= p->len) || (start_offset + len > p->len)) { + return ERR_ARG; + } + + dst_ptr = ((char *)p->payload) + start_offset; + copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len); + if ((start_offset & 1) != 0) { + copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum); + } + acc = *chksum; + acc += copy_chksum; + *chksum = FOLD_U32T(acc); + return ERR_OK; +} +#endif /* LWIP_CHECKSUM_ON_COPY */ + +/** + * @ingroup pbuf + * Get one byte from the specified position in a pbuf + * WARNING: returns zero for offset >= p->tot_len + * + * @param p pbuf to parse + * @param offset offset into p of the byte to return + * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len + */ +u8_t +pbuf_get_at(const struct pbuf *p, u16_t offset) +{ + int ret = pbuf_try_get_at(p, offset); + if (ret >= 0) { + return (u8_t)ret; + } + return 0; +} + +/** + * @ingroup pbuf + * Get one byte from the specified position in a pbuf + * + * @param p pbuf to parse + * @param offset offset into p of the byte to return + * @return byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len + */ +int +pbuf_try_get_at(const struct pbuf *p, u16_t offset) +{ + u16_t q_idx; + const struct pbuf *q = pbuf_skip_const(p, offset, &q_idx); + + /* return requested data if pbuf is OK */ + if ((q != NULL) && (q->len > q_idx)) { + return ((u8_t *)q->payload)[q_idx]; + } + return -1; +} + +/** + * @ingroup pbuf + * Put one byte to the specified position in a pbuf + * WARNING: silently ignores offset >= p->tot_len + * + * @param p pbuf to fill + * @param offset offset into p of the byte to write + * @param data byte to write at an offset into p + */ +void +pbuf_put_at(struct pbuf *p, u16_t offset, u8_t data) +{ + u16_t q_idx; + struct pbuf *q = pbuf_skip(p, offset, &q_idx); + + /* write requested data if pbuf is OK */ + if ((q != NULL) && (q->len > q_idx)) { + ((u8_t *)q->payload)[q_idx] = data; + } +} + +/** + * @ingroup pbuf + * Compare pbuf contents at specified offset with memory s2, both of length n + * + * @param p pbuf to compare + * @param offset offset into p at which to start comparing + * @param s2 buffer to compare + * @param n length of buffer to compare + * @return zero if equal, nonzero otherwise + * (0xffff if p is too short, diffoffset+1 otherwise) + */ +u16_t +pbuf_memcmp(const struct pbuf *p, u16_t offset, const void *s2, u16_t n) +{ + u16_t start = offset; + const struct pbuf *q = p; + u16_t i; + + /* pbuf long enough to perform check? */ + if (p->tot_len < (offset + n)) { + return 0xffff; + } + + /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above. 
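A sketch of safe random access into a chain (editor's addition; `toggle_byte` is a hypothetical name), avoiding the zero-on-out-of-range pitfall of pbuf_get_at() noted above:

static void toggle_byte(struct pbuf *p, u16_t off)
{
  int b = pbuf_try_get_at(p, off);   /* -1 when off >= p->tot_len */

  if (b >= 0) {
    pbuf_put_at(p, off, (u8_t)(b ^ 0xFF));  /* no-op if off is out of range */
  }
}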
*/ + while ((q != NULL) && (q->len <= start)) { + start = (u16_t)(start - q->len); + q = q->next; + } + + /* return requested data if pbuf is OK */ + for (i = 0; i < n; i++) { + /* We know pbuf_get_at() succeeds because of p->tot_len check above. */ + u8_t a = pbuf_get_at(q, (u16_t)(start + i)); + u8_t b = ((const u8_t *)s2)[i]; + if (a != b) { + return (u16_t)LWIP_MIN(i + 1, 0xFFFF); + } + } + return 0; +} + +/** + * @ingroup pbuf + * Find occurrence of mem (with length mem_len) in pbuf p, starting at offset + * start_offset. + * + * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as + * return value 'not found' + * @param mem search for the contents of this buffer + * @param mem_len length of 'mem' + * @param start_offset offset into p at which to start searching + * @return 0xFFFF if substr was not found in p or the index where it was found + */ +u16_t +pbuf_memfind(const struct pbuf *p, const void *mem, u16_t mem_len, u16_t start_offset) +{ + u16_t i; + u16_t max_cmp_start = (u16_t)(p->tot_len - mem_len); + if (p->tot_len >= mem_len + start_offset) { + for (i = start_offset; i <= max_cmp_start; i++) { + u16_t plus = pbuf_memcmp(p, i, mem, mem_len); + if (plus == 0) { + return i; + } + } + } + return 0xFFFF; +} + +/** + * Find occurrence of substr with length substr_len in pbuf p, start at offset + * start_offset + * WARNING: in contrast to strstr(), this one does not stop at the first \0 in + * the pbuf/source string! + * + * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as + * return value 'not found' + * @param substr string to search for in p, maximum length is 0xFFFE + * @return 0xFFFF if substr was not found in p or the index where it was found + */ +u16_t +pbuf_strstr(const struct pbuf *p, const char *substr) +{ + size_t substr_len; + if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) { + return 0xFFFF; + } + substr_len = strlen(substr); + if (substr_len >= 0xFFFF) { + return 0xFFFF; + } + return pbuf_memfind(p, substr, (u16_t)substr_len, 0); +} diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/raw.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/raw.c new file mode 100644 index 0000000..7a42432 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/raw.c @@ -0,0 +1,671 @@ +/** + * @file + * Implementation of raw protocol PCBs for low-level handling of + * different types of protocols besides (or overriding) those + * already available in lwIP.\n + * See also @ref raw_raw + * + * @defgroup raw_raw RAW + * @ingroup callbackstyle_api + * Implementation of raw protocol PCBs for low-level handling of + * different types of protocols besides (or overriding) those + * already available in lwIP.\n + * @see @ref api + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/def.h"
+#include "lwip/memp.h"
+#include "lwip/ip_addr.h"
+#include "lwip/netif.h"
+#include "lwip/raw.h"
+#include "lwip/priv/raw_priv.h"
+#include "lwip/stats.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/inet_chksum.h"
+
+#include <string.h>
+
+/** The list of RAW PCBs */
+static struct raw_pcb *raw_pcbs;
+
+static u8_t
+raw_input_local_match(struct raw_pcb *pcb, u8_t broadcast)
+{
+  LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */
+
+  /* check if PCB is bound to specific netif */
+  if ((pcb->netif_idx != NETIF_NO_INDEX) &&
+      (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) {
+    return 0;
+  }
+
+#if LWIP_IPV4 && LWIP_IPV6
+  /* Dual-stack: PCBs listening to any IP type also listen to any IP address */
+  if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) {
+#if IP_SOF_BROADCAST_RECV
+    if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) {
+      return 0;
+    }
+#endif /* IP_SOF_BROADCAST_RECV */
+    return 1;
+  }
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+
+  /* Only need to check PCB if incoming IP version matches PCB IP version */
+  if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) {
+#if LWIP_IPV4
+    /* Special case: IPv4 broadcast: receive all broadcasts
+     * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */
+    if (broadcast != 0) {
+#if IP_SOF_BROADCAST_RECV
+      if (ip_get_option(pcb, SOF_BROADCAST))
+#endif /* IP_SOF_BROADCAST_RECV */
+      {
+        if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip))) {
+          return 1;
+        }
+      }
+    } else
+#endif /* LWIP_IPV4 */
+      /* Handle IPv4 and IPv6: catch all or exact match */
+      if (ip_addr_isany(&pcb->local_ip) ||
+          ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) {
+        return 1;
+      }
+  }
+
+  return 0;
+}
+
+/**
+ * Determine if an incoming IP packet is covered by a RAW PCB
+ * and if so, pass it to a user-provided receive callback function.
+ *
+ * Given an incoming IP datagram (as a chain of pbufs) this function
+ * finds a corresponding RAW PCB and calls the corresponding receive
+ * callback function.
+ *
+ * @param p pbuf to be demultiplexed to a RAW PCB.
+ * @param inp network interface on which the datagram was received.
+ * @return - 1 if the packet has been eaten by a RAW PCB receive
+ *           callback function. The caller MAY NOT reference the
+ *           packet any longer, and MAY NOT call pbuf_free().
+ * @return - 0 if packet is not eaten (pbuf is still referenced by the
+ *           caller).
+ * + */ +raw_input_state_t +raw_input(struct pbuf *p, struct netif *inp) +{ + struct raw_pcb *pcb, *prev; + s16_t proto; + raw_input_state_t ret = RAW_INPUT_NONE; + u8_t broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()); + + LWIP_UNUSED_ARG(inp); + +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_HDR_GET_VERSION(p->payload) == 6) +#endif /* LWIP_IPV4 */ + { + struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload; + proto = IP6H_NEXTH(ip6hdr); + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + proto = IPH_PROTO((struct ip_hdr *)p->payload); + } +#endif /* LWIP_IPV4 */ + + prev = NULL; + pcb = raw_pcbs; + /* loop through all raw pcbs until the packet is eaten by one */ + /* this allows multiple pcbs to match against the packet by design */ + while (pcb != NULL) { + if ((pcb->protocol == proto) && raw_input_local_match(pcb, broadcast) && + (((pcb->flags & RAW_FLAGS_CONNECTED) == 0) || + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) { + /* receive callback function available? */ + if (pcb->recv != NULL) { + u8_t eaten; +#ifndef LWIP_NOASSERT + void *old_payload = p->payload; +#endif + ret = RAW_INPUT_DELIVERED; + /* the receive callback function did not eat the packet? */ + eaten = pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr()); + if (eaten != 0) { + /* receive function ate the packet */ + p = NULL; + if (prev != NULL) { + /* move the pcb to the front of raw_pcbs so that is + found faster next time */ + prev->next = pcb->next; + pcb->next = raw_pcbs; + raw_pcbs = pcb; + } + return RAW_INPUT_EATEN; + } else { + /* sanity-check that the receive callback did not alter the pbuf */ + LWIP_ASSERT("raw pcb recv callback altered pbuf payload pointer without eating packet", + p->payload == old_payload); + } + } + /* no receive callback function was set for this raw PCB */ + } + /* drop the packet */ + prev = pcb; + pcb = pcb->next; + } + return ret; +} + +/** + * @ingroup raw_raw + * Bind a RAW PCB. + * + * @param pcb RAW PCB to be bound with a local address ipaddr. + * @param ipaddr local IP address to bind with. Use IP4_ADDR_ANY to + * bind to all local interfaces. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_USE. The specified IP address is already bound to by + * another RAW PCB. + * + * @see raw_disconnect() + */ +err_t +raw_bind(struct raw_pcb *pcb, const ip_addr_t *ipaddr) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + ip_addr_set_ipaddr(&pcb->local_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. */ + if (IP_IS_V6(&pcb->local_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->local_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->local_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + return ERR_OK; +} + +/** + * @ingroup raw_raw + * Bind an RAW PCB to a specific netif. + * After calling this function, all packets received via this PCB + * are guaranteed to have come in via the specified netif, and all + * outgoing packets will go out via the specified netif. + * + * @param pcb RAW PCB to be bound with netif. + * @param netif netif to bind to. Can be NULL. 
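The eaten/not-eaten contract that raw_input() above relies on looks like this in a receive callback, as a sketch (editor's addition; `example_recv` and the filter condition are hypothetical):

static u8_t example_recv(void *arg, struct raw_pcb *pcb, struct pbuf *p,
                         const ip_addr_t *addr)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(pcb);
  LWIP_UNUSED_ARG(addr);

  if (p->tot_len >= 8) {  /* hypothetical filter condition */
    pbuf_free(p);         /* eaten: the callback must free the pbuf ... */
    return 1;             /* ... and stop raw_input() from matching further */
  }
  return 0;               /* not eaten: leave the pbuf untouched for others */
}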
+ * + * @see raw_disconnect() + */ +void +raw_bind_netif(struct raw_pcb *pcb, const struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (netif != NULL) { + pcb->netif_idx = netif_get_index(netif); + } else { + pcb->netif_idx = NETIF_NO_INDEX; + } +} + +/** + * @ingroup raw_raw + * Connect an RAW PCB. This function is required by upper layers + * of lwip. Using the raw api you could use raw_sendto() instead + * + * This will associate the RAW PCB with the remote address. + * + * @param pcb RAW PCB to be connected with remote address ipaddr and port. + * @param ipaddr remote IP address to connect with. + * + * @return lwIP error code + * + * @see raw_disconnect() and raw_sendto() + */ +err_t +raw_connect(struct raw_pcb *pcb, const ip_addr_t *ipaddr) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb == NULL) || (ipaddr == NULL)) { + return ERR_VAL; + } + ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now, + * using the bound address to make a more informed decision when possible. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->remote_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + raw_set_flags(pcb, RAW_FLAGS_CONNECTED); + return ERR_OK; +} + +/** + * @ingroup raw_raw + * Disconnect a RAW PCB. + * + * @param pcb the raw pcb to disconnect. + */ +void +raw_disconnect(struct raw_pcb *pcb) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* reset remote address association */ +#if LWIP_IPV4 && LWIP_IPV6 + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { + ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE); + } else { +#endif + ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip); +#if LWIP_IPV4 && LWIP_IPV6 + } +#endif + pcb->netif_idx = NETIF_NO_INDEX; + /* mark PCB as unconnected */ + raw_clear_flags(pcb, RAW_FLAGS_CONNECTED); +} + +/** + * @ingroup raw_raw + * Set the callback function for received packets that match the + * raw PCB's protocol and binding. + * + * The callback function MUST either + * - eat the packet by calling pbuf_free() and returning non-zero. The + * packet will not be passed to other raw PCBs or other protocol layers. + * - not free the packet, and return zero. The packet will be matched + * against further PCBs and/or forwarded to another protocol layers. + */ +void +raw_recv(struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* remember recv() callback and user data */ + pcb->recv = recv; + pcb->recv_arg = recv_arg; +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the given address. An IP header will be prepended + * to the packet, unless the RAW_FLAGS_HDRINCL flag is set on the PCB. In that + * case, the packet must include an IP header, which will then be sent as is. 
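Putting the PCB lifecycle calls above together, a minimal setup sketch (editor's addition; `setup_icmp_pcb` is a hypothetical name, and it reuses `example_recv` from the earlier sketch). It assumes execution in the tcpip thread or with the core locked:

static struct raw_pcb *setup_icmp_pcb(void)
{
  struct raw_pcb *pcb = raw_new(1 /* IP protocol number of ICMP */);

  if (pcb != NULL) {
    raw_recv(pcb, example_recv, NULL);  /* callback from the sketch above */
    raw_bind(pcb, IP4_ADDR_ANY);        /* accept on all local addresses */
  }
  return pcb;
}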
+ * + * @param pcb the raw pcb which to send + * @param p the IP payload to send + * @param ipaddr the destination address of the IP packet + * + */ +err_t +raw_sendto(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr) +{ + struct netif *netif; + const ip_addr_t *src_ip; + + if ((pcb == NULL) || (ipaddr == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr)) { + return ERR_VAL; + } + + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_sendto\n")); + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { +#if LWIP_MULTICAST_TX_OPTIONS + netif = NULL; + if (ip_addr_ismulticast(ipaddr)) { + /* For multicast-destined packets, use the user-provided interface index to + * determine the outgoing interface, if an interface index is set and a + * matching netif can be found. Otherwise, fall back to regular routing. */ + netif = netif_get_by_index(pcb->mcast_ifindex); + } + + if (netif == NULL) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + { + netif = ip_route(&pcb->local_ip, ipaddr); + } + } + + if (netif == NULL) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: No route to ")); + ip_addr_debug_print(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ipaddr); + return ERR_RTE; + } + + if (ip_addr_isany(&pcb->local_ip) || ip_addr_ismulticast(&pcb->local_ip)) { + /* use outgoing network interface IP address as source address */ + src_ip = ip_netif_get_local_ip(netif, ipaddr); +#if LWIP_IPV6 + if (src_ip == NULL) { + return ERR_RTE; + } +#endif /* LWIP_IPV6 */ + } else { + /* use RAW PCB local IP address as source address */ + src_ip = &pcb->local_ip; + } + + return raw_sendto_if_src(pcb, p, ipaddr, netif, src_ip); +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the given address, using a particular outgoing + * netif and source IP address. An IP header will be prepended to the packet, + * unless the RAW_FLAGS_HDRINCL flag is set on the PCB. In that case, the + * packet must include an IP header, which will then be sent as is. + * + * @param pcb RAW PCB used to send the data + * @param p chain of pbufs to be sent + * @param dst_ip destination IP address + * @param netif the netif used for sending + * @param src_ip source IP address + */ +err_t +raw_sendto_if_src(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + struct netif *netif, const ip_addr_t *src_ip) +{ + err_t err; + struct pbuf *q; /* q will be sent down the stack */ + u16_t header_size; + u8_t ttl; + + LWIP_ASSERT_CORE_LOCKED(); + + if ((pcb == NULL) || (dst_ip == NULL) || (netif == NULL) || (src_ip == NULL) || + !IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) || !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + header_size = ( +#if LWIP_IPV4 && LWIP_IPV6 + IP_IS_V6(dst_ip) ? IP6_HLEN : IP_HLEN); +#elif LWIP_IPV4 + IP_HLEN); +#else + IP6_HLEN); +#endif + + /* Handle the HDRINCL option as an exception: none of the code below applies + * to this case, and sending the packet needs to be done differently too. */ + if (pcb->flags & RAW_FLAGS_HDRINCL) { + /* A full header *must* be present in the first pbuf of the chain, as the + * output routines may access its fields directly. */ + if (p->len < header_size) { + return ERR_VAL; + } + /* @todo multicast loop support, if at all desired for this scenario.. */ + NETIF_SET_HINTS(netif, &pcb->netif_hints); + err = ip_output_if_hdrincl(p, src_ip, dst_ip, netif); + NETIF_RESET_HINTS(netif); + return err; + } + + /* packet too large to add an IP header without causing an overflow? 
*/ + if ((u16_t)(p->tot_len + header_size) < p->tot_len) { + return ERR_MEM; + } + /* not enough space to add an IP header to first pbuf in given p chain? */ + if (pbuf_add_header(p, header_size)) { + /* allocate header in new pbuf */ + q = pbuf_alloc(PBUF_IP, 0, PBUF_RAM); + /* new header pbuf could not be allocated? */ + if (q == NULL) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("raw_sendto: could not allocate header\n")); + return ERR_MEM; + } + if (p->tot_len != 0) { + /* chain header q in front of given pbuf p */ + pbuf_chain(q, p); + } + /* { first pbuf q points to header pbuf } */ + LWIP_DEBUGF(RAW_DEBUG, ("raw_sendto: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p)); + } else { + /* first pbuf q equals given pbuf */ + q = p; + if (pbuf_remove_header(q, header_size)) { + LWIP_ASSERT("Can't restore header we just removed!", 0); + return ERR_MEM; + } + } + +#if IP_SOF_BROADCAST + if (IP_IS_V4(dst_ip)) { + /* broadcast filter? */ + if (!ip_get_option(pcb, SOF_BROADCAST) && ip_addr_isbroadcast(dst_ip, netif)) { + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb)); + /* free any temporary header pbuf allocated by pbuf_header() */ + if (q != p) { + pbuf_free(q); + } + return ERR_VAL; + } + } +#endif /* IP_SOF_BROADCAST */ + + /* Multicast Loop? */ +#if LWIP_MULTICAST_TX_OPTIONS + if (((pcb->flags & RAW_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) { + q->flags |= PBUF_FLAG_MCASTLOOP; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#if LWIP_IPV6 + /* If requested, based on the IPV6_CHECKSUM socket option per RFC3542, + compute the checksum and update the checksum in the payload. */ + if (IP_IS_V6(dst_ip) && pcb->chksum_reqd) { + u16_t chksum = ip6_chksum_pseudo(p, pcb->protocol, p->tot_len, ip_2_ip6(src_ip), ip_2_ip6(dst_ip)); + LWIP_ASSERT("Checksum must fit into first pbuf", p->len >= (pcb->chksum_offset + 2)); + SMEMCPY(((u8_t *)p->payload) + pcb->chksum_offset, &chksum, sizeof(u16_t)); + } +#endif + + /* Determine TTL to use */ +#if LWIP_MULTICAST_TX_OPTIONS + ttl = (ip_addr_ismulticast(dst_ip) ? raw_get_multicast_ttl(pcb) : pcb->ttl); +#else /* LWIP_MULTICAST_TX_OPTIONS */ + ttl = pcb->ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + NETIF_SET_HINTS(netif, &pcb->netif_hints); + err = ip_output_if(q, src_ip, dst_ip, ttl, pcb->tos, pcb->protocol, netif); + NETIF_RESET_HINTS(netif); + + /* did we chain a header earlier? */ + if (q != p) { + /* free the header */ + pbuf_free(q); + } + return err; +} + +/** + * @ingroup raw_raw + * Send the raw IP packet to the address given by raw_connect() + * + * @param pcb the raw pcb which to send + * @param p the IP payload to send + * + */ +err_t +raw_send(struct raw_pcb *pcb, struct pbuf *p) +{ + return raw_sendto(pcb, p, &pcb->remote_ip); +} + +/** + * @ingroup raw_raw + * Remove an RAW PCB. + * + * @param pcb RAW PCB to be removed. The PCB is removed from the list of + * RAW PCB's and the data structure is freed from memory. + * + * @see raw_new() + */ +void +raw_remove(struct raw_pcb *pcb) +{ + struct raw_pcb *pcb2; + LWIP_ASSERT_CORE_LOCKED(); + /* pcb to be removed is first in list? 
*/ + if (raw_pcbs == pcb) { + /* make list start at 2nd pcb */ + raw_pcbs = raw_pcbs->next; + /* pcb not 1st in list */ + } else { + for (pcb2 = raw_pcbs; pcb2 != NULL; pcb2 = pcb2->next) { + /* find pcb in raw_pcbs list */ + if (pcb2->next != NULL && pcb2->next == pcb) { + /* remove pcb from list */ + pcb2->next = pcb->next; + break; + } + } + } + memp_free(MEMP_RAW_PCB, pcb); +} + +/** + * @ingroup raw_raw + * Create a RAW PCB. + * + * @return The RAW PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @param proto the protocol number of the IPs payload (e.g. IP_PROTO_ICMP) + * + * @see raw_remove() + */ +struct raw_pcb * +raw_new(u8_t proto) +{ + struct raw_pcb *pcb; + + LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_new\n")); + LWIP_ASSERT_CORE_LOCKED(); + + pcb = (struct raw_pcb *)memp_malloc(MEMP_RAW_PCB); + /* could allocate RAW PCB? */ + if (pcb != NULL) { + /* initialize PCB to all zeroes */ + memset(pcb, 0, sizeof(struct raw_pcb)); + pcb->protocol = proto; + pcb->ttl = RAW_TTL; +#if LWIP_MULTICAST_TX_OPTIONS + raw_set_multicast_ttl(pcb, RAW_TTL); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + pcb->next = raw_pcbs; + raw_pcbs = pcb; + } + return pcb; +} + +/** + * @ingroup raw_raw + * Create a RAW PCB for specific IP type. + * + * @return The RAW PCB which was created. NULL if the PCB data structure + * could not be allocated. + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) packets, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. + * @param proto the protocol number (next header) of the IPv6 packet payload + * (e.g. IP6_NEXTH_ICMP6) + * + * @see raw_remove() + */ +struct raw_pcb * +raw_new_ip_type(u8_t type, u8_t proto) +{ + struct raw_pcb *pcb; + LWIP_ASSERT_CORE_LOCKED(); + pcb = raw_new(proto); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else /* LWIP_IPV4 && LWIP_IPV6 */ + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** This function is called from netif.c when address is changed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change + */ +void raw_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ + struct raw_pcb *rpcb; + + if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) { + for (rpcb = raw_pcbs; rpcb != NULL; rpcb = rpcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&rpcb->local_ip, old_addr)) { + /* The PCB is bound to the old ipaddr and + * is set to bound to the new one instead */ + ip_addr_copy(rpcb->local_ip, *new_addr); + } + } + } +} + +#endif /* LWIP_RAW */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/stats.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/stats.c new file mode 100644 index 0000000..4602842 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/stats.c @@ -0,0 +1,169 @@ +/** + * @file + * Statistics module + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +#include "lwip/opt.h" + +#if LWIP_STATS /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/def.h" +#include "lwip/stats.h" +#include "lwip/mem.h" +#include "lwip/debug.h" + +#include + +struct stats_ lwip_stats; + +void +stats_init(void) +{ +#ifdef LWIP_DEBUG +#if MEM_STATS + lwip_stats.mem.name = "MEM"; +#endif /* MEM_STATS */ +#endif /* LWIP_DEBUG */ +} + +#if LWIP_STATS_DISPLAY +void +stats_display_proto(struct stats_proto *proto, const char *name) +{ + LWIP_PLATFORM_DIAG(("\n%s\n\t", name)); + LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", proto->xmit)); + LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", proto->recv)); + LWIP_PLATFORM_DIAG(("fw: %"STAT_COUNTER_F"\n\t", proto->fw)); + LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", proto->drop)); + LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", proto->chkerr)); + LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", proto->lenerr)); + LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", proto->memerr)); + LWIP_PLATFORM_DIAG(("rterr: %"STAT_COUNTER_F"\n\t", proto->rterr)); + LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", proto->proterr)); + LWIP_PLATFORM_DIAG(("opterr: %"STAT_COUNTER_F"\n\t", proto->opterr)); + LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n\t", proto->err)); + LWIP_PLATFORM_DIAG(("cachehit: %"STAT_COUNTER_F"\n", proto->cachehit)); +} + +#if IGMP_STATS || MLD6_STATS +void +stats_display_igmp(struct stats_igmp *igmp, const char *name) +{ + LWIP_PLATFORM_DIAG(("\n%s\n\t", name)); + LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", igmp->xmit)); + LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", igmp->recv)); + LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", igmp->drop)); + LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", igmp->chkerr)); + LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", igmp->lenerr)); + LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", igmp->memerr)); + LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", igmp->proterr)); + LWIP_PLATFORM_DIAG(("rx_v1: %"STAT_COUNTER_F"\n\t", igmp->rx_v1)); + LWIP_PLATFORM_DIAG(("rx_group: %"STAT_COUNTER_F"\n\t", igmp->rx_group)); + LWIP_PLATFORM_DIAG(("rx_general: %"STAT_COUNTER_F"\n\t", 
igmp->rx_general)); + LWIP_PLATFORM_DIAG(("rx_report: %"STAT_COUNTER_F"\n\t", igmp->rx_report)); + LWIP_PLATFORM_DIAG(("tx_join: %"STAT_COUNTER_F"\n\t", igmp->tx_join)); + LWIP_PLATFORM_DIAG(("tx_leave: %"STAT_COUNTER_F"\n\t", igmp->tx_leave)); + LWIP_PLATFORM_DIAG(("tx_report: %"STAT_COUNTER_F"\n", igmp->tx_report)); +} +#endif /* IGMP_STATS || MLD6_STATS */ + +#if MEM_STATS || MEMP_STATS +void +stats_display_mem(struct stats_mem *mem, const char *name) +{ + LWIP_PLATFORM_DIAG(("\nMEM %s\n\t", name)); + LWIP_PLATFORM_DIAG(("avail: %"MEM_SIZE_F"\n\t", mem->avail)); + LWIP_PLATFORM_DIAG(("used: %"MEM_SIZE_F"\n\t", mem->used)); + LWIP_PLATFORM_DIAG(("max: %"MEM_SIZE_F"\n\t", mem->max)); + LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n", mem->err)); +} + +#if MEMP_STATS +void +stats_display_memp(struct stats_mem *mem, int idx) +{ + if (idx < MEMP_MAX) { + stats_display_mem(mem, mem->name); + } +} +#endif /* MEMP_STATS */ +#endif /* MEM_STATS || MEMP_STATS */ + +#if SYS_STATS +void +stats_display_sys(struct stats_sys *sys) +{ + LWIP_PLATFORM_DIAG(("\nSYS\n\t")); + LWIP_PLATFORM_DIAG(("sem.used: %"STAT_COUNTER_F"\n\t", sys->sem.used)); + LWIP_PLATFORM_DIAG(("sem.max: %"STAT_COUNTER_F"\n\t", sys->sem.max)); + LWIP_PLATFORM_DIAG(("sem.err: %"STAT_COUNTER_F"\n\t", sys->sem.err)); + LWIP_PLATFORM_DIAG(("mutex.used: %"STAT_COUNTER_F"\n\t", sys->mutex.used)); + LWIP_PLATFORM_DIAG(("mutex.max: %"STAT_COUNTER_F"\n\t", sys->mutex.max)); + LWIP_PLATFORM_DIAG(("mutex.err: %"STAT_COUNTER_F"\n\t", sys->mutex.err)); + LWIP_PLATFORM_DIAG(("mbox.used: %"STAT_COUNTER_F"\n\t", sys->mbox.used)); + LWIP_PLATFORM_DIAG(("mbox.max: %"STAT_COUNTER_F"\n\t", sys->mbox.max)); + LWIP_PLATFORM_DIAG(("mbox.err: %"STAT_COUNTER_F"\n", sys->mbox.err)); +} +#endif /* SYS_STATS */ + +void +stats_display(void) +{ + s16_t i; + + LINK_STATS_DISPLAY(); + ETHARP_STATS_DISPLAY(); + IPFRAG_STATS_DISPLAY(); + IP6_FRAG_STATS_DISPLAY(); + IP_STATS_DISPLAY(); + ND6_STATS_DISPLAY(); + IP6_STATS_DISPLAY(); + IGMP_STATS_DISPLAY(); + MLD6_STATS_DISPLAY(); + ICMP_STATS_DISPLAY(); + ICMP6_STATS_DISPLAY(); + UDP_STATS_DISPLAY(); + TCP_STATS_DISPLAY(); + MEM_STATS_DISPLAY(); + for (i = 0; i < MEMP_MAX; i++) { + MEMP_STATS_DISPLAY(i); + } + SYS_STATS_DISPLAY(); +} +#endif /* LWIP_STATS_DISPLAY */ + +#endif /* LWIP_STATS */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/sys.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/sys.c new file mode 100644 index 0000000..b95f4bd --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/sys.c @@ -0,0 +1,148 @@ +/** + * @file + * lwIP Operating System abstraction + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/** + * @defgroup sys_layer Porting (system abstraction layer) + * @ingroup lwip + * + * @defgroup sys_os OS abstraction layer + * @ingroup sys_layer + * No need to implement functions in this section in NO_SYS mode. + * The OS-specific code should be implemented in arch/sys_arch.h + * and sys_arch.c of your port. + * + * The operating system emulation layer provides a common interface + * between the lwIP code and the underlying operating system kernel. The + * general idea is that porting lwIP to new architectures requires only + * small changes to a few header files and a new sys_arch + * implementation. It is also possible to do a sys_arch implementation + * that does not rely on any underlying operating system. + * + * The sys_arch provides semaphores, mailboxes and mutexes to lwIP. For the full + * lwIP functionality, multiple threads support can be implemented in the + * sys_arch, but this is not required for the basic lwIP + * functionality. Timer scheduling is implemented in lwIP, but can be implemented + * by the sys_arch port (LWIP_TIMERS_CUSTOM==1). + * + * In addition to the source file providing the functionality of sys_arch, + * the OS emulation layer must provide several header files defining + * macros used throughout lwip. The files required and the macros they + * must define are listed below the sys_arch description. + * + * Since lwIP 1.4.0, semaphore, mutexes and mailbox functions are prototyped in a way that + * allows both using pointers or actual OS structures to be used. This way, memory + * required for such types can be either allocated in place (globally or on the + * stack) or on the heap (allocated internally in the "*_new()" functions). + * + * Note: + * ----- + * Be careful with using mem_malloc() in sys_arch. When malloc() refers to + * mem_malloc() you can run into a circular function call problem. In mem.c + * mem_init() tries to allocate a semaphore using mem_malloc, which of course + * can't be performed when sys_arch uses mem_malloc. + * + * @defgroup sys_sem Semaphores + * @ingroup sys_os + * Semaphores can be either counting or binary - lwIP works with both + * kinds. + * Semaphores are represented by the type "sys_sem_t" which is typedef'd + * in the sys_arch.h file. Mailboxes are equivalently represented by the + * type "sys_mbox_t". Mutexes are represented by the type "sys_mutex_t". + * lwIP does not place any restrictions on how these types are represented + * internally. + * + * @defgroup sys_mutex Mutexes + * @ingroup sys_os + * Mutexes are recommended to correctly handle priority inversion, + * especially if you use LWIP_CORE_LOCKING . 
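+ *
+ * As an illustration, the semaphore functions described above can be backed
+ * directly by RTOS primitives. A minimal sketch for a FreeRTOS-based
+ * sys_arch.c, assuming sys_sem_t is typedef'd to SemaphoreHandle_t in
+ * sys_arch.h:
+ *
+ *   err_t sys_sem_new(sys_sem_t *sem, u8_t count)
+ *   {
+ *     *sem = xSemaphoreCreateCounting(0xFF, count);
+ *     return (*sem != NULL) ? ERR_OK : ERR_MEM;
+ *   }
+ *
+ *   void sys_sem_signal(sys_sem_t *sem)
+ *   {
+ *     xSemaphoreGive(*sem);
+ *   }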
+ * + * @defgroup sys_mbox Mailboxes + * @ingroup sys_os + * Mailboxes should be implemented as a queue which allows multiple messages + * to be posted (implementing as a rendez-vous point where only one message can be + * posted at a time can have a highly negative impact on performance). A message + * in a mailbox is just a pointer, nothing more. + * + * @defgroup sys_time Time + * @ingroup sys_layer + * + * @defgroup sys_prot Critical sections + * @ingroup sys_layer + * Used to protect short regions of code against concurrent access. + * - Your system is a bare-metal system (probably with an RTOS) + * and interrupts are under your control: + * Implement this as LockInterrupts() / UnlockInterrupts() + * - Your system uses an RTOS with deferred interrupt handling from a + * worker thread: Implement as a global mutex or lock/unlock scheduler + * - Your system uses a high-level OS with e.g. POSIX signals: + * Implement as a global mutex + * + * @defgroup sys_misc Misc + * @ingroup sys_os + */ + +#include "lwip/opt.h" + +#include "lwip/sys.h" + +/* Most of the functions defined in sys.h must be implemented in the + * architecture-dependent file sys_arch.c */ + +#if !NO_SYS + +#ifndef sys_msleep +/** + * Sleep for some ms. Timeouts are NOT processed while sleeping. + * + * @param ms number of milliseconds to sleep + */ +void +sys_msleep(u32_t ms) +{ + if (ms > 0) { + sys_sem_t delaysem; + err_t err = sys_sem_new(&delaysem, 0); + if (err == ERR_OK) { + sys_arch_sem_wait(&delaysem, ms); + sys_sem_free(&delaysem); + } + } +} +#endif /* sys_msleep */ + +#endif /* !NO_SYS */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp.c new file mode 100644 index 0000000..2ff37ef --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp.c @@ -0,0 +1,2686 @@ +/** + * @file + * Transmission Control Protocol for IP + * See also @ref tcp_raw + * + * @defgroup tcp_raw TCP + * @ingroup callbackstyle_api + * Transmission Control Protocol for IP\n + * @see @ref api + * + * Common functions for the TCP implementation, such as functions + * for manipulating the data structures and the TCP timer functions. TCP functions + * related to input and output is found in tcp_in.c and tcp_out.c respectively.\n + * + * TCP connection setup + * -------------------- + * The functions used for setting up connections is similar to that of + * the sequential API and of the BSD socket API. A new TCP connection + * identifier (i.e., a protocol control block - PCB) is created with the + * tcp_new() function. This PCB can then be either set to listen for new + * incoming connections or be explicitly connected to another host. + * - tcp_new() + * - tcp_bind() + * - tcp_listen() and tcp_listen_with_backlog() + * - tcp_accept() + * - tcp_connect() + * + * Sending TCP data + * ---------------- + * TCP data is sent by enqueueing the data with a call to tcp_write() and + * triggering to send by calling tcp_output(). When the data is successfully + * transmitted to the remote host, the application will be notified with a + * call to a specified callback function. + * - tcp_write() + * - tcp_output() + * - tcp_sent() + * + * Receiving TCP data + * ------------------ + * TCP data reception is callback based - an application specified + * callback function is called when new data arrives. 
When the
+ * application has taken the data, it has to call the tcp_recved()
+ * function to indicate that TCP can advertise an increased receive
+ * window.
+ * - tcp_recv()
+ * - tcp_recved()
+ *
+ * Application polling
+ * -------------------
+ * When a connection is idle (i.e., no data is either transmitted or
+ * received), lwIP will repeatedly poll the application by calling a
+ * specified callback function. This can be used either as a watchdog
+ * timer for killing connections that have stayed idle for too long, or
+ * as a method of waiting for memory to become available. For instance,
+ * if a call to tcp_write() has failed because memory wasn't available,
+ * the application may use the polling functionality to call tcp_write()
+ * again when the connection has been idle for a while.
+ * - tcp_poll()
+ *
+ * Closing and aborting connections
+ * --------------------------------
+ * - tcp_close()
+ * - tcp_abort()
+ * - tcp_err()
+ *
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/def.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/tcp.h"
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/debug.h"
+#include "lwip/stats.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/nd6.h"
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+#ifndef TCP_LOCAL_PORT_RANGE_START
+/* From http://www.iana.org/assignments/port-numbers:
+   "The Dynamic and/or Private Ports are those from 49152 through 65535" */
+#define TCP_LOCAL_PORT_RANGE_START        0xc000
+#define TCP_LOCAL_PORT_RANGE_END          0xffff
+#define TCP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & (u16_t)~TCP_LOCAL_PORT_RANGE_START) + TCP_LOCAL_PORT_RANGE_START))
+#endif
+
+#if LWIP_TCP_KEEPALIVE
+#define TCP_KEEP_DUR(pcb)   ((pcb)->keep_cnt * (pcb)->keep_intvl)
+#define TCP_KEEP_INTVL(pcb) ((pcb)->keep_intvl)
+#else /* LWIP_TCP_KEEPALIVE */
+#define TCP_KEEP_DUR(pcb)   TCP_MAXIDLE
+#define TCP_KEEP_INTVL(pcb) TCP_KEEPINTVL_DEFAULT
+#endif /* LWIP_TCP_KEEPALIVE */
+
+/* As initial send MSS, we use TCP_MSS but limit it to 536. */
+#if TCP_MSS > 536
+#define INITIAL_MSS 536
+#else
+#define INITIAL_MSS TCP_MSS
+#endif
+
+static const char *const tcp_state_str[] = {
+  "CLOSED",
+  "LISTEN",
+  "SYN_SENT",
+  "SYN_RCVD",
+  "ESTABLISHED",
+  "FIN_WAIT_1",
+  "FIN_WAIT_2",
+  "CLOSE_WAIT",
+  "CLOSING",
+  "LAST_ACK",
+  "TIME_WAIT"
+};
+
+/* last local TCP port */
+static u16_t tcp_port = TCP_LOCAL_PORT_RANGE_START;
+
+/* Incremented every coarse grained timer shot (typically every 500 ms). */
+u32_t tcp_ticks;
+static const u8_t tcp_backoff[13] =
+{ 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7};
+/* Times per slowtmr hits */
+static const u8_t tcp_persist_backoff[7] = { 3, 6, 12, 24, 48, 96, 120 };
+
+/* The TCP PCB lists. */
+
+/** List of all TCP PCBs bound but not yet (connected || listening) */
+struct tcp_pcb *tcp_bound_pcbs;
+/** List of all TCP PCBs in LISTEN state */
+union tcp_listen_pcbs_t tcp_listen_pcbs;
+/** List of all TCP PCBs that are in a state in which
+ * they accept or send data. */
+struct tcp_pcb *tcp_active_pcbs;
+/** List of all TCP PCBs in TIME-WAIT state */
+struct tcp_pcb *tcp_tw_pcbs;
+
+/** An array with all (non-temporary) PCB lists, mainly used for smaller code size */
+struct tcp_pcb **const tcp_pcb_lists[] = {&tcp_listen_pcbs.pcbs, &tcp_bound_pcbs,
+                                          &tcp_active_pcbs, &tcp_tw_pcbs
+                                         };
+
+u8_t tcp_active_pcbs_changed;
+
+/** Timer counter to handle calling slow-timer from tcp_tmr() */
+static u8_t tcp_timer;
+static u8_t tcp_timer_ctr;
+static u16_t tcp_new_port(void);
+
+static err_t tcp_close_shutdown_fin(struct tcp_pcb *pcb);
+#if LWIP_TCP_PCB_NUM_EXT_ARGS
+static void tcp_ext_arg_invoke_callbacks_destroyed(struct tcp_pcb_ext_args *ext_args);
+#endif
+
+/**
+ * Initialize this module.
+ */ +void +tcp_init(void) +{ +#ifdef LWIP_RAND + tcp_port = TCP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND()); +#endif /* LWIP_RAND */ +} + +/** Free a tcp pcb */ +void +tcp_free(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_free: LISTEN", pcb->state != LISTEN); +#if LWIP_TCP_PCB_NUM_EXT_ARGS + tcp_ext_arg_invoke_callbacks_destroyed(pcb->ext_args); +#endif + memp_free(MEMP_TCP_PCB, pcb); +} + +/** Free a tcp listen pcb */ +static void +tcp_free_listen(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_free_listen: !LISTEN", pcb->state != LISTEN); +#if LWIP_TCP_PCB_NUM_EXT_ARGS + tcp_ext_arg_invoke_callbacks_destroyed(pcb->ext_args); +#endif + memp_free(MEMP_TCP_PCB_LISTEN, pcb); +} + +/** + * Called periodically to dispatch TCP timers. + */ +void +tcp_tmr(void) +{ + /* Call tcp_fasttmr() every 250 ms */ + tcp_fasttmr(); + + if (++tcp_timer & 1) { + /* Call tcp_slowtmr() every 500 ms, i.e., every other timer + tcp_tmr() is called. */ + tcp_slowtmr(); + } +} + +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG +/** Called when a listen pcb is closed. Iterates one pcb list and removes the + * closed listener pcb from pcb->listener if matching. + */ +static void +tcp_remove_listener(struct tcp_pcb *list, struct tcp_pcb_listen *lpcb) +{ + struct tcp_pcb *pcb; + + LWIP_ASSERT("tcp_remove_listener: invalid listener", lpcb != NULL); + + for (pcb = list; pcb != NULL; pcb = pcb->next) { + if (pcb->listener == lpcb) { + pcb->listener = NULL; + } + } +} +#endif + +/** Called when a listen pcb is closed. Iterates all pcb lists and removes the + * closed listener pcb from pcb->listener if matching. + */ +static void +tcp_listen_closed(struct tcp_pcb *pcb) +{ +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + size_t i; + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("pcb->state == LISTEN", pcb->state == LISTEN); + for (i = 1; i < LWIP_ARRAYSIZE(tcp_pcb_lists); i++) { + tcp_remove_listener(*tcp_pcb_lists[i], (struct tcp_pcb_listen *)pcb); + } +#endif + LWIP_UNUSED_ARG(pcb); +} + +#if TCP_LISTEN_BACKLOG +/** @ingroup tcp_raw + * Delay accepting a connection in respect to the listen backlog: + * the number of outstanding connections is increased until + * tcp_backlog_accepted() is called. + * + * ATTENTION: the caller is responsible for calling tcp_backlog_accepted() + * or else the backlog feature will get out of sync! + * + * @param pcb the connection pcb which is not fully accepted yet + */ +void +tcp_backlog_delayed(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb->flags & TF_BACKLOGPEND) == 0) { + if (pcb->listener != NULL) { + pcb->listener->accepts_pending++; + LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0); + tcp_set_flags(pcb, TF_BACKLOGPEND); + } + } +} + +/** @ingroup tcp_raw + * A delayed-accept a connection is accepted (or closed/aborted): decreases + * the number of outstanding connections after calling tcp_backlog_delayed(). + * + * ATTENTION: the caller is responsible for calling tcp_backlog_accepted() + * or else the backlog feature will get out of sync! 
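+ *
+ * Example usage (a minimal sketch; `queue_for_worker` is an assumed
+ * application hand-off, not part of lwIP):
+ *
+ *   static err_t my_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
+ *   {
+ *     tcp_backlog_delayed(newpcb);  // keep the backlog slot occupied
+ *     queue_for_worker(newpcb);     // defer the real accept work
+ *     return ERR_OK;
+ *   }
+ *
+ *   // later, once the worker has taken over the connection:
+ *   tcp_backlog_accepted(newpcb);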
+ * + * @param pcb the connection pcb which is now fully accepted (or closed/aborted) + */ +void +tcp_backlog_accepted(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb->flags & TF_BACKLOGPEND) != 0) { + if (pcb->listener != NULL) { + LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0); + pcb->listener->accepts_pending--; + tcp_clear_flags(pcb, TF_BACKLOGPEND); + } + } +} +#endif /* TCP_LISTEN_BACKLOG */ + +/** + * Closes the TX side of a connection held by the PCB. + * For tcp_close(), a RST is sent if the application didn't receive all data + * (tcp_recved() not called for all data passed to recv callback). + * + * Listening pcbs are freed and may not be referenced any more. + * Connection pcbs are freed if not yet connected and may not be referenced + * any more. If a connection is established (at least SYN received or in + * a closing state), the connection is closed, and put in a closing state. + * The pcb is then automatically freed in tcp_slowtmr(). It is therefore + * unsafe to reference it. + * + * @param pcb the tcp_pcb to close + * @return ERR_OK if connection has been closed + * another err_t if closing failed and pcb is not freed + */ +static err_t +tcp_close_shutdown(struct tcp_pcb *pcb, u8_t rst_on_unacked_data) +{ + LWIP_ASSERT("tcp_close_shutdown: invalid pcb", pcb != NULL); + + if (rst_on_unacked_data && ((pcb->state == ESTABLISHED) || (pcb->state == CLOSE_WAIT))) { + if ((pcb->refused_data != NULL) || (pcb->rcv_wnd != TCP_WND_MAX(pcb))) { + /* Not all data received by application, send RST to tell the remote + side about this. */ + LWIP_ASSERT("pcb->flags & TF_RXCLOSED", pcb->flags & TF_RXCLOSED); + + /* don't call tcp_abort here: we must not deallocate the pcb since + that might not be expected when calling tcp_close */ + tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, + pcb->local_port, pcb->remote_port); + + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + /* Deallocate the pcb since we already sent a RST for it */ + if (tcp_input_pcb == pcb) { + /* prevent using a deallocated pcb: free it from tcp_input later */ + tcp_trigger_input_pcb_close(); + } else { + tcp_free(pcb); + } + return ERR_OK; + } + } + + /* - states which free the pcb are handled here, + - states which send FIN and change state are handled in tcp_close_shutdown_fin() */ + switch (pcb->state) { + case CLOSED: + /* Closing a pcb in the CLOSED state might seem erroneous, + * however, it is in this state once allocated and as yet unused + * and the user needs some way to free it should the need arise. + * Calling tcp_close() with a pcb that has already been closed, (i.e. twice) + * or for a pcb that has been used and then entered the CLOSED state + * is erroneous, but this should never happen as the pcb has in those cases + * been freed, and so any remaining handles are bogus. 
+       */
+      if (pcb->local_port != 0) {
+        TCP_RMV(&tcp_bound_pcbs, pcb);
+      }
+      tcp_free(pcb);
+      break;
+    case LISTEN:
+      tcp_listen_closed(pcb);
+      tcp_pcb_remove(&tcp_listen_pcbs.pcbs, pcb);
+      tcp_free_listen(pcb);
+      break;
+    case SYN_SENT:
+      TCP_PCB_REMOVE_ACTIVE(pcb);
+      tcp_free(pcb);
+      MIB2_STATS_INC(mib2.tcpattemptfails);
+      break;
+    default:
+      return tcp_close_shutdown_fin(pcb);
+  }
+  return ERR_OK;
+}
+
+static err_t
+tcp_close_shutdown_fin(struct tcp_pcb *pcb)
+{
+  err_t err;
+  LWIP_ASSERT("pcb != NULL", pcb != NULL);
+
+  switch (pcb->state) {
+    case SYN_RCVD:
+      err = tcp_send_fin(pcb);
+      if (err == ERR_OK) {
+        tcp_backlog_accepted(pcb);
+        MIB2_STATS_INC(mib2.tcpattemptfails);
+        pcb->state = FIN_WAIT_1;
+      }
+      break;
+    case ESTABLISHED:
+      err = tcp_send_fin(pcb);
+      if (err == ERR_OK) {
+        MIB2_STATS_INC(mib2.tcpestabresets);
+        pcb->state = FIN_WAIT_1;
+      }
+      break;
+    case CLOSE_WAIT:
+      err = tcp_send_fin(pcb);
+      if (err == ERR_OK) {
+        MIB2_STATS_INC(mib2.tcpestabresets);
+        pcb->state = LAST_ACK;
+      }
+      break;
+    default:
+      /* Has already been closed, do nothing. */
+      return ERR_OK;
+  }
+
+  if (err == ERR_OK) {
+    /* To ensure all data has been sent when tcp_close returns, we have
+       to make sure tcp_output doesn't fail.
+       Since we don't really have to ensure all data has been sent when tcp_close
+       returns (unsent data is sent from tcp timer functions, also), we don't care
+       for the return value of tcp_output for now. */
+    tcp_output(pcb);
+  } else if (err == ERR_MEM) {
+    /* Mark this pcb for closing. Closing is retried from tcp_tmr. */
+    tcp_set_flags(pcb, TF_CLOSEPEND);
+    /* We have to return ERR_OK from here to indicate to the callers that this
+       pcb should not be used any more as it will be freed soon via tcp_tmr.
+       This is OK here since sending FIN does not guarantee a time frame for
+       actually freeing the pcb, either (it is left in closure states for
+       remote ACK or timeout) */
+    return ERR_OK;
+  }
+  return err;
+}
+
+/**
+ * @ingroup tcp_raw
+ * Closes the connection held by the PCB.
+ *
+ * Listening pcbs are freed and may not be referenced any more.
+ * Connection pcbs are freed if not yet connected and may not be referenced
+ * any more. If a connection is established (at least SYN received or in
+ * a closing state), the connection is closed, and put in a closing state.
+ * The pcb is then automatically freed in tcp_slowtmr(). It is therefore
+ * unsafe to reference it (unless an error is returned).
+ *
+ * The function may return ERR_MEM if no memory
+ * was available for closing the connection. If so, the application
+ * should wait and try again either by using the acknowledgment
+ * callback or the polling functionality. If the close succeeds, the
+ * function returns ERR_OK.
+ *
+ * @param pcb the tcp_pcb to close
+ * @return ERR_OK if connection has been closed
+ *         another err_t if closing failed and pcb is not freed
+ */
+err_t
+tcp_close(struct tcp_pcb *pcb)
+{
+  LWIP_ASSERT_CORE_LOCKED();
+
+  LWIP_ERROR("tcp_close: invalid pcb", pcb != NULL, return ERR_ARG);
+  LWIP_DEBUGF(TCP_DEBUG, ("tcp_close: closing in "));
+
+  tcp_debug_print_state(pcb->state);
+
+  if (pcb->state != LISTEN) {
+    /* Set a flag not to receive any more data... */
+    tcp_set_flags(pcb, TF_RXCLOSED);
+  }
+  /* ... and close */
+  return tcp_close_shutdown(pcb, 1);
+}
+
+/**
+ * @ingroup tcp_raw
+ * Causes all or part of a full-duplex connection of this PCB to be shut down.
+ * This doesn't deallocate the PCB unless shutting down both sides!
+ *
+ * Shutting down both sides is the same as calling tcp_close, so if it succeeds
+ * (i.e. returns ERR_OK), the PCB must not be referenced any more!
+ *
+ * @param pcb PCB to shutdown
+ * @param shut_rx shut down receive side if this is != 0
+ * @param shut_tx shut down send side if this is != 0
+ * @return ERR_OK if shutdown succeeded (or the PCB has already been shut down)
+ *         another err_t on error.
+ */
+err_t
+tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx)
+{
+  LWIP_ASSERT_CORE_LOCKED();
+
+  LWIP_ERROR("tcp_shutdown: invalid pcb", pcb != NULL, return ERR_ARG);
+
+  if (pcb->state == LISTEN) {
+    return ERR_CONN;
+  }
+  if (shut_rx) {
+    /* shut down the receive side: set a flag not to receive any more data... */
+    tcp_set_flags(pcb, TF_RXCLOSED);
+    if (shut_tx) {
+      /* shutting down the tx AND rx side is the same as closing for the raw API */
+      return tcp_close_shutdown(pcb, 1);
+    }
+    /* ... and free buffered data */
+    if (pcb->refused_data != NULL) {
+      pbuf_free(pcb->refused_data);
+      pcb->refused_data = NULL;
+    }
+  }
+  if (shut_tx) {
+    /* This can't happen twice since if it succeeds, the pcb's state is changed.
+       Only close in these states as the others directly deallocate the PCB */
+    switch (pcb->state) {
+      case SYN_RCVD:
+      case ESTABLISHED:
+      case CLOSE_WAIT:
+        return tcp_close_shutdown(pcb, (u8_t)shut_rx);
+      default:
+        /* Not (yet?) connected, cannot shutdown the TX side as that would bring us
+           into CLOSED state, where the PCB is deallocated. */
+        return ERR_CONN;
+    }
+  }
+  return ERR_OK;
+}
+
+/**
+ * Abandons a connection and optionally sends a RST to the remote
+ * host. Deletes the local protocol control block. This is done when
+ * a connection is killed because of shortage of memory.
+ *
+ * @param pcb the tcp_pcb to abort
+ * @param reset boolean to indicate whether a reset should be sent
+ */
+void
+tcp_abandon(struct tcp_pcb *pcb, int reset)
+{
+  u32_t seqno, ackno;
+#if LWIP_CALLBACK_API
+  tcp_err_fn errf;
+#endif /* LWIP_CALLBACK_API */
+  void *errf_arg;
+
+  LWIP_ASSERT_CORE_LOCKED();
+
+  LWIP_ERROR("tcp_abandon: invalid pcb", pcb != NULL, return);
+
+  /* pcb->state LISTEN not allowed here */
+  LWIP_ASSERT("don't call tcp_abort/tcp_abandon for listen-pcbs",
+              pcb->state != LISTEN);
+  /* Figure out on which TCP PCB list we are, and remove us. If we
+     are in an active state, call the receive function associated with
+     the PCB with a NULL argument, and send an RST to the remote end.
+   */
+  if (pcb->state == TIME_WAIT) {
+    tcp_pcb_remove(&tcp_tw_pcbs, pcb);
+    tcp_free(pcb);
+  } else {
+    int send_rst = 0;
+    u16_t local_port = 0;
+    enum tcp_state last_state;
+    seqno = pcb->snd_nxt;
+    ackno = pcb->rcv_nxt;
+#if LWIP_CALLBACK_API
+    errf = pcb->errf;
+#endif /* LWIP_CALLBACK_API */
+    errf_arg = pcb->callback_arg;
+    if (pcb->state == CLOSED) {
+      if (pcb->local_port != 0) {
+        /* bound, not yet opened */
+        TCP_RMV(&tcp_bound_pcbs, pcb);
+      }
+    } else {
+      send_rst = reset;
+      local_port = pcb->local_port;
+      TCP_PCB_REMOVE_ACTIVE(pcb);
+    }
+    if (pcb->unacked != NULL) {
+      tcp_segs_free(pcb->unacked);
+    }
+    if (pcb->unsent != NULL) {
+      tcp_segs_free(pcb->unsent);
+    }
+#if TCP_QUEUE_OOSEQ
+    if (pcb->ooseq != NULL) {
+      tcp_segs_free(pcb->ooseq);
+    }
+#endif /* TCP_QUEUE_OOSEQ */
+    tcp_backlog_accepted(pcb);
+    if (send_rst) {
+      LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_abandon: sending RST\n"));
+      tcp_rst(pcb, seqno, ackno, &pcb->local_ip, &pcb->remote_ip, local_port, pcb->remote_port);
+    }
+    last_state = pcb->state;
+    tcp_free(pcb);
+    TCP_EVENT_ERR(last_state, errf, errf_arg, ERR_ABRT);
+  }
+}
+
+/**
+ * @ingroup tcp_raw
+ * Aborts the connection by sending a RST (reset) segment to the remote
+ * host. The pcb is deallocated. This function never fails.
+ *
+ * ATTENTION: When calling this from one of the TCP callbacks, make
+ * sure you always return ERR_ABRT (and never return ERR_ABRT otherwise),
+ * or you will risk accessing deallocated memory or memory leaks!
+ *
+ * @param pcb the tcp pcb to abort
+ */
+void
+tcp_abort(struct tcp_pcb *pcb)
+{
+  tcp_abandon(pcb, 1);
+}
+
+/**
+ * @ingroup tcp_raw
+ * Binds the connection to a local port number and IP address. If the
+ * IP address is not given (i.e., ipaddr == IP_ANY_TYPE), the connection is
+ * bound to all local IP addresses.
+ * If another connection is bound to the same port, the function will
+ * return ERR_USE, otherwise ERR_OK is returned.
+ *
+ * @param pcb the tcp_pcb to bind (no check is done whether this pcb is
+ *        already bound!)
+ * @param ipaddr the local ip address to bind to (use IPx_ADDR_ANY to bind
+ *        to any local address)
+ * @param port the local port to bind to
+ * @return ERR_USE if the port is already in use
+ *         ERR_VAL if bind failed because the PCB is not in a valid state
+ *         ERR_OK if bound
+ */
+err_t
+tcp_bind(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port)
+{
+  int i;
+  int max_pcb_list = NUM_TCP_PCB_LISTS;
+  struct tcp_pcb *cpcb;
+#if LWIP_IPV6 && LWIP_IPV6_SCOPES
+  ip_addr_t zoned_ipaddr;
+#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
+
+  LWIP_ASSERT_CORE_LOCKED();
+
+#if LWIP_IPV4
+  /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */
+  if (ipaddr == NULL) {
+    ipaddr = IP4_ADDR_ANY;
+  }
+#else /* LWIP_IPV4 */
+  LWIP_ERROR("tcp_bind: invalid ipaddr", ipaddr != NULL, return ERR_ARG);
+#endif /* LWIP_IPV4 */
+
+  LWIP_ERROR("tcp_bind: invalid pcb", pcb != NULL, return ERR_ARG);
+
+  LWIP_ERROR("tcp_bind: can only bind in state CLOSED", pcb->state == CLOSED, return ERR_VAL);
+
+#if SO_REUSE
+  /* Unless the REUSEADDR flag is set,
+     we have to check the pcbs in TIME-WAIT state, also.
+     We do not dump TIME_WAIT pcb's; they can still be matched by incoming
+     packets using both local and remote IP addresses and ports to distinguish.
+   */
+  if (ip_get_option(pcb, SOF_REUSEADDR)) {
+    max_pcb_list = NUM_TCP_PCB_LISTS_NO_TIME_WAIT;
+  }
+#endif /* SO_REUSE */
+
+#if LWIP_IPV6 && LWIP_IPV6_SCOPES
+  /* If the given IP address should have a zone but doesn't, assign one now.
+ * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. Do the zone selection before the address-in-use + * check below; as such we have to make a temporary copy of the address. */ + if (IP_IS_V6(ipaddr) && ip6_addr_lacks_zone(ip_2_ip6(ipaddr), IP6_UNICAST)) { + ip_addr_copy(zoned_ipaddr, *ipaddr); + ip6_addr_select_zone(ip_2_ip6(&zoned_ipaddr), ip_2_ip6(&zoned_ipaddr)); + ipaddr = &zoned_ipaddr; + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + if (port == 0) { + port = tcp_new_port(); + if (port == 0) { + return ERR_BUF; + } + } else { + /* Check if the address already is in use (on all lists) */ + for (i = 0; i < max_pcb_list; i++) { + for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) { + if (cpcb->local_port == port) { +#if SO_REUSE + /* Omit checking for the same port if both pcbs have REUSEADDR set. + For SO_REUSEADDR, the duplicate-check for a 5-tuple is done in + tcp_connect. */ + if (!ip_get_option(pcb, SOF_REUSEADDR) || + !ip_get_option(cpcb, SOF_REUSEADDR)) +#endif /* SO_REUSE */ + { + /* @todo: check accept_any_ip_version */ + if ((IP_IS_V6(ipaddr) == IP_IS_V6_VAL(cpcb->local_ip)) && + (ip_addr_isany(&cpcb->local_ip) || + ip_addr_isany(ipaddr) || + ip_addr_cmp(&cpcb->local_ip, ipaddr))) { + return ERR_USE; + } + } + } + } + } + } + + if (!ip_addr_isany(ipaddr) +#if LWIP_IPV4 && LWIP_IPV6 + || (IP_GET_TYPE(ipaddr) != IP_GET_TYPE(&pcb->local_ip)) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + ) { + ip_addr_set(&pcb->local_ip, ipaddr); + } + pcb->local_port = port; + TCP_REG(&tcp_bound_pcbs, pcb); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_bind: bind to port %"U16_F"\n", port)); + return ERR_OK; +} + +/** + * @ingroup tcp_raw + * Binds the connection to a netif and IP address. + * After calling this function, all packets received via this PCB + * are guaranteed to have come in via the specified netif, and all + * outgoing packets will go out via the specified netif. + * + * @param pcb the tcp_pcb to bind. + * @param netif the netif to bind to. Can be NULL. + */ +void +tcp_bind_netif(struct tcp_pcb *pcb, const struct netif *netif) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (netif != NULL) { + pcb->netif_idx = netif_get_index(netif); + } else { + pcb->netif_idx = NETIF_NO_INDEX; + } +} + +#if LWIP_CALLBACK_API +/** + * Default accept callback if no accept callback is specified by the user. + */ +static err_t +tcp_accept_null(void *arg, struct tcp_pcb *pcb, err_t err) +{ + LWIP_UNUSED_ARG(arg); + LWIP_UNUSED_ARG(err); + + LWIP_ASSERT("tcp_accept_null: invalid pcb", pcb != NULL); + + tcp_abort(pcb); + + return ERR_ABRT; +} +#endif /* LWIP_CALLBACK_API */ + +/** + * @ingroup tcp_raw + * Set the state of the connection to be LISTEN, which means that it + * is able to accept incoming connections. The protocol control block + * is reallocated in order to consume less memory. Setting the + * connection to LISTEN is an irreversible process. + * When an incoming connection is accepted, the function specified with + * the tcp_accept() function will be called. The pcb has to be bound + * to a local port with the tcp_bind() function. + * + * The tcp_listen() function returns a new connection identifier, and + * the one passed as an argument to the function will be + * deallocated. The reason for this behavior is that less memory is + * needed for a connection that is listening, so tcp_listen() will + * reclaim the memory needed for the original connection and allocate a + * new smaller memory block for the listening connection. 
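+ *
+ * Example usage (a minimal sketch; port 7 and `my_accept` are arbitrary
+ * choices for illustration):
+ *
+ *   struct tcp_pcb *pcb = tcp_new();
+ *   if ((pcb != NULL) && (tcp_bind(pcb, IP4_ADDR_ANY, 7) == ERR_OK)) {
+ *     pcb = tcp_listen_with_backlog(pcb, 1); // the old pcb is freed on success
+ *     if (pcb != NULL) {
+ *       tcp_accept(pcb, my_accept);
+ *     }
+ *   }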
+ *
+ * tcp_listen() may return NULL if no memory was available for the
+ * listening connection. If so, the memory associated with the pcb
+ * passed as an argument to tcp_listen() will not be deallocated.
+ *
+ * The backlog limits the number of outstanding connections
+ * in the listen queue to the value specified by the backlog argument.
+ * To use it, you need to set TCP_LISTEN_BACKLOG=1 in your lwipopts.h.
+ *
+ * @param pcb the original tcp_pcb
+ * @param backlog the incoming connections queue limit
+ * @return tcp_pcb used for listening, consumes less memory.
+ *
+ * @note The original tcp_pcb is freed. This function therefore has to be
+ *       called like this:
+ *             tpcb = tcp_listen_with_backlog(tpcb, backlog);
+ */
+struct tcp_pcb *
+tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog)
+{
+  LWIP_ASSERT_CORE_LOCKED();
+  return tcp_listen_with_backlog_and_err(pcb, backlog, NULL);
+}
+
+/**
+ * @ingroup tcp_raw
+ * Set the state of the connection to be LISTEN, which means that it
+ * is able to accept incoming connections. The protocol control block
+ * is reallocated in order to consume less memory. Setting the
+ * connection to LISTEN is an irreversible process.
+ *
+ * @param pcb the original tcp_pcb
+ * @param backlog the incoming connections queue limit
+ * @param err when NULL is returned, this contains the error reason
+ * @return tcp_pcb used for listening, consumes less memory.
+ *
+ * @note The original tcp_pcb is freed. This function therefore has to be
+ *       called like this:
+ *             tpcb = tcp_listen_with_backlog_and_err(tpcb, backlog, &err);
+ */
+struct tcp_pcb *
+tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err)
+{
+  struct tcp_pcb_listen *lpcb = NULL;
+  err_t res;
+
+  LWIP_UNUSED_ARG(backlog);
+
+  LWIP_ASSERT_CORE_LOCKED();
+
+  LWIP_ERROR("tcp_listen_with_backlog_and_err: invalid pcb", pcb != NULL, res = ERR_ARG; goto done);
+  LWIP_ERROR("tcp_listen_with_backlog_and_err: pcb already connected", pcb->state == CLOSED, res = ERR_CLSD; goto done);
+
+  /* already listening? */
+  if (pcb->state == LISTEN) {
+    lpcb = (struct tcp_pcb_listen *)pcb;
+    res = ERR_ALREADY;
+    goto done;
+  }
+#if SO_REUSE
+  if (ip_get_option(pcb, SOF_REUSEADDR)) {
+    /* Since SOF_REUSEADDR allows reusing a local address before the pcb's usage
+       is declared (listen-/connection-pcb), we have to make sure now that
+       this port is only used once for every local IP.
*/ + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + if ((lpcb->local_port == pcb->local_port) && + ip_addr_cmp(&lpcb->local_ip, &pcb->local_ip)) { + /* this address/port is already used */ + lpcb = NULL; + res = ERR_USE; + goto done; + } + } + } +#endif /* SO_REUSE */ + lpcb = (struct tcp_pcb_listen *)memp_malloc(MEMP_TCP_PCB_LISTEN); + if (lpcb == NULL) { + res = ERR_MEM; + goto done; + } + lpcb->callback_arg = pcb->callback_arg; + lpcb->local_port = pcb->local_port; + lpcb->state = LISTEN; + lpcb->prio = pcb->prio; + lpcb->so_options = pcb->so_options; + lpcb->netif_idx = NETIF_NO_INDEX; + lpcb->ttl = pcb->ttl; + lpcb->tos = pcb->tos; +#if LWIP_IPV4 && LWIP_IPV6 + IP_SET_TYPE_VAL(lpcb->remote_ip, pcb->local_ip.type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + ip_addr_copy(lpcb->local_ip, pcb->local_ip); + if (pcb->local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } +#if LWIP_TCP_PCB_NUM_EXT_ARGS + /* copy over ext_args to listening pcb */ + memcpy(&lpcb->ext_args, &pcb->ext_args, sizeof(pcb->ext_args)); +#endif + tcp_free(pcb); +#if LWIP_CALLBACK_API + lpcb->accept = tcp_accept_null; +#endif /* LWIP_CALLBACK_API */ +#if TCP_LISTEN_BACKLOG + lpcb->accepts_pending = 0; + tcp_backlog_set(lpcb, backlog); +#endif /* TCP_LISTEN_BACKLOG */ + TCP_REG(&tcp_listen_pcbs.pcbs, (struct tcp_pcb *)lpcb); + res = ERR_OK; +done: + if (err != NULL) { + *err = res; + } + return (struct tcp_pcb *)lpcb; +} + +/** + * Update the state that tracks the available window space to advertise. + * + * Returns how much extra window would be advertised if we sent an + * update now. + */ +u32_t +tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb) +{ + u32_t new_right_edge; + + LWIP_ASSERT("tcp_update_rcv_ann_wnd: invalid pcb", pcb != NULL); + new_right_edge = pcb->rcv_nxt + pcb->rcv_wnd; + + if (TCP_SEQ_GEQ(new_right_edge, pcb->rcv_ann_right_edge + LWIP_MIN((TCP_WND / 2), pcb->mss))) { + /* we can advertise more window */ + pcb->rcv_ann_wnd = pcb->rcv_wnd; + return new_right_edge - pcb->rcv_ann_right_edge; + } else { + if (TCP_SEQ_GT(pcb->rcv_nxt, pcb->rcv_ann_right_edge)) { + /* Can happen due to other end sending out of advertised window, + * but within actual available (but not yet advertised) window */ + pcb->rcv_ann_wnd = 0; + } else { + /* keep the right edge of window constant */ + u32_t new_rcv_ann_wnd = pcb->rcv_ann_right_edge - pcb->rcv_nxt; +#if !LWIP_WND_SCALE + LWIP_ASSERT("new_rcv_ann_wnd <= 0xffff", new_rcv_ann_wnd <= 0xffff); +#endif + pcb->rcv_ann_wnd = (tcpwnd_size_t)new_rcv_ann_wnd; + } + return 0; + } +} + +/** + * @ingroup tcp_raw + * This function should be called by the application when it has + * processed the data. The purpose is to advertise a larger window + * when the data has been processed. 
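+ *
+ * Example usage (a minimal sketch; `process_payload` is an assumed
+ * application function, not part of lwIP):
+ *
+ *   static err_t my_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
+ *   {
+ *     if (p == NULL) {
+ *       return tcp_close(tpcb);       // remote host closed the connection
+ *     }
+ *     process_payload(p);             // consume the data...
+ *     tcp_recved(tpcb, p->tot_len);   // ...then re-open the receive window
+ *     pbuf_free(p);
+ *     return ERR_OK;
+ *   }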
+ * + * @param pcb the tcp_pcb for which data is read + * @param len the amount of bytes that have been read by the application + */ +void +tcp_recved(struct tcp_pcb *pcb, u16_t len) +{ + u32_t wnd_inflation; + tcpwnd_size_t rcv_wnd; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_recved: invalid pcb", pcb != NULL, return); + + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_recved for listen-pcbs", + pcb->state != LISTEN); + + rcv_wnd = (tcpwnd_size_t)(pcb->rcv_wnd + len); + if ((rcv_wnd > TCP_WND_MAX(pcb)) || (rcv_wnd < pcb->rcv_wnd)) { + /* window got too big or tcpwnd_size_t overflow */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: window got too big or tcpwnd_size_t overflow\n")); + pcb->rcv_wnd = TCP_WND_MAX(pcb); + } else { + pcb->rcv_wnd = rcv_wnd; + } + + wnd_inflation = tcp_update_rcv_ann_wnd(pcb); + + /* If the change in the right edge of window is significant (default + * watermark is TCP_WND/4), then send an explicit update now. + * Otherwise wait for a packet to be sent in the normal course of + * events (or more window to be available later) */ + if (wnd_inflation >= TCP_WND_UPDATE_THRESHOLD) { + tcp_ack_now(pcb); + tcp_output(pcb); + } + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: received %"U16_F" bytes, wnd %"TCPWNDSIZE_F" (%"TCPWNDSIZE_F").\n", + len, pcb->rcv_wnd, (u16_t)(TCP_WND_MAX(pcb) - pcb->rcv_wnd))); +} + +/** + * Allocate a new local TCP port. + * + * @return a new (free) local TCP port number + */ +static u16_t +tcp_new_port(void) +{ + u8_t i; + u16_t n = 0; + struct tcp_pcb *pcb; + +again: + tcp_port++; + if (tcp_port == TCP_LOCAL_PORT_RANGE_END) { + tcp_port = TCP_LOCAL_PORT_RANGE_START; + } + /* Check all PCB lists. */ + for (i = 0; i < NUM_TCP_PCB_LISTS; i++) { + for (pcb = *tcp_pcb_lists[i]; pcb != NULL; pcb = pcb->next) { + if (pcb->local_port == tcp_port) { + n++; + if (n > (TCP_LOCAL_PORT_RANGE_END - TCP_LOCAL_PORT_RANGE_START)) { + return 0; + } + goto again; + } + } + } + return tcp_port; +} + +/** + * @ingroup tcp_raw + * Connects to another host. The function given as the "connected" + * argument will be called when the connection has been established. + * Sets up the pcb to connect to the remote host and sends the + * initial SYN segment which opens the connection. + * + * The tcp_connect() function returns immediately; it does not wait for + * the connection to be properly setup. Instead, it will call the + * function specified as the fourth argument (the "connected" argument) + * when the connection is established. If the connection could not be + * properly established, either because the other host refused the + * connection or because the other host didn't answer, the "err" + * callback function of this pcb (registered with tcp_err, see below) + * will be called. + * + * The tcp_connect() function can return ERR_MEM if no memory is + * available for enqueueing the SYN segment. If the SYN indeed was + * enqueued successfully, the tcp_connect() function returns ERR_OK. 
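+ *
+ * Example usage (a minimal sketch; the peer address and port 80 are
+ * arbitrary choices for illustration):
+ *
+ *   static err_t my_connected(void *arg, struct tcp_pcb *tpcb, err_t err)
+ *   {
+ *     // the connection is established; it is now safe to tcp_write()
+ *     return ERR_OK;
+ *   }
+ *
+ *   ip_addr_t server;
+ *   IP_ADDR4(&server, 192, 168, 1, 10);
+ *   tcp_connect(pcb, &server, 80, my_connected);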
+ * + * @param pcb the tcp_pcb used to establish the connection + * @param ipaddr the remote ip address to connect to + * @param port the remote tcp port to connect to + * @param connected callback function to call when connected (on error, + the err calback will be called) + * @return ERR_VAL if invalid arguments are given + * ERR_OK if connect request has been sent + * other err_t values if connect request couldn't be sent + */ +err_t +tcp_connect(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port, + tcp_connected_fn connected) +{ + struct netif *netif = NULL; + err_t ret; + u32_t iss; + u16_t old_local_port; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_connect: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("tcp_connect: invalid ipaddr", ipaddr != NULL, return ERR_ARG); + + LWIP_ERROR("tcp_connect: can only connect from state CLOSED", pcb->state == CLOSED, return ERR_ISCONN); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_connect to port %"U16_F"\n", port)); + ip_addr_set(&pcb->remote_ip, ipaddr); + pcb->remote_port = port; + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { + /* check if we have a route to the remote host */ + netif = ip_route(&pcb->local_ip, &pcb->remote_ip); + } + if (netif == NULL) { + /* Don't even try to send a SYN packet if we have no route since that will fail. */ + return ERR_RTE; + } + + /* check if local IP has been assigned to pcb, if not, get one */ + if (ip_addr_isany(&pcb->local_ip)) { + const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, ipaddr); + if (local_ip == NULL) { + return ERR_RTE; + } + ip_addr_copy(pcb->local_ip, *local_ip); + } + +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * Given that we already have the target netif, this is easy and cheap. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNICAST)) { + ip6_addr_assign_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNICAST, netif); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + old_local_port = pcb->local_port; + if (pcb->local_port == 0) { + pcb->local_port = tcp_new_port(); + if (pcb->local_port == 0) { + return ERR_BUF; + } + } else { +#if SO_REUSE + if (ip_get_option(pcb, SOF_REUSEADDR)) { + /* Since SOF_REUSEADDR allows reusing a local address, we have to make sure + now that the 5-tuple is unique. */ + struct tcp_pcb *cpcb; + int i; + /* Don't check listen- and bound-PCBs, check active- and TIME-WAIT PCBs. */ + for (i = 2; i < NUM_TCP_PCB_LISTS; i++) { + for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) { + if ((cpcb->local_port == pcb->local_port) && + (cpcb->remote_port == port) && + ip_addr_cmp(&cpcb->local_ip, &pcb->local_ip) && + ip_addr_cmp(&cpcb->remote_ip, ipaddr)) { + /* linux returns EISCONN here, but ERR_USE should be OK for us */ + return ERR_USE; + } + } + } + } +#endif /* SO_REUSE */ + } + + iss = tcp_next_iss(pcb); + pcb->rcv_nxt = 0; + pcb->snd_nxt = iss; + pcb->lastack = iss - 1; + pcb->snd_wl2 = iss - 1; + pcb->snd_lbb = iss - 1; + /* Start with a window that does not need scaling. When window scaling is + enabled and used, the window is enlarged when both sides agree on scaling. */ + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); + pcb->rcv_ann_right_edge = pcb->rcv_nxt; + pcb->snd_wnd = TCP_WND; + /* As initial send MSS, we use TCP_MSS but limit it to 536. + The send MSS is updated when an MSS option is received. 
*/ + pcb->mss = INITIAL_MSS; +#if TCP_CALCULATE_EFF_SEND_MSS + pcb->mss = tcp_eff_send_mss_netif(pcb->mss, netif, &pcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + pcb->cwnd = 1; +#if LWIP_CALLBACK_API + pcb->connected = connected; +#else /* LWIP_CALLBACK_API */ + LWIP_UNUSED_ARG(connected); +#endif /* LWIP_CALLBACK_API */ + + /* Send a SYN together with the MSS option. */ + ret = tcp_enqueue_flags(pcb, TCP_SYN); + if (ret == ERR_OK) { + /* SYN segment was enqueued, changed the pcbs state now */ + pcb->state = SYN_SENT; + if (old_local_port != 0) { + TCP_RMV(&tcp_bound_pcbs, pcb); + } + TCP_REG_ACTIVE(pcb); + MIB2_STATS_INC(mib2.tcpactiveopens); + + tcp_output(pcb); + } + return ret; +} + +/** + * Called every 500 ms and implements the retransmission timer and the timer that + * removes PCBs that have been in TIME-WAIT for enough time. It also increments + * various timers such as the inactivity timer in each PCB. + * + * Automatically called from tcp_tmr(). + */ +void +tcp_slowtmr(void) +{ + struct tcp_pcb *pcb, *prev; + tcpwnd_size_t eff_wnd; + u8_t pcb_remove; /* flag if a PCB should be removed */ + u8_t pcb_reset; /* flag if a RST should be sent when removing */ + err_t err; + + err = ERR_OK; + + ++tcp_ticks; + ++tcp_timer_ctr; + +tcp_slowtmr_start: + /* Steps through all of the active PCBs. */ + prev = NULL; + pcb = tcp_active_pcbs; + if (pcb == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: no active pcbs\n")); + } + while (pcb != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: processing active pcb\n")); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != CLOSED\n", pcb->state != CLOSED); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != LISTEN\n", pcb->state != LISTEN); + LWIP_ASSERT("tcp_slowtmr: active pcb->state != TIME-WAIT\n", pcb->state != TIME_WAIT); + if (pcb->last_timer == tcp_timer_ctr) { + /* skip this pcb, we have already processed it */ + prev = pcb; + pcb = pcb->next; + continue; + } + pcb->last_timer = tcp_timer_ctr; + + pcb_remove = 0; + pcb_reset = 0; + + if (pcb->state == SYN_SENT && pcb->nrtx >= TCP_SYNMAXRTX) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max SYN retries reached\n")); + } else if (pcb->nrtx >= TCP_MAXRTX) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max DATA retries reached\n")); + } else { + if (pcb->persist_backoff > 0) { + LWIP_ASSERT("tcp_slowtimr: persist ticking with in-flight data", pcb->unacked == NULL); + LWIP_ASSERT("tcp_slowtimr: persist ticking with empty send buffer", pcb->unsent != NULL); + if (pcb->persist_probe >= TCP_MAXRTX) { + ++pcb_remove; /* max probes reached */ + } else { + u8_t backoff_cnt = tcp_persist_backoff[pcb->persist_backoff - 1]; + if (pcb->persist_cnt < backoff_cnt) { + pcb->persist_cnt++; + } + if (pcb->persist_cnt >= backoff_cnt) { + int next_slot = 1; /* increment timer to next slot */ + /* If snd_wnd is zero, send 1 byte probes */ + if (pcb->snd_wnd == 0) { + if (tcp_zero_window_probe(pcb) != ERR_OK) { + next_slot = 0; /* try probe again with current slot */ + } + /* snd_wnd not fully closed, split unsent head and fill window */ + } else { + if (tcp_split_unsent_seg(pcb, (u16_t)pcb->snd_wnd) == ERR_OK) { + if (tcp_output(pcb) == ERR_OK) { + /* sending will cancel persist timer, else retry with current slot */ + next_slot = 0; + } + } + } + if (next_slot) { + pcb->persist_cnt = 0; + if (pcb->persist_backoff < sizeof(tcp_persist_backoff)) { + pcb->persist_backoff++; + } + } + } + } + } else { + /* Increase the retransmission timer if it is running */ + if ((pcb->rtime >= 0) && 
(pcb->rtime < 0x7FFF)) { + ++pcb->rtime; + } + + if (pcb->rtime >= pcb->rto) { + /* Time for a retransmission. */ + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_slowtmr: rtime %"S16_F + " pcb->rto %"S16_F"\n", + pcb->rtime, pcb->rto)); + /* If prepare phase fails but we have unsent data but no unacked data, + still execute the backoff calculations below, as this means we somehow + failed to send segment. */ + if ((tcp_rexmit_rto_prepare(pcb) == ERR_OK) || ((pcb->unacked == NULL) && (pcb->unsent != NULL))) { + /* Double retransmission time-out unless we are trying to + * connect to somebody (i.e., we are in SYN_SENT). */ + if (pcb->state != SYN_SENT) { + u8_t backoff_idx = LWIP_MIN(pcb->nrtx, sizeof(tcp_backoff) - 1); + int calc_rto = ((pcb->sa >> 3) + pcb->sv) << tcp_backoff[backoff_idx]; + pcb->rto = (s16_t)LWIP_MIN(calc_rto, 0x7FFF); + } + + /* Reset the retransmission timer. */ + pcb->rtime = 0; + + /* Reduce congestion window and ssthresh. */ + eff_wnd = LWIP_MIN(pcb->cwnd, pcb->snd_wnd); + pcb->ssthresh = eff_wnd >> 1; + if (pcb->ssthresh < (tcpwnd_size_t)(pcb->mss << 1)) { + pcb->ssthresh = (tcpwnd_size_t)(pcb->mss << 1); + } + pcb->cwnd = pcb->mss; + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + pcb->bytes_acked = 0; + + /* The following needs to be called AFTER cwnd is set to one + mss - STJ */ + tcp_rexmit_rto_commit(pcb); + } + } + } + } + /* Check if this PCB has stayed too long in FIN-WAIT-2 */ + if (pcb->state == FIN_WAIT_2) { + /* If this PCB is in FIN_WAIT_2 because of SHUT_WR don't let it time out. */ + if (pcb->flags & TF_RXCLOSED) { + /* PCB was fully closed (either through close() or SHUT_RDWR): + normal FIN-WAIT timeout handling. */ + if ((u32_t)(tcp_ticks - pcb->tmr) > + TCP_FIN_WAIT_TIMEOUT / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in FIN-WAIT-2\n")); + } + } + } + + /* Check if KEEPALIVE should be sent */ + if (ip_get_option(pcb, SOF_KEEPALIVE) && + ((pcb->state == ESTABLISHED) || + (pcb->state == CLOSE_WAIT))) { + if ((u32_t)(tcp_ticks - pcb->tmr) > + (pcb->keep_idle + TCP_KEEP_DUR(pcb)) / TCP_SLOW_INTERVAL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: KEEPALIVE timeout. Aborting connection to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + ++pcb_remove; + ++pcb_reset; + } else if ((u32_t)(tcp_ticks - pcb->tmr) > + (pcb->keep_idle + pcb->keep_cnt_sent * TCP_KEEP_INTVL(pcb)) + / TCP_SLOW_INTERVAL) { + err = tcp_keepalive(pcb); + if (err == ERR_OK) { + pcb->keep_cnt_sent++; + } + } + } + + /* If this PCB has queued out of sequence data, but has been + inactive for too long, will drop the data (it will eventually + be retransmitted). 
*/ +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL && + (tcp_ticks - pcb->tmr >= (u32_t)pcb->rto * TCP_OOSEQ_TIMEOUT)) { + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: dropping OOSEQ queued data\n")); + tcp_free_ooseq(pcb); + } +#endif /* TCP_QUEUE_OOSEQ */ + + /* Check if this PCB has stayed too long in SYN-RCVD */ + if (pcb->state == SYN_RCVD) { + if ((u32_t)(tcp_ticks - pcb->tmr) > + TCP_SYN_RCVD_TIMEOUT / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in SYN-RCVD\n")); + } + } + + /* Check if this PCB has stayed too long in LAST-ACK */ + if (pcb->state == LAST_ACK) { + if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) { + ++pcb_remove; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in LAST-ACK\n")); + } + } + + /* If the PCB should be removed, do it. */ + if (pcb_remove) { + struct tcp_pcb *pcb2; +#if LWIP_CALLBACK_API + tcp_err_fn err_fn = pcb->errf; +#endif /* LWIP_CALLBACK_API */ + void *err_arg; + enum tcp_state last_state; + tcp_pcb_purge(pcb); + /* Remove PCB from tcp_active_pcbs list. */ + if (prev != NULL) { + LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_active_pcbs", pcb != tcp_active_pcbs); + prev->next = pcb->next; + } else { + /* This PCB was the first. */ + LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_active_pcbs", tcp_active_pcbs == pcb); + tcp_active_pcbs = pcb->next; + } + + if (pcb_reset) { + tcp_rst(pcb, pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, + pcb->local_port, pcb->remote_port); + } + + err_arg = pcb->callback_arg; + last_state = pcb->state; + pcb2 = pcb; + pcb = pcb->next; + tcp_free(pcb2); + + tcp_active_pcbs_changed = 0; + TCP_EVENT_ERR(last_state, err_fn, err_arg, ERR_ABRT); + if (tcp_active_pcbs_changed) { + goto tcp_slowtmr_start; + } + } else { + /* get the 'next' element now and work with 'prev' below (in case of abort) */ + prev = pcb; + pcb = pcb->next; + + /* We check if we should poll the connection. */ + ++prev->polltmr; + if (prev->polltmr >= prev->pollinterval) { + prev->polltmr = 0; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: polling application\n")); + tcp_active_pcbs_changed = 0; + TCP_EVENT_POLL(prev, err); + if (tcp_active_pcbs_changed) { + goto tcp_slowtmr_start; + } + /* if err == ERR_ABRT, 'prev' is already deallocated */ + if (err == ERR_OK) { + tcp_output(prev); + } + } + } + } + + + /* Steps through all of the TIME-WAIT PCBs. */ + prev = NULL; + pcb = tcp_tw_pcbs; + while (pcb != NULL) { + LWIP_ASSERT("tcp_slowtmr: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + pcb_remove = 0; + + /* Check if this PCB has stayed long enough in TIME-WAIT */ + if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) { + ++pcb_remove; + } + + /* If the PCB should be removed, do it. */ + if (pcb_remove) { + struct tcp_pcb *pcb2; + tcp_pcb_purge(pcb); + /* Remove PCB from tcp_tw_pcbs list. */ + if (prev != NULL) { + LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_tw_pcbs", pcb != tcp_tw_pcbs); + prev->next = pcb->next; + } else { + /* This PCB was the first. */ + LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_tw_pcbs", tcp_tw_pcbs == pcb); + tcp_tw_pcbs = pcb->next; + } + pcb2 = pcb; + pcb = pcb->next; + tcp_free(pcb2); + } else { + prev = pcb; + pcb = pcb->next; + } + } +} + +/** + * Is called every TCP_FAST_INTERVAL (250 ms) and process data previously + * "refused" by upper layer (application) and sends delayed ACKs or pending FINs. + * + * Automatically called from tcp_tmr(). 
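+ *
+ * Example (an illustrative sketch for a NO_SYS-style main loop that drives
+ * the stack's timers itself; normally the sys_timeouts framework does this.
+ * tcp_tmr() calls tcp_fasttmr() on every invocation and tcp_slowtmr() on
+ * every other one):
+ *
+ *   void poll_tcp_timers(void)
+ *   {
+ *     static u32_t last_tcp_time;
+ *     if ((u32_t)(sys_now() - last_tcp_time) >= TCP_TMR_INTERVAL) {
+ *       last_tcp_time = sys_now();
+ *       tcp_tmr();
+ *     }
+ *   }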
+ */ +void +tcp_fasttmr(void) +{ + struct tcp_pcb *pcb; + + ++tcp_timer_ctr; + +tcp_fasttmr_start: + pcb = tcp_active_pcbs; + + while (pcb != NULL) { + if (pcb->last_timer != tcp_timer_ctr) { + struct tcp_pcb *next; + pcb->last_timer = tcp_timer_ctr; + /* send delayed ACKs */ + if (pcb->flags & TF_ACK_DELAY) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: delayed ACK\n")); + tcp_ack_now(pcb); + tcp_output(pcb); + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + /* send pending FIN */ + if (pcb->flags & TF_CLOSEPEND) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: pending FIN\n")); + tcp_clear_flags(pcb, TF_CLOSEPEND); + tcp_close_shutdown_fin(pcb); + } + + next = pcb->next; + + /* If there is data which was previously "refused" by upper layer */ + if (pcb->refused_data != NULL) { + tcp_active_pcbs_changed = 0; + tcp_process_refused_data(pcb); + if (tcp_active_pcbs_changed) { + /* application callback has changed the pcb list: restart the loop */ + goto tcp_fasttmr_start; + } + } + pcb = next; + } else { + pcb = pcb->next; + } + } +} + +/** Call tcp_output for all active pcbs that have TF_NAGLEMEMERR set */ +void +tcp_txnow(void) +{ + struct tcp_pcb *pcb; + + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->flags & TF_NAGLEMEMERR) { + tcp_output(pcb); + } + } +} + +/** Pass pcb->refused_data to the recv callback */ +err_t +tcp_process_refused_data(struct tcp_pcb *pcb) +{ +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + struct pbuf *rest; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + + LWIP_ERROR("tcp_process_refused_data: invalid pcb", pcb != NULL, return ERR_ARG); + +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + while (pcb->refused_data != NULL) +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + { + err_t err; + u8_t refused_flags = pcb->refused_data->flags; + /* set pcb->refused_data to NULL in case the callback frees it and then + closes the pcb */ + struct pbuf *refused_data = pcb->refused_data; +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + pbuf_split_64k(refused_data, &rest); + pcb->refused_data = rest; +#else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = NULL; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + /* Notify again application with data previously received. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: notify kept packet\n")); + TCP_EVENT_RECV(pcb, refused_data, ERR_OK, err); + if (err == ERR_OK) { + /* did refused_data include a FIN? */ + if ((refused_flags & PBUF_FLAG_TCP_FIN) +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + && (rest == NULL) +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + ) { + /* correct rcv_wnd as the application won't call tcp_recved() + for the FIN's seqno */ + if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) { + pcb->rcv_wnd++; + } + TCP_EVENT_CLOSED(pcb, err); + if (err == ERR_ABRT) { + return ERR_ABRT; + } + } + } else if (err == ERR_ABRT) { + /* if err == ERR_ABRT, 'pcb' is already deallocated */ + /* Drop incoming packets because pcb is "full" (only if the incoming + segment contains data). */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: drop incoming packets, because pcb is \"full\"\n")); + return ERR_ABRT; + } else { + /* data is still refused, pbuf is still valid (go on for ACK-only packets) */ +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_cat(refused_data, rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = refused_data; + return ERR_INPROGRESS; + } + } + return ERR_OK; +} + +/** + * Deallocates a list of TCP segments (tcp_seg structures). 
+ * + * @param seg tcp_seg list of TCP segments to free + */ +void +tcp_segs_free(struct tcp_seg *seg) +{ + while (seg != NULL) { + struct tcp_seg *next = seg->next; + tcp_seg_free(seg); + seg = next; + } +} + +/** + * Frees a TCP segment (tcp_seg structure). + * + * @param seg single tcp_seg to free + */ +void +tcp_seg_free(struct tcp_seg *seg) +{ + if (seg != NULL) { + if (seg->p != NULL) { + pbuf_free(seg->p); +#if TCP_DEBUG + seg->p = NULL; +#endif /* TCP_DEBUG */ + } + memp_free(MEMP_TCP_SEG, seg); + } +} + +/** + * @ingroup tcp + * Sets the priority of a connection. + * + * @param pcb the tcp_pcb to manipulate + * @param prio new priority + */ +void +tcp_setprio(struct tcp_pcb *pcb, u8_t prio) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_setprio: invalid pcb", pcb != NULL, return); + + pcb->prio = prio; +} + +#if TCP_QUEUE_OOSEQ +/** + * Returns a copy of the given TCP segment. + * The pbuf and data are not copied, only the pointers + * + * @param seg the old tcp_seg + * @return a copy of seg + */ +struct tcp_seg * +tcp_seg_copy(struct tcp_seg *seg) +{ + struct tcp_seg *cseg; + + LWIP_ASSERT("tcp_seg_copy: invalid seg", seg != NULL); + + cseg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG); + if (cseg == NULL) { + return NULL; + } + SMEMCPY((u8_t *)cseg, (const u8_t *)seg, sizeof(struct tcp_seg)); + pbuf_ref(cseg->p); + return cseg; +} +#endif /* TCP_QUEUE_OOSEQ */ + +#if LWIP_CALLBACK_API +/** + * Default receive callback that is called if the user didn't register + * a recv callback for the pcb. + */ +err_t +tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) +{ + LWIP_UNUSED_ARG(arg); + + LWIP_ERROR("tcp_recv_null: invalid pcb", pcb != NULL, return ERR_ARG); + + if (p != NULL) { + tcp_recved(pcb, p->tot_len); + pbuf_free(p); + } else if (err == ERR_OK) { + return tcp_close(pcb); + } + return ERR_OK; +} +#endif /* LWIP_CALLBACK_API */ + +/** + * Kills the oldest active connection that has a lower priority than 'prio'. + * + * @param prio minimum priority + */ +static void +tcp_kill_prio(u8_t prio) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + u8_t mprio; + + mprio = LWIP_MIN(TCP_PRIO_MAX, prio); + + /* We want to kill connections with a lower prio, so bail out if + * supplied prio is 0 - there can never be a lower prio + */ + if (mprio == 0) { + return; + } + + /* We only want kill connections with a lower prio, so decrement prio by one + * and start searching for oldest connection with same or lower priority than mprio. + * We want to find the connections with the lowest possible prio, and among + * these the one with the longest inactivity time. + */ + mprio--; + + inactivity = 0; + inactive = NULL; + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + /* lower prio is always a kill candidate */ + if ((pcb->prio < mprio) || + /* longer inactivity is also a kill candidate */ + ((pcb->prio == mprio) && ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity))) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + mprio = pcb->prio; + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_prio: killing oldest PCB %p (%"S32_F")\n", + (void *)inactive, inactivity)); + tcp_abort(inactive); + } +} + +/** + * Kills the oldest connection that is in specific state. + * Called from tcp_alloc() for LAST_ACK and CLOSING if no more connections are available. 
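+ *
+ * Connections in these states have already sent their FIN, so they are
+ * abandoned without a RST: no data is lost by killing them.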
+ */ +static void +tcp_kill_state(enum tcp_state state) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + + LWIP_ASSERT("invalid state", (state == CLOSING) || (state == LAST_ACK)); + + inactivity = 0; + inactive = NULL; + /* Go through the list of active pcbs and get the oldest pcb that is in state + CLOSING/LAST_ACK. */ + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + if (pcb->state == state) { + if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + } + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_closing: killing oldest %s PCB %p (%"S32_F")\n", + tcp_state_str[state], (void *)inactive, inactivity)); + /* Don't send a RST, since no data is lost. */ + tcp_abandon(inactive, 0); + } +} + +/** + * Kills the oldest connection that is in TIME_WAIT state. + * Called from tcp_alloc() if no more connections are available. + */ +static void +tcp_kill_timewait(void) +{ + struct tcp_pcb *pcb, *inactive; + u32_t inactivity; + + inactivity = 0; + inactive = NULL; + /* Go through the list of TIME_WAIT pcbs and get the oldest pcb. */ + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { + inactivity = tcp_ticks - pcb->tmr; + inactive = pcb; + } + } + if (inactive != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_timewait: killing oldest TIME-WAIT PCB %p (%"S32_F")\n", + (void *)inactive, inactivity)); + tcp_abort(inactive); + } +} + +/* Called when allocating a pcb fails. + * In this case, we want to handle all pcbs that want to close first: if we can + * now send the FIN (which failed before), the pcb might be in a state that is + * OK for us to now free it. + */ +static void +tcp_handle_closepend(void) +{ + struct tcp_pcb *pcb = tcp_active_pcbs; + + while (pcb != NULL) { + struct tcp_pcb *next = pcb->next; + /* send pending FIN */ + if (pcb->flags & TF_CLOSEPEND) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_handle_closepend: pending FIN\n")); + tcp_clear_flags(pcb, TF_CLOSEPEND); + tcp_close_shutdown_fin(pcb); + } + pcb = next; + } +} + +/** + * Allocate a new tcp_pcb structure. + * + * @param prio priority for the new pcb + * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_alloc(u8_t prio) +{ + struct tcp_pcb *pcb; + + LWIP_ASSERT_CORE_LOCKED(); + + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try to send FIN for all pcbs stuck in TF_CLOSEPEND first */ + tcp_handle_closepend(); + + /* Try killing oldest connection in TIME-WAIT. */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest TIME-WAIT connection\n")); + tcp_kill_timewait(); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest connection in LAST-ACK (these wouldn't go to TIME-WAIT). */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest LAST-ACK connection\n")); + tcp_kill_state(LAST_ACK); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest connection in CLOSING. */ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest CLOSING connection\n")); + tcp_kill_state(CLOSING); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb == NULL) { + /* Try killing oldest active connection with lower priority than the new one. 
*/ + LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing oldest connection with prio lower than %d\n", prio)); + tcp_kill_prio(prio); + /* Try to allocate a tcp_pcb again. */ + pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed multiple times before */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* adjust err stats: memp_malloc failed above */ + MEMP_STATS_DEC(err, MEMP_TCP_PCB); + } + } + if (pcb != NULL) { + /* zero out the whole pcb, so there is no need to initialize members to zero */ + memset(pcb, 0, sizeof(struct tcp_pcb)); + pcb->prio = prio; + pcb->snd_buf = TCP_SND_BUF; + /* Start with a window that does not need scaling. When window scaling is + enabled and used, the window is enlarged when both sides agree on scaling. */ + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); + pcb->ttl = TCP_TTL; + /* As initial send MSS, we use TCP_MSS but limit it to 536. + The send MSS is updated when an MSS option is received. */ + pcb->mss = INITIAL_MSS; + pcb->rto = 3000 / TCP_SLOW_INTERVAL; + pcb->sv = 3000 / TCP_SLOW_INTERVAL; + pcb->rtime = -1; + pcb->cwnd = 1; + pcb->tmr = tcp_ticks; + pcb->last_timer = tcp_timer_ctr; + + /* RFC 5681 recommends setting ssthresh abritrarily high and gives an example + of using the largest advertised receive window. We've seen complications with + receiving TCPs that use window scaling and/or window auto-tuning where the + initial advertised window is very small and then grows rapidly once the + connection is established. To avoid these complications, we set ssthresh to the + largest effective cwnd (amount of in-flight data) that the sender can have. */ + pcb->ssthresh = TCP_SND_BUF; + +#if LWIP_CALLBACK_API + pcb->recv = tcp_recv_null; +#endif /* LWIP_CALLBACK_API */ + + /* Init KEEPALIVE timer */ + pcb->keep_idle = TCP_KEEPIDLE_DEFAULT; + +#if LWIP_TCP_KEEPALIVE + pcb->keep_intvl = TCP_KEEPINTVL_DEFAULT; + pcb->keep_cnt = TCP_KEEPCNT_DEFAULT; +#endif /* LWIP_TCP_KEEPALIVE */ + } + return pcb; +} + +/** + * @ingroup tcp_raw + * Creates a new TCP protocol control block but doesn't place it on + * any of the TCP PCB lists. + * The pcb is not put on any list until binding using tcp_bind(). + * If memory is not available for creating the new pcb, NULL is returned. + * + * @internal: Maybe there should be a idle TCP PCB list where these + * PCBs are put on. Port reservation using tcp_bind() is implemented but + * allocated pcbs that are not bound can't be killed automatically if wanting + * to allocate a pcb with higher prio (@see tcp_kill_prio()) + * + * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_new(void) +{ + return tcp_alloc(TCP_PRIO_NORMAL); +} + +/** + * @ingroup tcp_raw + * Creates a new TCP protocol control block but doesn't + * place it on any of the TCP PCB lists. + * The pcb is not put on any list until binding using tcp_bind(). + * + * @param type IP address type, see @ref lwip_ip_addr_type definitions. + * If you want to listen to IPv4 and IPv6 (dual-stack) connections, + * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. 
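+ *
+ * Example (an illustrative dual-stack listener sketch; the port is arbitrary
+ * and error handling is shortened):
+ *
+ *   struct tcp_pcb *pcb = tcp_new_ip_type(IPADDR_TYPE_ANY);
+ *   if ((pcb != NULL) && (tcp_bind(pcb, IP_ANY_TYPE, 7) == ERR_OK)) {
+ *     pcb = tcp_listen(pcb);
+ *   }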
+ * @return a new tcp_pcb that initially is in state CLOSED + */ +struct tcp_pcb * +tcp_new_ip_type(u8_t type) +{ + struct tcp_pcb *pcb; + pcb = tcp_alloc(TCP_PRIO_NORMAL); +#if LWIP_IPV4 && LWIP_IPV6 + if (pcb != NULL) { + IP_SET_TYPE_VAL(pcb->local_ip, type); + IP_SET_TYPE_VAL(pcb->remote_ip, type); + } +#else + LWIP_UNUSED_ARG(type); +#endif /* LWIP_IPV4 && LWIP_IPV6 */ + return pcb; +} + +/** + * @ingroup tcp_raw + * Specifies the program specific state that should be passed to all + * other callback functions. The "pcb" argument is the current TCP + * connection control block, and the "arg" argument is the argument + * that will be passed to the callbacks. + * + * @param pcb tcp_pcb to set the callback argument + * @param arg void pointer argument to pass to callback functions + */ +void +tcp_arg(struct tcp_pcb *pcb, void *arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + /* This function is allowed to be called for both listen pcbs and + connection pcbs. */ + if (pcb != NULL) { + pcb->callback_arg = arg; + } +} +#if LWIP_CALLBACK_API + +/** + * @ingroup tcp_raw + * Sets the callback function that will be called when new data + * arrives. The callback function will be passed a NULL pbuf to + * indicate that the remote host has closed the connection. If the + * callback function returns ERR_OK or ERR_ABRT it must have + * freed the pbuf, otherwise it must not have freed it. + * + * @param pcb tcp_pcb to set the recv callback + * @param recv callback function to call for this pcb when data is received + */ +void +tcp_recv(struct tcp_pcb *pcb, tcp_recv_fn recv) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for recv callback", pcb->state != LISTEN); + pcb->recv = recv; + } +} + +/** + * @ingroup tcp_raw + * Specifies the callback function that should be called when data has + * successfully been received (i.e., acknowledged) by the remote + * host. The len argument passed to the callback function gives the + * amount bytes that was acknowledged by the last acknowledgment. + * + * @param pcb tcp_pcb to set the sent callback + * @param sent callback function to call for this pcb when data is successfully sent + */ +void +tcp_sent(struct tcp_pcb *pcb, tcp_sent_fn sent) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for sent callback", pcb->state != LISTEN); + pcb->sent = sent; + } +} + +/** + * @ingroup tcp_raw + * Used to specify the function that should be called when a fatal error + * has occurred on the connection. + * + * If a connection is aborted because of an error, the application is + * alerted of this event by the err callback. Errors that might abort a + * connection are when there is a shortage of memory. The callback + * function to be called is set using the tcp_err() function. + * + * @note The corresponding pcb is already freed when this callback is called! + * + * @param pcb tcp_pcb to set the err callback + * @param err callback function to call for this pcb when a fatal error + * has occurred on the connection + */ +void +tcp_err(struct tcp_pcb *pcb, tcp_err_fn err) +{ + LWIP_ASSERT_CORE_LOCKED(); + if (pcb != NULL) { + LWIP_ASSERT("invalid socket state for err callback", pcb->state != LISTEN); + pcb->errf = err; + } +} + +/** + * @ingroup tcp_raw + * Used for specifying the function that should be called when a + * LISTENing connection has been connected to another host. 
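+ *
+ * Example (illustrative; my_accept() is a hypothetical application callback
+ * and my_recv() a recv callback as described for tcp_recv() above):
+ *
+ *   static err_t my_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
+ *   {
+ *     LWIP_UNUSED_ARG(arg);
+ *     if ((err != ERR_OK) || (newpcb == NULL)) {
+ *       return ERR_VAL;
+ *     }
+ *     tcp_recv(newpcb, my_recv);
+ *     return ERR_OK;
+ *   }
+ *
+ *   tcp_accept(listen_pcb, my_accept);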
+ * + * @param pcb tcp_pcb to set the accept callback + * @param accept callback function to call for this pcb when LISTENing + * connection has been connected to another host + */ +void +tcp_accept(struct tcp_pcb *pcb, tcp_accept_fn accept) +{ + LWIP_ASSERT_CORE_LOCKED(); + if ((pcb != NULL) && (pcb->state == LISTEN)) { + struct tcp_pcb_listen *lpcb = (struct tcp_pcb_listen *)pcb; + lpcb->accept = accept; + } +} +#endif /* LWIP_CALLBACK_API */ + + +/** + * @ingroup tcp_raw + * Specifies the polling interval and the callback function that should + * be called to poll the application. The interval is specified in + * number of TCP coarse grained timer shots, which typically occurs + * twice a second. An interval of 10 means that the application would + * be polled every 5 seconds. + * + * When a connection is idle (i.e., no data is either transmitted or + * received), lwIP will repeatedly poll the application by calling a + * specified callback function. This can be used either as a watchdog + * timer for killing connections that have stayed idle for too long, or + * as a method of waiting for memory to become available. For instance, + * if a call to tcp_write() has failed because memory wasn't available, + * the application may use the polling functionality to call tcp_write() + * again when the connection has been idle for a while. + */ +void +tcp_poll(struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("tcp_poll: invalid pcb", pcb != NULL, return); + LWIP_ASSERT("invalid socket state for poll", pcb->state != LISTEN); + +#if LWIP_CALLBACK_API + pcb->poll = poll; +#else /* LWIP_CALLBACK_API */ + LWIP_UNUSED_ARG(poll); +#endif /* LWIP_CALLBACK_API */ + pcb->pollinterval = interval; +} + +/** + * Purges a TCP PCB. Removes any buffered data and frees the buffer memory + * (pcb->ooseq, pcb->unsent and pcb->unacked are freed). + * + * @param pcb tcp_pcb to purge. The pcb itself is not deallocated! + */ +void +tcp_pcb_purge(struct tcp_pcb *pcb) +{ + LWIP_ERROR("tcp_pcb_purge: invalid pcb", pcb != NULL, return); + + if (pcb->state != CLOSED && + pcb->state != TIME_WAIT && + pcb->state != LISTEN) { + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge\n")); + + tcp_backlog_accepted(pcb); + + if (pcb->refused_data != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->refused_data\n")); + pbuf_free(pcb->refused_data); + pcb->refused_data = NULL; + } + if (pcb->unsent != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: not all data sent\n")); + } + if (pcb->unacked != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->unacked\n")); + } +#if TCP_QUEUE_OOSEQ + if (pcb->ooseq != NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->ooseq\n")); + tcp_free_ooseq(pcb); + } +#endif /* TCP_QUEUE_OOSEQ */ + + /* Stop the retransmission timer as it will expect data on unacked + queue if it fires */ + pcb->rtime = -1; + + tcp_segs_free(pcb->unsent); + tcp_segs_free(pcb->unacked); + pcb->unacked = pcb->unsent = NULL; +#if TCP_OVERSIZE + pcb->unsent_oversize = 0; +#endif /* TCP_OVERSIZE */ + } +} + +/** + * Purges the PCB and removes it from a PCB list. Any delayed ACKs are sent first. + * + * @param pcblist PCB list to purge. + * @param pcb tcp_pcb to purge. The pcb itself is NOT deallocated! 
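+ *
+ * Example (mirrors how tcp_input() disposes of a reset connection; the pcb
+ * must be freed separately):
+ *
+ *   tcp_pcb_remove(&tcp_active_pcbs, pcb);
+ *   tcp_free(pcb);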
+ */ +void +tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_pcb_remove: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_pcb_remove: invalid pcblist", pcblist != NULL); + + TCP_RMV(pcblist, pcb); + + tcp_pcb_purge(pcb); + + /* if there is an outstanding delayed ACKs, send it */ + if ((pcb->state != TIME_WAIT) && + (pcb->state != LISTEN) && + (pcb->flags & TF_ACK_DELAY)) { + tcp_ack_now(pcb); + tcp_output(pcb); + } + + if (pcb->state != LISTEN) { + LWIP_ASSERT("unsent segments leaking", pcb->unsent == NULL); + LWIP_ASSERT("unacked segments leaking", pcb->unacked == NULL); +#if TCP_QUEUE_OOSEQ + LWIP_ASSERT("ooseq segments leaking", pcb->ooseq == NULL); +#endif /* TCP_QUEUE_OOSEQ */ + } + + pcb->state = CLOSED; + /* reset the local port to prevent the pcb from being 'bound' */ + pcb->local_port = 0; + + LWIP_ASSERT("tcp_pcb_remove: tcp_pcbs_sane()", tcp_pcbs_sane()); +} + +/** + * Calculates a new initial sequence number for new connections. + * + * @return u32_t pseudo random sequence number + */ +u32_t +tcp_next_iss(struct tcp_pcb *pcb) +{ +#ifdef LWIP_HOOK_TCP_ISN + LWIP_ASSERT("tcp_next_iss: invalid pcb", pcb != NULL); + return LWIP_HOOK_TCP_ISN(&pcb->local_ip, pcb->local_port, &pcb->remote_ip, pcb->remote_port); +#else /* LWIP_HOOK_TCP_ISN */ + static u32_t iss = 6510; + + LWIP_ASSERT("tcp_next_iss: invalid pcb", pcb != NULL); + LWIP_UNUSED_ARG(pcb); + + iss += tcp_ticks; /* XXX */ + return iss; +#endif /* LWIP_HOOK_TCP_ISN */ +} + +#if TCP_CALCULATE_EFF_SEND_MSS +/** + * Calculates the effective send mss that can be used for a specific IP address + * by calculating the minimum of TCP_MSS and the mtu (if set) of the target + * netif (if not NULL). + */ +u16_t +tcp_eff_send_mss_netif(u16_t sendmss, struct netif *outif, const ip_addr_t *dest) +{ + u16_t mss_s; + u16_t mtu; + + LWIP_UNUSED_ARG(dest); /* in case IPv6 is disabled */ + + LWIP_ASSERT("tcp_eff_send_mss_netif: invalid dst_ip", dest != NULL); + +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_IS_V6(dest)) +#endif /* LWIP_IPV4 */ + { + /* First look in destination cache, to see if there is a Path MTU. */ + mtu = nd6_get_destination_mtu(ip_2_ip6(dest), outif); + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + if (outif == NULL) { + return sendmss; + } + mtu = outif->mtu; + } +#endif /* LWIP_IPV4 */ + + if (mtu != 0) { + u16_t offset; +#if LWIP_IPV6 +#if LWIP_IPV4 + if (IP_IS_V6(dest)) +#endif /* LWIP_IPV4 */ + { + offset = IP6_HLEN + TCP_HLEN; + } +#if LWIP_IPV4 + else +#endif /* LWIP_IPV4 */ +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + { + offset = IP_HLEN + TCP_HLEN; + } +#endif /* LWIP_IPV4 */ + mss_s = (mtu > offset) ? (u16_t)(mtu - offset) : 0; + /* RFC 1122, chap 4.2.2.6: + * Eff.snd.MSS = min(SendMSS+20, MMS_S) - TCPhdrsize - IPoptionsize + * We correct for TCP options in tcp_write(), and don't support IP options. + */ + sendmss = LWIP_MIN(sendmss, mss_s); + } + return sendmss; +} +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + +/** Helper function for tcp_netif_ip_addr_changed() that iterates a pcb list */ +static void +tcp_netif_ip_addr_changed_pcblist(const ip_addr_t *old_addr, struct tcp_pcb *pcb_list) +{ + struct tcp_pcb *pcb; + pcb = pcb_list; + + LWIP_ASSERT("tcp_netif_ip_addr_changed_pcblist: invalid old_addr", old_addr != NULL); + + while (pcb != NULL) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&pcb->local_ip, old_addr) +#if LWIP_AUTOIP + /* connections to link-local addresses must persist (RFC3927 ch. 
1.9) */ + && (!IP_IS_V4_VAL(pcb->local_ip) || !ip4_addr_islinklocal(ip_2_ip4(&pcb->local_ip))) +#endif /* LWIP_AUTOIP */ + ) { + /* this connection must be aborted */ + struct tcp_pcb *next = pcb->next; + LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: aborting TCP pcb %p\n", (void *)pcb)); + tcp_abort(pcb); + pcb = next; + } else { + pcb = pcb->next; + } + } +} + +/** This function is called from netif.c when address is changed or netif is removed + * + * @param old_addr IP address of the netif before change + * @param new_addr IP address of the netif after change or NULL if netif has been removed + */ +void +tcp_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr) +{ + struct tcp_pcb_listen *lpcb; + + if (!ip_addr_isany(old_addr)) { + tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_active_pcbs); + tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_bound_pcbs); + + if (!ip_addr_isany(new_addr)) { + /* PCB bound to current local interface address? */ + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + /* PCB bound to current local interface address? */ + if (ip_addr_cmp(&lpcb->local_ip, old_addr)) { + /* The PCB is listening to the old ipaddr and + * is set to listen to the new one instead */ + ip_addr_copy(lpcb->local_ip, *new_addr); + } + } + } + } +} + +const char * +tcp_debug_state_str(enum tcp_state s) +{ + return tcp_state_str[s]; +} + +err_t +tcp_tcp_get_tcp_addrinfo(struct tcp_pcb *pcb, int local, ip_addr_t *addr, u16_t *port) +{ + if (pcb) { + if (local) { + if (addr) { + *addr = pcb->local_ip; + } + if (port) { + *port = pcb->local_port; + } + } else { + if (addr) { + *addr = pcb->remote_ip; + } + if (port) { + *port = pcb->remote_port; + } + } + return ERR_OK; + } + return ERR_VAL; +} + +#if TCP_QUEUE_OOSEQ +/* Free all ooseq pbufs (and possibly reset SACK state) */ +void +tcp_free_ooseq(struct tcp_pcb *pcb) +{ + if (pcb->ooseq) { + tcp_segs_free(pcb->ooseq); + pcb->ooseq = NULL; +#if LWIP_TCP_SACK_OUT + memset(pcb->rcv_sacks, 0, sizeof(pcb->rcv_sacks)); +#endif /* LWIP_TCP_SACK_OUT */ + } +} +#endif /* TCP_QUEUE_OOSEQ */ + +#if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG +/** + * Print a tcp header for debugging purposes. 
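+ *
+ * Example (mirrors the use in tcp_input(), where p->payload still points at
+ * the TCP header):
+ *
+ *   tcp_debug_print((struct tcp_hdr *)p->payload);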
+ * + * @param tcphdr pointer to a struct tcp_hdr + */ +void +tcp_debug_print(struct tcp_hdr *tcphdr) +{ + LWIP_DEBUGF(TCP_DEBUG, ("TCP header:\n")); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n", + lwip_ntohs(tcphdr->src), lwip_ntohs(tcphdr->dest))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (seq no)\n", + lwip_ntohl(tcphdr->seqno))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (ack no)\n", + lwip_ntohl(tcphdr->ackno))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| %2"U16_F" | |%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"| %5"U16_F" | (hdrlen, flags (", + TCPH_HDRLEN(tcphdr), + (u16_t)(TCPH_FLAGS(tcphdr) >> 5 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 4 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 3 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 2 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) >> 1 & 1), + (u16_t)(TCPH_FLAGS(tcphdr) & 1), + lwip_ntohs(tcphdr->wnd))); + tcp_debug_print_flags(TCPH_FLAGS(tcphdr)); + LWIP_DEBUGF(TCP_DEBUG, ("), win)\n")); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(TCP_DEBUG, ("| 0x%04"X16_F" | %5"U16_F" | (chksum, urgp)\n", + lwip_ntohs(tcphdr->chksum), lwip_ntohs(tcphdr->urgp))); + LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); +} + +/** + * Print a tcp state for debugging purposes. + * + * @param s enum tcp_state to print + */ +void +tcp_debug_print_state(enum tcp_state s) +{ + LWIP_DEBUGF(TCP_DEBUG, ("State: %s\n", tcp_state_str[s])); +} + +/** + * Print tcp flags for debugging purposes. + * + * @param flags tcp flags, all active flags are printed + */ +void +tcp_debug_print_flags(u8_t flags) +{ + if (flags & TCP_FIN) { + LWIP_DEBUGF(TCP_DEBUG, ("FIN ")); + } + if (flags & TCP_SYN) { + LWIP_DEBUGF(TCP_DEBUG, ("SYN ")); + } + if (flags & TCP_RST) { + LWIP_DEBUGF(TCP_DEBUG, ("RST ")); + } + if (flags & TCP_PSH) { + LWIP_DEBUGF(TCP_DEBUG, ("PSH ")); + } + if (flags & TCP_ACK) { + LWIP_DEBUGF(TCP_DEBUG, ("ACK ")); + } + if (flags & TCP_URG) { + LWIP_DEBUGF(TCP_DEBUG, ("URG ")); + } + if (flags & TCP_ECE) { + LWIP_DEBUGF(TCP_DEBUG, ("ECE ")); + } + if (flags & TCP_CWR) { + LWIP_DEBUGF(TCP_DEBUG, ("CWR ")); + } + LWIP_DEBUGF(TCP_DEBUG, ("\n")); +} + +/** + * Print all tcp_pcbs in every list for debugging purposes. 
+ */ +void +tcp_debug_print_pcbs(void) +{ + struct tcp_pcb *pcb; + struct tcp_pcb_listen *pcbl; + + LWIP_DEBUGF(TCP_DEBUG, ("Active PCB states:\n")); + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ", + pcb->local_port, pcb->remote_port, + pcb->snd_nxt, pcb->rcv_nxt)); + tcp_debug_print_state(pcb->state); + } + + LWIP_DEBUGF(TCP_DEBUG, ("Listen PCB states:\n")); + for (pcbl = tcp_listen_pcbs.listen_pcbs; pcbl != NULL; pcbl = pcbl->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F" ", pcbl->local_port)); + tcp_debug_print_state(pcbl->state); + } + + LWIP_DEBUGF(TCP_DEBUG, ("TIME-WAIT PCB states:\n")); + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ", + pcb->local_port, pcb->remote_port, + pcb->snd_nxt, pcb->rcv_nxt)); + tcp_debug_print_state(pcb->state); + } +} + +/** + * Check state consistency of the tcp_pcb lists. + */ +s16_t +tcp_pcbs_sane(void) +{ + struct tcp_pcb *pcb; + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != CLOSED", pcb->state != CLOSED); + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != LISTEN", pcb->state != LISTEN); + LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT); + } + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_pcbs_sane: tw pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + } + return 1; +} +#endif /* TCP_DEBUG */ + +#if LWIP_TCP_PCB_NUM_EXT_ARGS +/** + * @defgroup tcp_raw_extargs ext arguments + * @ingroup tcp_raw + * Additional data storage per tcp pcb\n + * @see @ref tcp_raw + * + * When LWIP_TCP_PCB_NUM_EXT_ARGS is > 0, every tcp pcb (including listen pcb) + * includes a number of additional argument entries in an array. + * + * To support memory management, in addition to a 'void *', callbacks can be + * provided to manage transition from listening pcbs to connections and to + * deallocate memory when a pcb is deallocated (see struct @ref tcp_ext_arg_callbacks). + * + * After allocating this index, use @ref tcp_ext_arg_set and @ref tcp_ext_arg_get + * to store and load arguments from this index for a given pcb. + */ + +static u8_t tcp_ext_arg_id; + +/** + * @ingroup tcp_raw_extargs + * Allocate an index to store data in ext_args member of struct tcp_pcb. + * Returned value is an index in mentioned array. + * The index is *global* over all pcbs! + * + * When @ref LWIP_TCP_PCB_NUM_EXT_ARGS is > 0, every tcp pcb (including listen pcb) + * includes a number of additional argument entries in an array. + * + * To support memory management, in addition to a 'void *', callbacks can be + * provided to manage transition from listening pcbs to connections and to + * deallocate memory when a pcb is deallocated (see struct @ref tcp_ext_arg_callbacks). + * + * After allocating this index, use @ref tcp_ext_arg_set and @ref tcp_ext_arg_get + * to store and load arguments from this index for a given pcb. 
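+ *
+ * Example (an illustrative sketch; my_id, my_session and the helper names
+ * are hypothetical application code):
+ *
+ *   static u8_t my_id;
+ *
+ *   void my_init(void)
+ *   {
+ *     my_id = tcp_ext_arg_alloc_id();
+ *   }
+ *
+ *   void my_attach(struct tcp_pcb *pcb, struct my_session *s)
+ *   {
+ *     tcp_ext_arg_set(pcb, my_id, s);
+ *   }
+ *
+ *   struct my_session *my_session_of(struct tcp_pcb *pcb)
+ *   {
+ *     return (struct my_session *)tcp_ext_arg_get(pcb, my_id);
+ *   }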
+ * + * @return a unique index into struct tcp_pcb.ext_args + */ +u8_t +tcp_ext_arg_alloc_id(void) +{ + u8_t result = tcp_ext_arg_id; + tcp_ext_arg_id++; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_TCP_PCB_NUM_EXT_ARGS >= 255 +#error LWIP_TCP_PCB_NUM_EXT_ARGS +#endif + LWIP_ASSERT("Increase LWIP_TCP_PCB_NUM_EXT_ARGS in lwipopts.h", result < LWIP_TCP_PCB_NUM_EXT_ARGS); + return result; +} + +/** + * @ingroup tcp_raw_extargs + * Set callbacks for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the callback + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @param callbacks callback table (const since it is referenced, not copied!) + */ +void +tcp_ext_arg_set_callbacks(struct tcp_pcb *pcb, uint8_t id, const struct tcp_ext_arg_callbacks * const callbacks) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + LWIP_ASSERT("callbacks != NULL", callbacks != NULL); + + LWIP_ASSERT_CORE_LOCKED(); + + pcb->ext_args[id].callbacks = callbacks; +} + +/** + * @ingroup tcp_raw_extargs + * Set data for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the data + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @param arg data pointer to set + */ +void tcp_ext_arg_set(struct tcp_pcb *pcb, uint8_t id, void *arg) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + + LWIP_ASSERT_CORE_LOCKED(); + + pcb->ext_args[id].data = arg; +} + +/** + * @ingroup tcp_raw_extargs + * Set data for a given index of ext_args on the specified pcb. + * + * @param pcb tcp_pcb for which to set the data + * @param id ext_args index to set (allocated via @ref tcp_ext_arg_alloc_id) + * @return data pointer at the given index + */ +void *tcp_ext_arg_get(const struct tcp_pcb *pcb, uint8_t id) +{ + LWIP_ASSERT("pcb != NULL", pcb != NULL); + LWIP_ASSERT("id < LWIP_TCP_PCB_NUM_EXT_ARGS", id < LWIP_TCP_PCB_NUM_EXT_ARGS); + + LWIP_ASSERT_CORE_LOCKED(); + + return pcb->ext_args[id].data; +} + +/** This function calls the "destroy" callback for all ext_args once a pcb is + * freed. + */ +static void +tcp_ext_arg_invoke_callbacks_destroyed(struct tcp_pcb_ext_args *ext_args) +{ + int i; + LWIP_ASSERT("ext_args != NULL", ext_args != NULL); + + for (i = 0; i < LWIP_TCP_PCB_NUM_EXT_ARGS; i++) { + if (ext_args[i].callbacks != NULL) { + if (ext_args[i].callbacks->destroy != NULL) { + ext_args[i].callbacks->destroy((u8_t)i, ext_args[i].data); + } + } + } +} + +/** This function calls the "passive_open" callback for all ext_args if a connection + * is in the process of being accepted. This is called just after the SYN is + * received and before a SYN/ACK is sent, to allow to modify the very first + * segment sent even on passive open. Naturally, the "accepted" callback of the + * pcb has not been called yet! 
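+ *
+ * Iteration stops at the first callback that returns an error; that error is
+ * returned to the caller and the remaining callbacks are not invoked.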
+ */ +err_t +tcp_ext_arg_invoke_callbacks_passive_open(struct tcp_pcb_listen *lpcb, struct tcp_pcb *cpcb) +{ + int i; + LWIP_ASSERT("lpcb != NULL", lpcb != NULL); + LWIP_ASSERT("cpcb != NULL", cpcb != NULL); + + for (i = 0; i < LWIP_TCP_PCB_NUM_EXT_ARGS; i++) { + if (lpcb->ext_args[i].callbacks != NULL) { + if (lpcb->ext_args[i].callbacks->passive_open != NULL) { + err_t err = lpcb->ext_args[i].callbacks->passive_open((u8_t)i, lpcb, cpcb); + if (err != ERR_OK) { + return err; + } + } + } + } + return ERR_OK; +} +#endif /* LWIP_TCP_PCB_NUM_EXT_ARGS */ + +#endif /* LWIP_TCP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp_in.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp_in.c new file mode 100644 index 0000000..4670178 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp_in.c @@ -0,0 +1,2178 @@ +/** + * @file + * Transmission Control Protocol, incoming traffic + * + * The input processing functions of the TCP layer. + * + * These functions are generally called in the order (ip_input() ->) + * tcp_input() -> * tcp_process() -> tcp_receive() (-> application). + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ *
+ * Author: Adam Dunkels <adam@sics.se>
+ *
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/priv/tcp_priv.h"
+#include "lwip/def.h"
+#include "lwip/ip_addr.h"
+#include "lwip/netif.h"
+#include "lwip/mem.h"
+#include "lwip/memp.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/stats.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#if LWIP_ND6_TCP_REACHABILITY_HINTS
+#include "lwip/nd6.h"
+#endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */
+
+#include <string.h>
+
+#ifdef LWIP_HOOK_FILENAME
+#include LWIP_HOOK_FILENAME
+#endif
+
+/** Initial CWND calculation as defined in RFC 2581 */
+#define LWIP_TCP_CALC_INITIAL_CWND(mss) ((tcpwnd_size_t)LWIP_MIN((4U * (mss)), LWIP_MAX((2U * (mss)), 4380U)))
+
+/* These variables are global to all functions involved in the input
+   processing of TCP segments. They are set by the tcp_input()
+   function. */
+static struct tcp_seg inseg;
+static struct tcp_hdr *tcphdr;
+static u16_t tcphdr_optlen;
+static u16_t tcphdr_opt1len;
+static u8_t *tcphdr_opt2;
+static u16_t tcp_optidx;
+static u32_t seqno, ackno;
+static tcpwnd_size_t recv_acked;
+static u16_t tcplen;
+static u8_t flags;
+
+static u8_t recv_flags;
+static struct pbuf *recv_data;
+
+struct tcp_pcb *tcp_input_pcb;
+
+/* Forward declarations. */
+static err_t tcp_process(struct tcp_pcb *pcb);
+static void tcp_receive(struct tcp_pcb *pcb);
+static void tcp_parseopt(struct tcp_pcb *pcb);
+
+static void tcp_listen_input(struct tcp_pcb_listen *pcb);
+static void tcp_timewait_input(struct tcp_pcb *pcb);
+
+static int tcp_input_delayed_close(struct tcp_pcb *pcb);
+
+#if LWIP_TCP_SACK_OUT
+static void tcp_add_sack(struct tcp_pcb *pcb, u32_t left, u32_t right);
+static void tcp_remove_sacks_lt(struct tcp_pcb *pcb, u32_t seq);
+#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT)
+static void tcp_remove_sacks_gt(struct tcp_pcb *pcb, u32_t seq);
+#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */
+#endif /* LWIP_TCP_SACK_OUT */
+
+/**
+ * The initial input processing of TCP. It verifies the TCP header, demultiplexes
+ * the segment between the PCBs and passes it on to tcp_process(), which implements
+ * the TCP finite state machine. This function is called by the IP layer (in
+ * ip_input()).
+ *
+ * @param p received TCP segment to process (p->payload pointing to the TCP header)
+ * @param inp network interface on which this segment was received
+ */
+void
+tcp_input(struct pbuf *p, struct netif *inp)
+{
+  struct tcp_pcb *pcb, *prev;
+  struct tcp_pcb_listen *lpcb;
+#if SO_REUSE
+  struct tcp_pcb *lpcb_prev = NULL;
+  struct tcp_pcb_listen *lpcb_any = NULL;
+#endif /* SO_REUSE */
+  u8_t hdrlen_bytes;
+  err_t err;
+
+  LWIP_UNUSED_ARG(inp);
+  LWIP_ASSERT_CORE_LOCKED();
+  LWIP_ASSERT("tcp_input: invalid pbuf", p != NULL);
+
+  PERF_START;
+
+  TCP_STATS_INC(tcp.recv);
+  MIB2_STATS_INC(mib2.tcpinsegs);
+
+  tcphdr = (struct tcp_hdr *)p->payload;
+
+#if TCP_INPUT_DEBUG
+  tcp_debug_print(tcphdr);
+#endif
+
+  /* Check that TCP header fits in payload */
+  if (p->len < TCP_HLEN) {
+    /* drop short packets */
+    LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: short packet (%"U16_F" bytes) discarded\n", p->tot_len));
+    TCP_STATS_INC(tcp.lenerr);
+    goto dropped;
+  }
+
+  /* Don't even process incoming broadcasts/multicasts.
*/ + if (ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()) || + ip_addr_ismulticast(ip_current_dest_addr())) { + TCP_STATS_INC(tcp.proterr); + goto dropped; + } + +#if CHECKSUM_CHECK_TCP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_TCP) { + /* Verify TCP checksum. */ + u16_t chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + ip_current_src_addr(), ip_current_dest_addr()); + if (chksum != 0) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packet discarded due to failing checksum 0x%04"X16_F"\n", + chksum)); + tcp_debug_print(tcphdr); + TCP_STATS_INC(tcp.chkerr); + goto dropped; + } + } +#endif /* CHECKSUM_CHECK_TCP */ + + /* sanity-check header length */ + hdrlen_bytes = TCPH_HDRLEN_BYTES(tcphdr); + if ((hdrlen_bytes < TCP_HLEN) || (hdrlen_bytes > p->tot_len)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: invalid header length (%"U16_F")\n", (u16_t)hdrlen_bytes)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* Move the payload pointer in the pbuf so that it points to the + TCP data instead of the TCP header. */ + tcphdr_optlen = (u16_t)(hdrlen_bytes - TCP_HLEN); + tcphdr_opt2 = NULL; + if (p->len >= hdrlen_bytes) { + /* all options are in the first pbuf */ + tcphdr_opt1len = tcphdr_optlen; + pbuf_remove_header(p, hdrlen_bytes); /* cannot fail */ + } else { + u16_t opt2len; + /* TCP header fits into first pbuf, options don't - data is in the next pbuf */ + /* there must be a next pbuf, due to hdrlen_bytes sanity check above */ + LWIP_ASSERT("p->next != NULL", p->next != NULL); + + /* advance over the TCP header (cannot fail) */ + pbuf_remove_header(p, TCP_HLEN); + + /* determine how long the first and second parts of the options are */ + tcphdr_opt1len = p->len; + opt2len = (u16_t)(tcphdr_optlen - tcphdr_opt1len); + + /* options continue in the next pbuf: set p to zero length and hide the + options in the next pbuf (adjusting p->tot_len) */ + pbuf_remove_header(p, tcphdr_opt1len); + + /* check that the options fit in the second pbuf */ + if (opt2len > p->next->len) { + /* drop short packets */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: options overflow second pbuf (%"U16_F" bytes)\n", p->next->len)); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + + /* remember the pointer to the second part of the options */ + tcphdr_opt2 = (u8_t *)p->next->payload; + + /* advance p->next to point after the options, and manually + adjust p->tot_len to keep it consistent with the changed p->next */ + pbuf_remove_header(p->next, opt2len); + p->tot_len = (u16_t)(p->tot_len - opt2len); + + LWIP_ASSERT("p->len == 0", p->len == 0); + LWIP_ASSERT("p->tot_len == p->next->tot_len", p->tot_len == p->next->tot_len); + } + + /* Convert fields in TCP header to host byte order. */ + tcphdr->src = lwip_ntohs(tcphdr->src); + tcphdr->dest = lwip_ntohs(tcphdr->dest); + seqno = tcphdr->seqno = lwip_ntohl(tcphdr->seqno); + ackno = tcphdr->ackno = lwip_ntohl(tcphdr->ackno); + tcphdr->wnd = lwip_ntohs(tcphdr->wnd); + + flags = TCPH_FLAGS(tcphdr); + tcplen = p->tot_len; + if (flags & (TCP_FIN | TCP_SYN)) { + tcplen++; + if (tcplen < p->tot_len) { + /* u16_t overflow, cannot handle this */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: length u16_t overflow, cannot handle this\n")); + TCP_STATS_INC(tcp.lenerr); + goto dropped; + } + } + + /* Demultiplex an incoming segment. First, we check if it is destined + for an active connection. 
*/ + prev = NULL; + + for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_input: active pcb->state != CLOSED", pcb->state != CLOSED); + LWIP_ASSERT("tcp_input: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT); + LWIP_ASSERT("tcp_input: active pcb->state != LISTEN", pcb->state != LISTEN); + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + prev = pcb; + continue; + } + + if (pcb->remote_port == tcphdr->src && + pcb->local_port == tcphdr->dest && + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) && + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + /* Move this PCB to the front of the list so that subsequent + lookups will be faster (we exploit locality in TCP segment + arrivals). */ + LWIP_ASSERT("tcp_input: pcb->next != pcb (before cache)", pcb->next != pcb); + if (prev != NULL) { + prev->next = pcb->next; + pcb->next = tcp_active_pcbs; + tcp_active_pcbs = pcb; + } else { + TCP_STATS_INC(tcp.cachehit); + } + LWIP_ASSERT("tcp_input: pcb->next != pcb (after cache)", pcb->next != pcb); + break; + } + prev = pcb; + } + + if (pcb == NULL) { + /* If it did not go to an active connection, we check the connections + in the TIME-WAIT state. */ + for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { + LWIP_ASSERT("tcp_input: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); + + /* check if PCB is bound to specific netif */ + if ((pcb->netif_idx != NETIF_NO_INDEX) && + (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + continue; + } + + if (pcb->remote_port == tcphdr->src && + pcb->local_port == tcphdr->dest && + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) && + ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + /* We don't really care enough to move this PCB to the front + of the list since we are not very likely to receive that + many segments for connections in TIME-WAIT. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for TIME_WAITing connection.\n")); +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if (LWIP_HOOK_TCP_INPACKET_PCB(pcb, tcphdr, tcphdr_optlen, tcphdr_opt1len, + tcphdr_opt2, p) == ERR_OK) +#endif + { + tcp_timewait_input(pcb); + } + pbuf_free(p); + return; + } + } + + /* Finally, if we still did not get a match, we check all PCBs that + are LISTENing for incoming connections. 
*/ + prev = NULL; + for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { + /* check if PCB is bound to specific netif */ + if ((lpcb->netif_idx != NETIF_NO_INDEX) && + (lpcb->netif_idx != netif_get_index(ip_data.current_input_netif))) { + prev = (struct tcp_pcb *)lpcb; + continue; + } + + if (lpcb->local_port == tcphdr->dest) { + if (IP_IS_ANY_TYPE_VAL(lpcb->local_ip)) { + /* found an ANY TYPE (IPv4/IPv6) match */ +#if SO_REUSE + lpcb_any = lpcb; + lpcb_prev = prev; +#else /* SO_REUSE */ + break; +#endif /* SO_REUSE */ + } else if (IP_ADDR_PCB_VERSION_MATCH_EXACT(lpcb, ip_current_dest_addr())) { + if (ip_addr_cmp(&lpcb->local_ip, ip_current_dest_addr())) { + /* found an exact match */ + break; + } else if (ip_addr_isany(&lpcb->local_ip)) { + /* found an ANY-match */ +#if SO_REUSE + lpcb_any = lpcb; + lpcb_prev = prev; +#else /* SO_REUSE */ + break; +#endif /* SO_REUSE */ + } + } + } + prev = (struct tcp_pcb *)lpcb; + } +#if SO_REUSE + /* first try specific local IP */ + if (lpcb == NULL) { + /* only pass to ANY if no specific local IP has been found */ + lpcb = lpcb_any; + prev = lpcb_prev; + } +#endif /* SO_REUSE */ + if (lpcb != NULL) { + /* Move this PCB to the front of the list so that subsequent + lookups will be faster (we exploit locality in TCP segment + arrivals). */ + if (prev != NULL) { + ((struct tcp_pcb_listen *)prev)->next = lpcb->next; + /* our successor is the remainder of the listening list */ + lpcb->next = tcp_listen_pcbs.listen_pcbs; + /* put this listening pcb at the head of the listening list */ + tcp_listen_pcbs.listen_pcbs = lpcb; + } else { + TCP_STATS_INC(tcp.cachehit); + } + + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for LISTENing connection.\n")); +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if (LWIP_HOOK_TCP_INPACKET_PCB((struct tcp_pcb *)lpcb, tcphdr, tcphdr_optlen, + tcphdr_opt1len, tcphdr_opt2, p) == ERR_OK) +#endif + { + tcp_listen_input(lpcb); + } + pbuf_free(p); + return; + } + } + +#if TCP_INPUT_DEBUG + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("+-+-+-+-+-+-+-+-+-+-+-+-+-+- tcp_input: flags ")); + tcp_debug_print_flags(TCPH_FLAGS(tcphdr)); + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n")); +#endif /* TCP_INPUT_DEBUG */ + + +#ifdef LWIP_HOOK_TCP_INPACKET_PCB + if ((pcb != NULL) && LWIP_HOOK_TCP_INPACKET_PCB(pcb, tcphdr, tcphdr_optlen, + tcphdr_opt1len, tcphdr_opt2, p) != ERR_OK) { + pbuf_free(p); + return; + } +#endif + if (pcb != NULL) { + /* The incoming segment belongs to a connection. */ +#if TCP_INPUT_DEBUG + tcp_debug_print_state(pcb->state); +#endif /* TCP_INPUT_DEBUG */ + + /* Set up a tcp_seg structure. */ + inseg.next = NULL; + inseg.len = p->tot_len; + inseg.p = p; + inseg.tcphdr = tcphdr; + + recv_data = NULL; + recv_flags = 0; + recv_acked = 0; + + if (flags & TCP_PSH) { + p->flags |= PBUF_FLAG_PUSH; + } + + /* If there is data which was previously "refused" by upper layer */ + if (pcb->refused_data != NULL) { + if ((tcp_process_refused_data(pcb) == ERR_ABRT) || + ((pcb->refused_data != NULL) && (tcplen > 0))) { + /* pcb has been aborted or refused data is still refused and the new + segment contains data */ + if (pcb->rcv_ann_wnd == 0) { + /* this is a zero-window probe, we respond to it with current RCV.NXT + and drop the data segment */ + tcp_send_empty_ack(pcb); + } + TCP_STATS_INC(tcp.drop); + MIB2_STATS_INC(mib2.tcpinerrs); + goto aborted; + } + } + tcp_input_pcb = pcb; + err = tcp_process(pcb); + /* A return value of ERR_ABRT means that tcp_abort() was called + and that the pcb has been freed. 
If so, we don't do anything. */ + if (err != ERR_ABRT) { + if (recv_flags & TF_RESET) { + /* TF_RESET means that the connection was reset by the other + end. We then call the error callback to inform the + application that the connection is dead before we + deallocate the PCB. */ + TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_RST); + tcp_pcb_remove(&tcp_active_pcbs, pcb); + tcp_free(pcb); + } else { + err = ERR_OK; + /* If the application has registered a "sent" function to be + called when new send buffer space is available, we call it + now. */ + if (recv_acked > 0) { + u16_t acked16; +#if LWIP_WND_SCALE + /* recv_acked is u32_t but the sent callback only takes a u16_t, + so we might have to call it multiple times. */ + u32_t acked = recv_acked; + while (acked > 0) { + acked16 = (u16_t)LWIP_MIN(acked, 0xffffu); + acked -= acked16; +#else + { + acked16 = recv_acked; +#endif + TCP_EVENT_SENT(pcb, (u16_t)acked16, err); + if (err == ERR_ABRT) { + goto aborted; + } + } + recv_acked = 0; + } + if (tcp_input_delayed_close(pcb)) { + goto aborted; + } +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + while (recv_data != NULL) { + struct pbuf *rest = NULL; + pbuf_split_64k(recv_data, &rest); +#else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + if (recv_data != NULL) { +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + + LWIP_ASSERT("pcb->refused_data == NULL", pcb->refused_data == NULL); + if (pcb->flags & TF_RXCLOSED) { + /* received data although already closed -> abort (send RST) to + notify the remote host that not all data has been processed */ + pbuf_free(recv_data); +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_free(rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + tcp_abort(pcb); + goto aborted; + } + + /* Notify application that data has been received. */ + TCP_EVENT_RECV(pcb, recv_data, ERR_OK, err); + if (err == ERR_ABRT) { +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_free(rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + goto aborted; + } + + /* If the upper layer can't receive this data, store it */ + if (err != ERR_OK) { +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + if (rest != NULL) { + pbuf_cat(recv_data, rest); + } +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + pcb->refused_data = recv_data; + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: keep incoming packet, because pcb is \"full\"\n")); +#if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE + break; + } else { + /* Upper layer received the data, go on with the rest if > 64K */ + recv_data = rest; +#endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ + } + } + + /* If a FIN segment was received, we call the callback + function with a NULL buffer to indicate EOF. */ + if (recv_flags & TF_GOT_FIN) { + if (pcb->refused_data != NULL) { + /* Delay this if we have refused data. */ + pcb->refused_data->flags |= PBUF_FLAG_TCP_FIN; + } else { + /* correct rcv_wnd as the application won't call tcp_recved() + for the FIN's seqno */ + if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) { + pcb->rcv_wnd++; + } + TCP_EVENT_CLOSED(pcb, err); + if (err == ERR_ABRT) { + goto aborted; + } + } + } + + tcp_input_pcb = NULL; + if (tcp_input_delayed_close(pcb)) { + goto aborted; + } + /* Try to send something out. */ + tcp_output(pcb); +#if TCP_INPUT_DEBUG +#if TCP_DEBUG + tcp_debug_print_state(pcb->state); +#endif /* TCP_DEBUG */ +#endif /* TCP_INPUT_DEBUG */ + } + } + /* Jump target if pcb has been aborted in a callback (by calling tcp_abort()). + Below this line, 'pcb' may not be dereferenced! 
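+     Only the global input state is reset here: tcp_input_pcb and recv_data
+     are cleared and our reference to inseg.p is released. The pcb itself
+     was already freed inside the callback.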
*/ +aborted: + tcp_input_pcb = NULL; + recv_data = NULL; + + /* give up our reference to inseg.p */ + if (inseg.p != NULL) { + pbuf_free(inseg.p); + inseg.p = NULL; + } + } else { + /* If no matching PCB was found, send a TCP RST (reset) to the + sender. */ + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_input: no PCB match found, resetting.\n")); + if (!(TCPH_FLAGS(tcphdr) & TCP_RST)) { + TCP_STATS_INC(tcp.proterr); + TCP_STATS_INC(tcp.drop); + tcp_rst(NULL, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } + pbuf_free(p); + } + + LWIP_ASSERT("tcp_input: tcp_pcbs_sane()", tcp_pcbs_sane()); + PERF_STOP("tcp_input"); + return; +dropped: + TCP_STATS_INC(tcp.drop); + MIB2_STATS_INC(mib2.tcpinerrs); + pbuf_free(p); +} + +/** Called from tcp_input to check for TF_CLOSED flag. This results in closing + * and deallocating a pcb at the correct place to ensure noone references it + * any more. + * @returns 1 if the pcb has been closed and deallocated, 0 otherwise + */ +static int +tcp_input_delayed_close(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_input_delayed_close: invalid pcb", pcb != NULL); + + if (recv_flags & TF_CLOSED) { + /* The connection has been closed and we will deallocate the + PCB. */ + if (!(pcb->flags & TF_RXCLOSED)) { + /* Connection closed although the application has only shut down the + tx side: call the PCB's err callback and indicate the closure to + ensure the application doesn't continue using the PCB. */ + TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_CLSD); + } + tcp_pcb_remove(&tcp_active_pcbs, pcb); + tcp_free(pcb); + return 1; + } + return 0; +} + +/** + * Called by tcp_input() when a segment arrives for a listening + * connection (from tcp_input()). + * + * @param pcb the tcp_pcb_listen for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static void +tcp_listen_input(struct tcp_pcb_listen *pcb) +{ + struct tcp_pcb *npcb; + u32_t iss; + err_t rc; + + if (flags & TCP_RST) { + /* An incoming RST should be ignored. Return. */ + return; + } + + LWIP_ASSERT("tcp_listen_input: invalid pcb", pcb != NULL); + + /* In the LISTEN state, we check for incoming SYN segments, + creates a new PCB, and responds with a SYN|ACK. */ + if (flags & TCP_ACK) { + /* For incoming segments with the ACK flag set, respond with a + RST. */ + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_listen_input: ACK in LISTEN, sending reset\n")); + tcp_rst((const struct tcp_pcb *)pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } else if (flags & TCP_SYN) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection request %"U16_F" -> %"U16_F".\n", tcphdr->src, tcphdr->dest)); +#if TCP_LISTEN_BACKLOG + if (pcb->accepts_pending >= pcb->backlog) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: listen backlog exceeded for port %"U16_F"\n", tcphdr->dest)); + return; + } +#endif /* TCP_LISTEN_BACKLOG */ + npcb = tcp_alloc(pcb->prio); + /* If a new PCB could not be created (probably due to lack of memory), + we don't do anything, but rely on the sender will retransmit the + SYN at a time when we have more memory available. 
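+     (The listener's accept callback is still invoked with ERR_MEM below,
+     so the application gets a chance to free resources, e.g. by closing
+     idle connections.)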
*/ + if (npcb == NULL) { + err_t err; + LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: could not allocate PCB\n")); + TCP_STATS_INC(tcp.memerr); + TCP_EVENT_ACCEPT(pcb, NULL, pcb->callback_arg, ERR_MEM, err); + LWIP_UNUSED_ARG(err); /* err not useful here */ + return; + } +#if TCP_LISTEN_BACKLOG + pcb->accepts_pending++; + tcp_set_flags(npcb, TF_BACKLOGPEND); +#endif /* TCP_LISTEN_BACKLOG */ + /* Set up the new PCB. */ + ip_addr_copy(npcb->local_ip, *ip_current_dest_addr()); + ip_addr_copy(npcb->remote_ip, *ip_current_src_addr()); + npcb->local_port = pcb->local_port; + npcb->remote_port = tcphdr->src; + npcb->state = SYN_RCVD; + npcb->rcv_nxt = seqno + 1; + npcb->rcv_ann_right_edge = npcb->rcv_nxt; + iss = tcp_next_iss(npcb); + npcb->snd_wl2 = iss; + npcb->snd_nxt = iss; + npcb->lastack = iss; + npcb->snd_lbb = iss; + npcb->snd_wl1 = seqno - 1;/* initialise to seqno-1 to force window update */ + npcb->callback_arg = pcb->callback_arg; +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + npcb->listener = pcb; +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + /* inherit socket options */ + npcb->so_options = pcb->so_options & SOF_INHERITED; + npcb->netif_idx = pcb->netif_idx; + /* Register the new PCB so that we can begin receiving segments + for it. */ + TCP_REG_ACTIVE(npcb); + + /* Parse any options in the SYN. */ + tcp_parseopt(npcb); + npcb->snd_wnd = tcphdr->wnd; + npcb->snd_wnd_max = npcb->snd_wnd; + +#if TCP_CALCULATE_EFF_SEND_MSS + npcb->mss = tcp_eff_send_mss(npcb->mss, &npcb->local_ip, &npcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + + MIB2_STATS_INC(mib2.tcppassiveopens); + +#if LWIP_TCP_PCB_NUM_EXT_ARGS + if (tcp_ext_arg_invoke_callbacks_passive_open(pcb, npcb) != ERR_OK) { + tcp_abandon(npcb, 0); + return; + } +#endif + + /* Send a SYN|ACK together with the MSS option. */ + rc = tcp_enqueue_flags(npcb, TCP_SYN | TCP_ACK); + if (rc != ERR_OK) { + tcp_abandon(npcb, 0); + return; + } + tcp_output(npcb); + } + return; +} + +/** + * Called by tcp_input() when a segment arrives for a connection in + * TIME_WAIT. + * + * @param pcb the tcp_pcb for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static void +tcp_timewait_input(struct tcp_pcb *pcb) +{ + /* RFC 1337: in TIME_WAIT, ignore RST and ACK FINs + any 'acceptable' segments */ + /* RFC 793 3.9 Event Processing - Segment Arrives: + * - first check sequence number - we skip that one in TIME_WAIT (always + * acceptable since we only send ACKs) + * - second check the RST bit (... return) */ + if (flags & TCP_RST) { + return; + } + + LWIP_ASSERT("tcp_timewait_input: invalid pcb", pcb != NULL); + + /* - fourth, check the SYN bit, */ + if (flags & TCP_SYN) { + /* If an incoming segment is not acceptable, an acknowledgment + should be sent in reply */ + if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd)) { + /* If the SYN is in the window it is an error, send a reset */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + return; + } + } else if (flags & TCP_FIN) { + /* - eighth, check the FIN bit: Remain in the TIME-WAIT state. + Restart the 2 MSL time-wait timeout.*/ + pcb->tmr = tcp_ticks; + } + + if ((tcplen > 0)) { + /* Acknowledge data, FIN or out-of-window SYN */ + tcp_ack_now(pcb); + tcp_output(pcb); + } + return; +} + +/** + * Implements the TCP state machine. Called by tcp_input. 
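+ * (This is the RFC 793 "Segment Arrives" processing for an active pcb;
+ * LISTEN and TIME-WAIT pcbs are handled separately in tcp_listen_input()
+ * and tcp_timewait_input().)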
In some + * states tcp_receive() is called to receive data. The tcp_seg + * argument will be freed by the caller (tcp_input()) unless the + * recv_data pointer in the pcb is set. + * + * @param pcb the tcp_pcb for which a segment arrived + * + * @note the segment which arrived is saved in global variables, therefore only the pcb + * involved is passed as a parameter to this function + */ +static err_t +tcp_process(struct tcp_pcb *pcb) +{ + struct tcp_seg *rseg; + u8_t acceptable = 0; + err_t err; + + err = ERR_OK; + + LWIP_ASSERT("tcp_process: invalid pcb", pcb != NULL); + + /* Process incoming RST segments. */ + if (flags & TCP_RST) { + /* First, determine if the reset is acceptable. */ + if (pcb->state == SYN_SENT) { + /* "In the SYN-SENT state (a RST received in response to an initial SYN), + the RST is acceptable if the ACK field acknowledges the SYN." */ + if (ackno == pcb->snd_nxt) { + acceptable = 1; + } + } else { + /* "In all states except SYN-SENT, all reset (RST) segments are validated + by checking their SEQ-fields." */ + if (seqno == pcb->rcv_nxt) { + acceptable = 1; + } else if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, + pcb->rcv_nxt + pcb->rcv_wnd)) { + /* If the sequence number is inside the window, we send a challenge ACK + and wait for a re-send with matching sequence number. + This follows RFC 5961 section 3.2 and addresses CVE-2004-0230 + (RST spoofing attack), which is present in RFC 793 RST handling. */ + tcp_ack_now(pcb); + } + } + + if (acceptable) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: Connection RESET\n")); + LWIP_ASSERT("tcp_input: pcb->state != CLOSED", pcb->state != CLOSED); + recv_flags |= TF_RESET; + tcp_clear_flags(pcb, TF_ACK_DELAY); + return ERR_RST; + } else { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n", + seqno, pcb->rcv_nxt)); + LWIP_DEBUGF(TCP_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n", + seqno, pcb->rcv_nxt)); + return ERR_OK; + } + } + + if ((flags & TCP_SYN) && (pcb->state != SYN_SENT && pcb->state != SYN_RCVD)) { + /* Cope with new connection attempt after remote end crashed */ + tcp_ack_now(pcb); + return ERR_OK; + } + + if ((pcb->flags & TF_RXCLOSED) == 0) { + /* Update the PCB (in)activity timer unless rx is closed (see tcp_shutdown) */ + pcb->tmr = tcp_ticks; + } + pcb->keep_cnt_sent = 0; + pcb->persist_probe = 0; + + tcp_parseopt(pcb); + + /* Do different things depending on the TCP state. */ + switch (pcb->state) { + case SYN_SENT: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("SYN-SENT: ackno %"U32_F" pcb->snd_nxt %"U32_F" unacked %"U32_F"\n", ackno, + pcb->snd_nxt, lwip_ntohl(pcb->unacked->tcphdr->seqno))); + /* received SYN ACK with expected sequence number? 
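+       (i.e. the ACK acknowledges exactly our SYN: ackno == lastack + 1)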
*/ + if ((flags & TCP_ACK) && (flags & TCP_SYN) + && (ackno == pcb->lastack + 1)) { + pcb->rcv_nxt = seqno + 1; + pcb->rcv_ann_right_edge = pcb->rcv_nxt; + pcb->lastack = ackno; + pcb->snd_wnd = tcphdr->wnd; + pcb->snd_wnd_max = pcb->snd_wnd; + pcb->snd_wl1 = seqno - 1; /* initialise to seqno - 1 to force window update */ + pcb->state = ESTABLISHED; + +#if TCP_CALCULATE_EFF_SEND_MSS + pcb->mss = tcp_eff_send_mss(pcb->mss, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + + pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SENT): cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + LWIP_ASSERT("pcb->snd_queuelen > 0", (pcb->snd_queuelen > 0)); + --pcb->snd_queuelen; + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_process: SYN-SENT --queuelen %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen)); + rseg = pcb->unacked; + if (rseg == NULL) { + /* might happen if tcp_output fails in tcp_rexmit_rto() + in which case the segment is on the unsent list */ + rseg = pcb->unsent; + LWIP_ASSERT("no segment to free", rseg != NULL); + pcb->unsent = rseg->next; + } else { + pcb->unacked = rseg->next; + } + tcp_seg_free(rseg); + + /* If there's nothing left to acknowledge, stop the retransmit + timer, otherwise reset it to start again */ + if (pcb->unacked == NULL) { + pcb->rtime = -1; + } else { + pcb->rtime = 0; + pcb->nrtx = 0; + } + + /* Call the user specified function to call when successfully + * connected. */ + TCP_EVENT_CONNECTED(pcb, ERR_OK, err); + if (err == ERR_ABRT) { + return ERR_ABRT; + } + tcp_ack_now(pcb); + } + /* received ACK? possibly a half-open connection */ + else if (flags & TCP_ACK) { + /* send a RST to bring the other side in a non-synchronized state. */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + /* Resend SYN immediately (don't wait for rto timeout) to establish + connection faster, but do not send more SYNs than we otherwise would + have, or we might get caught in a loop on loopback interfaces. */ + if (pcb->nrtx < TCP_SYNMAXRTX) { + pcb->rtime = 0; + tcp_rexmit_rto(pcb); + } + } + break; + case SYN_RCVD: + if (flags & TCP_ACK) { + /* expected ACK number? */ + if (TCP_SEQ_BETWEEN(ackno, pcb->lastack + 1, pcb->snd_nxt)) { + pcb->state = ESTABLISHED; + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection established %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); +#if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG + if (pcb->listener == NULL) { + /* listen pcb might be closed by now */ + err = ERR_VAL; + } else +#endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ + { +#if LWIP_CALLBACK_API + LWIP_ASSERT("pcb->listener->accept != NULL", pcb->listener->accept != NULL); +#endif + tcp_backlog_accepted(pcb); + /* Call the accept function. */ + TCP_EVENT_ACCEPT(pcb->listener, pcb, pcb->callback_arg, ERR_OK, err); + } + if (err != ERR_OK) { + /* If the accept function returns with an error, we abort + * the connection. */ + /* Already aborted? */ + if (err != ERR_ABRT) { + tcp_abort(pcb); + } + return ERR_ABRT; + } + /* If there was any data contained within this ACK, + * we'd better pass it on to the application as well. 
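+           (tcp_receive() queues such data on recv_data, which tcp_input()
+           then hands to the application's recv callback)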
*/ + tcp_receive(pcb); + + /* Prevent ACK for SYN to generate a sent event */ + if (recv_acked != 0) { + recv_acked--; + } + + pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SYN_RCVD): cwnd %"TCPWNDSIZE_F + " ssthresh %"TCPWNDSIZE_F"\n", + pcb->cwnd, pcb->ssthresh)); + + if (recv_flags & TF_GOT_FIN) { + tcp_ack_now(pcb); + pcb->state = CLOSE_WAIT; + } + } else { + /* incorrect ACK number, send RST */ + tcp_rst(pcb, ackno, seqno + tcplen, ip_current_dest_addr(), + ip_current_src_addr(), tcphdr->dest, tcphdr->src); + } + } else if ((flags & TCP_SYN) && (seqno == pcb->rcv_nxt - 1)) { + /* Looks like another copy of the SYN - retransmit our SYN-ACK */ + tcp_rexmit(pcb); + } + break; + case CLOSE_WAIT: + /* FALLTHROUGH */ + case ESTABLISHED: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { /* passive close */ + tcp_ack_now(pcb); + pcb->state = CLOSE_WAIT; + } + break; + case FIN_WAIT_1: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { + if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) && + pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, + ("TCP connection closed: FIN_WAIT_1 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_ack_now(pcb); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } else { + tcp_ack_now(pcb); + pcb->state = CLOSING; + } + } else if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) && + pcb->unsent == NULL) { + pcb->state = FIN_WAIT_2; + } + break; + case FIN_WAIT_2: + tcp_receive(pcb); + if (recv_flags & TF_GOT_FIN) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: FIN_WAIT_2 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_ack_now(pcb); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } + break; + case CLOSING: + tcp_receive(pcb); + if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: CLOSING %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + tcp_pcb_purge(pcb); + TCP_RMV_ACTIVE(pcb); + pcb->state = TIME_WAIT; + TCP_REG(&tcp_tw_pcbs, pcb); + } + break; + case LAST_ACK: + tcp_receive(pcb); + if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: LAST_ACK %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); + /* bugfix #21699: don't set pcb->state to CLOSED here or we risk leaking segments */ + recv_flags |= TF_CLOSED; + } + break; + default: + break; + } + return ERR_OK; +} + +#if TCP_QUEUE_OOSEQ +/** + * Insert segment into the list (segments covered with new one will be deleted) + * + * Called from tcp_receive() + */ +static void +tcp_oos_insert_segment(struct tcp_seg *cseg, struct tcp_seg *next) +{ + struct tcp_seg *old_seg; + + LWIP_ASSERT("tcp_oos_insert_segment: invalid cseg", cseg != NULL); + + if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) { + /* received segment overlaps all following segments */ + tcp_segs_free(next); + next = NULL; + } else { + /* delete some following segments + oos queue may have segments with FIN flag */ + while (next && + TCP_SEQ_GEQ((seqno + cseg->len), + (next->tcphdr->seqno + next->len))) { + /* cseg with FIN already processed */ + if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) { + TCPH_SET_FLAG(cseg->tcphdr, TCP_FIN); + } + old_seg = next; + next = next->next; + tcp_seg_free(old_seg); + } + if (next && + TCP_SEQ_GT(seqno + cseg->len, next->tcphdr->seqno)) { + /* We need to trim 
the incoming segment. */ + cseg->len = (u16_t)(next->tcphdr->seqno - seqno); + pbuf_realloc(cseg->p, cseg->len); + } + } + cseg->next = next; +} +#endif /* TCP_QUEUE_OOSEQ */ + +/** Remove segments from a list if the incoming ACK acknowledges them */ +static struct tcp_seg * +tcp_free_acked_segments(struct tcp_pcb *pcb, struct tcp_seg *seg_list, const char *dbg_list_name, + struct tcp_seg *dbg_other_seg_list) +{ + struct tcp_seg *next; + u16_t clen; + + LWIP_UNUSED_ARG(dbg_list_name); + LWIP_UNUSED_ARG(dbg_other_seg_list); + + while (seg_list != NULL && + TCP_SEQ_LEQ(lwip_ntohl(seg_list->tcphdr->seqno) + + TCP_TCPLEN(seg_list), ackno)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: removing %"U32_F":%"U32_F" from pcb->%s\n", + lwip_ntohl(seg_list->tcphdr->seqno), + lwip_ntohl(seg_list->tcphdr->seqno) + TCP_TCPLEN(seg_list), + dbg_list_name)); + + next = seg_list; + seg_list = seg_list->next; + + clen = pbuf_clen(next->p); + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_receive: queuelen %"TCPWNDSIZE_F" ... ", + (tcpwnd_size_t)pcb->snd_queuelen)); + LWIP_ASSERT("pcb->snd_queuelen >= pbuf_clen(next->p)", (pcb->snd_queuelen >= clen)); + + pcb->snd_queuelen = (u16_t)(pcb->snd_queuelen - clen); + recv_acked = (tcpwnd_size_t)(recv_acked + next->len); + tcp_seg_free(next); + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("%"TCPWNDSIZE_F" (after freeing %s)\n", + (tcpwnd_size_t)pcb->snd_queuelen, + dbg_list_name)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_receive: valid queue length", + seg_list != NULL || dbg_other_seg_list != NULL); + } + } + return seg_list; +} + +/** + * Called by tcp_process. Checks if the given segment is an ACK for outstanding + * data, and if so frees the memory of the buffered data. Next, it places the + * segment on any of the receive queues (pcb->recved or pcb->ooseq). If the segment + * is buffered, the pbuf is referenced by pbuf_ref so that it will not be freed until + * it has been removed from the buffer. + * + * If the incoming segment constitutes an ACK for a segment that was used for RTT + * estimation, the RTT is estimated here as well. + * + * Called from tcp_process(). + */ +static void +tcp_receive(struct tcp_pcb *pcb) +{ + s16_t m; + u32_t right_wnd_edge; + int found_dupack = 0; + + LWIP_ASSERT("tcp_receive: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_receive: wrong state", pcb->state >= ESTABLISHED); + + if (flags & TCP_ACK) { + right_wnd_edge = pcb->snd_wnd + pcb->snd_wl2; + + /* Update window. */ + if (TCP_SEQ_LT(pcb->snd_wl1, seqno) || + (pcb->snd_wl1 == seqno && TCP_SEQ_LT(pcb->snd_wl2, ackno)) || + (pcb->snd_wl2 == ackno && (u32_t)SND_WND_SCALE(pcb, tcphdr->wnd) > pcb->snd_wnd)) { + pcb->snd_wnd = SND_WND_SCALE(pcb, tcphdr->wnd); + /* keep track of the biggest window announced by the remote host to calculate + the maximum segment size */ + if (pcb->snd_wnd_max < pcb->snd_wnd) { + pcb->snd_wnd_max = pcb->snd_wnd; + } + pcb->snd_wl1 = seqno; + pcb->snd_wl2 = ackno; + LWIP_DEBUGF(TCP_WND_DEBUG, ("tcp_receive: window update %"TCPWNDSIZE_F"\n", pcb->snd_wnd)); +#if TCP_WND_DEBUG + } else { + if (pcb->snd_wnd != (tcpwnd_size_t)SND_WND_SCALE(pcb, tcphdr->wnd)) { + LWIP_DEBUGF(TCP_WND_DEBUG, + ("tcp_receive: no window update lastack %"U32_F" ackno %" + U32_F" wl1 %"U32_F" seqno %"U32_F" wl2 %"U32_F"\n", + pcb->lastack, ackno, pcb->snd_wl1, seqno, pcb->snd_wl2)); + } +#endif /* TCP_WND_DEBUG */ + } + + /* (From Stevens TCP/IP Illustrated Vol II, p970.) Its only a + * duplicate ack if: + * 1) It doesn't ACK new data + * 2) length of received packet is zero (i.e. 
no payload) + * 3) the advertised window hasn't changed + * 4) There is outstanding unacknowledged data (retransmission timer running) + * 5) The ACK is == biggest ACK sequence number so far seen (snd_una) + * + * If it passes all five, should process as a dupack: + * a) dupacks < 3: do nothing + * b) dupacks == 3: fast retransmit + * c) dupacks > 3: increase cwnd + * + * If it only passes 1-3, should reset dupack counter (and add to + * stats, which we don't do in lwIP) + * + * If it only passes 1, should reset dupack counter + * + */ + + /* Clause 1 */ + if (TCP_SEQ_LEQ(ackno, pcb->lastack)) { + /* Clause 2 */ + if (tcplen == 0) { + /* Clause 3 */ + if (pcb->snd_wl2 + pcb->snd_wnd == right_wnd_edge) { + /* Clause 4 */ + if (pcb->rtime >= 0) { + /* Clause 5 */ + if (pcb->lastack == ackno) { + found_dupack = 1; + if ((u8_t)(pcb->dupacks + 1) > pcb->dupacks) { + ++pcb->dupacks; + } + if (pcb->dupacks > 3) { + /* Inflate the congestion window */ + TCP_WND_INC(pcb->cwnd, pcb->mss); + } + if (pcb->dupacks >= 3) { + /* Do fast retransmit (checked via TF_INFR, not via dupacks count) */ + tcp_rexmit_fast(pcb); + } + } + } + } + } + /* If Clause (1) or more is true, but not a duplicate ack, reset + * count of consecutive duplicate acks */ + if (!found_dupack) { + pcb->dupacks = 0; + } + } else if (TCP_SEQ_BETWEEN(ackno, pcb->lastack + 1, pcb->snd_nxt)) { + /* We come here when the ACK acknowledges new data. */ + tcpwnd_size_t acked; + + /* Reset the "IN Fast Retransmit" flag, since we are no longer + in fast retransmit. Also reset the congestion window to the + slow start threshold. */ + if (pcb->flags & TF_INFR) { + tcp_clear_flags(pcb, TF_INFR); + pcb->cwnd = pcb->ssthresh; + pcb->bytes_acked = 0; + } + + /* Reset the number of retransmissions. */ + pcb->nrtx = 0; + + /* Reset the retransmission time-out. */ + pcb->rto = (s16_t)((pcb->sa >> 3) + pcb->sv); + + /* Record how much data this ACK acks */ + acked = (tcpwnd_size_t)(ackno - pcb->lastack); + + /* Reset the fast retransmit variables. */ + pcb->dupacks = 0; + pcb->lastack = ackno; + + /* Update the congestion control variables (cwnd and + ssthresh). */ + if (pcb->state >= ESTABLISHED) { + if (pcb->cwnd < pcb->ssthresh) { + tcpwnd_size_t increase; + /* limit to 1 SMSS segment during period following RTO */ + u8_t num_seg = (pcb->flags & TF_RTO) ? 1 : 2; + /* RFC 3465, section 2.2 Slow Start */ + increase = LWIP_MIN(acked, (tcpwnd_size_t)(num_seg * pcb->mss)); + TCP_WND_INC(pcb->cwnd, increase); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: slow start cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd)); + } else { + /* RFC 3465, section 2.1 Congestion Avoidance */ + TCP_WND_INC(pcb->bytes_acked, acked); + if (pcb->bytes_acked >= pcb->cwnd) { + pcb->bytes_acked = (tcpwnd_size_t)(pcb->bytes_acked - pcb->cwnd); + TCP_WND_INC(pcb->cwnd, pcb->mss); + } + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: congestion avoidance cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd)); + } + } + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: ACK for %"U32_F", unacked->seqno %"U32_F":%"U32_F"\n", + ackno, + pcb->unacked != NULL ? + lwip_ntohl(pcb->unacked->tcphdr->seqno) : 0, + pcb->unacked != NULL ? + lwip_ntohl(pcb->unacked->tcphdr->seqno) + TCP_TCPLEN(pcb->unacked) : 0)); + + /* Remove segment from the unacknowledged list if the incoming + ACK acknowledges them. */ + pcb->unacked = tcp_free_acked_segments(pcb, pcb->unacked, "unacked", pcb->unsent); + /* We go through the ->unsent list to see if any of the segments + on the list are acknowledged by the ACK. 
This may seem + strange since an "unsent" segment shouldn't be acked. The + rationale is that lwIP puts all outstanding segments on the + ->unsent list after a retransmission, so these segments may + in fact have been sent once. */ + pcb->unsent = tcp_free_acked_segments(pcb, pcb->unsent, "unsent", pcb->unacked); + + /* If there's nothing left to acknowledge, stop the retransmit + timer, otherwise reset it to start again */ + if (pcb->unacked == NULL) { + pcb->rtime = -1; + } else { + pcb->rtime = 0; + } + + pcb->polltmr = 0; + +#if TCP_OVERSIZE + if (pcb->unsent == NULL) { + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + +#if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS + if (ip_current_is_v6()) { + /* Inform neighbor reachability of forward progress. */ + nd6_reachability_hint(ip6_current_src_addr()); + } +#endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/ + + pcb->snd_buf = (tcpwnd_size_t)(pcb->snd_buf + recv_acked); + /* check if this ACK ends our retransmission of in-flight data */ + if (pcb->flags & TF_RTO) { + /* RTO is done if + 1) both queues are empty or + 2) unacked is empty and unsent head contains data not part of RTO or + 3) unacked head contains data not part of RTO */ + if (pcb->unacked == NULL) { + if ((pcb->unsent == NULL) || + (TCP_SEQ_LEQ(pcb->rto_end, lwip_ntohl(pcb->unsent->tcphdr->seqno)))) { + tcp_clear_flags(pcb, TF_RTO); + } + } else if (TCP_SEQ_LEQ(pcb->rto_end, lwip_ntohl(pcb->unacked->tcphdr->seqno))) { + tcp_clear_flags(pcb, TF_RTO); + } + } + /* End of ACK for new data processing. */ + } else { + /* Out of sequence ACK, didn't really ack anything */ + tcp_send_empty_ack(pcb); + } + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: pcb->rttest %"U32_F" rtseq %"U32_F" ackno %"U32_F"\n", + pcb->rttest, pcb->rtseq, ackno)); + + /* RTT estimation calculations. This is done by checking if the + incoming segment acknowledges the segment we use to take a + round-trip time measurement. */ + if (pcb->rttest && TCP_SEQ_LT(pcb->rtseq, ackno)) { + /* diff between this shouldn't exceed 32K since this are tcp timer ticks + and a round-trip shouldn't be that long... */ + m = (s16_t)(tcp_ticks - pcb->rttest); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: experienced rtt %"U16_F" ticks (%"U16_F" msec).\n", + m, (u16_t)(m * TCP_SLOW_INTERVAL))); + + /* This is taken directly from VJs original code in his paper */ + m = (s16_t)(m - (pcb->sa >> 3)); + pcb->sa = (s16_t)(pcb->sa + m); + if (m < 0) { + m = (s16_t) - m; + } + m = (s16_t)(m - (pcb->sv >> 2)); + pcb->sv = (s16_t)(pcb->sv + m); + pcb->rto = (s16_t)((pcb->sa >> 3) + pcb->sv); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: RTO %"U16_F" (%"U16_F" milliseconds)\n", + pcb->rto, (u16_t)(pcb->rto * TCP_SLOW_INTERVAL))); + + pcb->rttest = 0; + } + } + + /* If the incoming segment contains data, we must process it + further unless the pcb already received a FIN. + (RFC 793, chapter 3.9, "SEGMENT ARRIVES" in states CLOSE-WAIT, CLOSING, + LAST-ACK and TIME-WAIT: "Ignore the segment text.") */ + if ((tcplen > 0) && (pcb->state < CLOSE_WAIT)) { + /* This code basically does three things: + + +) If the incoming segment contains data that is the next + in-sequence data, this data is passed to the application. This + might involve trimming the first edge of the data. The rcv_nxt + variable and the advertised window are adjusted. + + +) If the incoming segment has data that is above the next + sequence number expected (->rcv_nxt), the segment is placed on + the ->ooseq queue. 
This is done by finding the appropriate + place in the ->ooseq queue (which is ordered by sequence + number) and trim the segment in both ends if needed. An + immediate ACK is sent to indicate that we received an + out-of-sequence segment. + + +) Finally, we check if the first segment on the ->ooseq queue + now is in sequence (i.e., if rcv_nxt >= ooseq->seqno). If + rcv_nxt > ooseq->seqno, we must trim the first edge of the + segment on ->ooseq before we adjust rcv_nxt. The data in the + segments that are now on sequence are chained onto the + incoming segment so that we only need to call the application + once. + */ + + /* First, we check if we must trim the first edge. We have to do + this if the sequence number of the incoming segment is less + than rcv_nxt, and the sequence number plus the length of the + segment is larger than rcv_nxt. */ + /* if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) { + if (TCP_SEQ_LT(pcb->rcv_nxt, seqno + tcplen)) {*/ + if (TCP_SEQ_BETWEEN(pcb->rcv_nxt, seqno + 1, seqno + tcplen - 1)) { + /* Trimming the first edge is done by pushing the payload + pointer in the pbuf downwards. This is somewhat tricky since + we do not want to discard the full contents of the pbuf up to + the new starting point of the data since we have to keep the + TCP header which is present in the first pbuf in the chain. + + What is done is really quite a nasty hack: the first pbuf in + the pbuf chain is pointed to by inseg.p. Since we need to be + able to deallocate the whole pbuf, we cannot change this + inseg.p pointer to point to any of the later pbufs in the + chain. Instead, we point the ->payload pointer in the first + pbuf to data in one of the later pbufs. We also set the + inseg.data pointer to point to the right place. This way, the + ->p pointer will still point to the first pbuf, but the + ->p->payload pointer will point to data in another pbuf. + + After we are done with adjusting the pbuf pointers we must + adjust the ->data pointer in the seg and the segment + length.*/ + + struct pbuf *p = inseg.p; + u32_t off32 = pcb->rcv_nxt - seqno; + u16_t new_tot_len, off; + LWIP_ASSERT("inseg.p != NULL", inseg.p); + LWIP_ASSERT("insane offset!", (off32 < 0xffff)); + off = (u16_t)off32; + LWIP_ASSERT("pbuf too short!", (((s32_t)inseg.p->tot_len) >= off)); + inseg.len -= off; + new_tot_len = (u16_t)(inseg.p->tot_len - off); + while (p->len < off) { + off -= p->len; + /* all pbufs up to and including this one have len==0, so tot_len is equal */ + p->tot_len = new_tot_len; + p->len = 0; + p = p->next; + } + /* cannot fail... */ + pbuf_remove_header(p, off); + inseg.tcphdr->seqno = seqno = pcb->rcv_nxt; + } else { + if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) { + /* the whole segment is < rcv_nxt */ + /* must be a duplicate of a packet that has already been correctly handled */ + + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: duplicate seqno %"U32_F"\n", seqno)); + tcp_ack_now(pcb); + } + } + + /* The sequence number must be within the window (above rcv_nxt + and below rcv_nxt + rcv_wnd) in order to be further + processed. */ + if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, + pcb->rcv_nxt + pcb->rcv_wnd - 1)) { + if (pcb->rcv_nxt == seqno) { + /* The incoming segment is the next in sequence. We check if + we have to trim the end of the segment and update rcv_nxt + and pass the data to the application. 
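+             For example (sketch): with rcv_nxt == 1000 and rcv_wnd == 500,
+             an in-sequence segment of 600 bytes is trimmed to 500 bytes so
+             that seqno + tcplen == rcv_nxt + rcv_wnd; a FIN riding on the
+             trimmed part is dropped together with it.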
*/ + tcplen = TCP_TCPLEN(&inseg); + + if (tcplen > pcb->rcv_wnd) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: other end overran receive window" + "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n", + seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd)); + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + /* Must remove the FIN from the header as we're trimming + * that byte of sequence-space from the packet */ + TCPH_FLAGS_SET(inseg.tcphdr, TCPH_FLAGS(inseg.tcphdr) & ~(unsigned int)TCP_FIN); + } + /* Adjust length of segment to fit in the window. */ + TCPWND_CHECK16(pcb->rcv_wnd); + inseg.len = (u16_t)pcb->rcv_wnd; + if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) { + inseg.len -= 1; + } + pbuf_realloc(inseg.p, inseg.len); + tcplen = TCP_TCPLEN(&inseg); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n", + (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd)); + } +#if TCP_QUEUE_OOSEQ + /* Received in-sequence data, adjust ooseq data if: + - FIN has been received or + - inseq overlaps with ooseq */ + if (pcb->ooseq != NULL) { + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: received in-order FIN, binning ooseq queue\n")); + /* Received in-order FIN means anything that was received + * out of order must now have been received in-order, so + * bin the ooseq queue */ + while (pcb->ooseq != NULL) { + struct tcp_seg *old_ooseq = pcb->ooseq; + pcb->ooseq = pcb->ooseq->next; + tcp_seg_free(old_ooseq); + } + } else { + struct tcp_seg *next = pcb->ooseq; + /* Remove all segments on ooseq that are covered by inseg already. + * FIN is copied from ooseq to inseg if present. */ + while (next && + TCP_SEQ_GEQ(seqno + tcplen, + next->tcphdr->seqno + next->len)) { + struct tcp_seg *tmp; + /* inseg cannot have FIN here (already processed above) */ + if ((TCPH_FLAGS(next->tcphdr) & TCP_FIN) != 0 && + (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) == 0) { + TCPH_SET_FLAG(inseg.tcphdr, TCP_FIN); + tcplen = TCP_TCPLEN(&inseg); + } + tmp = next; + next = next->next; + tcp_seg_free(tmp); + } + /* Now trim right side of inseg if it overlaps with the first + * segment on ooseq */ + if (next && + TCP_SEQ_GT(seqno + tcplen, + next->tcphdr->seqno)) { + /* inseg cannot have FIN here (already processed above) */ + inseg.len = (u16_t)(next->tcphdr->seqno - seqno); + if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) { + inseg.len -= 1; + } + pbuf_realloc(inseg.p, inseg.len); + tcplen = TCP_TCPLEN(&inseg); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to ooseq queue\n", + (seqno + tcplen) == next->tcphdr->seqno); + } + pcb->ooseq = next; + } + } +#endif /* TCP_QUEUE_OOSEQ */ + + pcb->rcv_nxt = seqno + tcplen; + + /* Update the receiver's (our) window. */ + LWIP_ASSERT("tcp_receive: tcplen > rcv_wnd\n", pcb->rcv_wnd >= tcplen); + pcb->rcv_wnd -= tcplen; + + tcp_update_rcv_ann_wnd(pcb); + + /* If there is data in the segment, we make preparations to + pass this up to the application. The ->recv_data variable + is used for holding the pbuf that goes to the + application. The code for reassembling out-of-sequence data + chains its data on this pbuf as well. + + If the segment was a FIN, we set the TF_GOT_FIN flag that will + be used to indicate to the application that the remote side has + closed its end of the connection. */ + if (inseg.p->tot_len > 0) { + recv_data = inseg.p; + /* Since this pbuf now is the responsibility of the + application, we delete our reference to it so that we won't + (mistakingly) deallocate it. 
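+             From this point on the application owns recv_data. A raw-API
+             recv callback (sketch; names are illustrative, not part of
+             this file) would typically do:
+
+               static err_t my_recv(void *arg, struct tcp_pcb *tpcb,
+                                    struct pbuf *p, err_t err)
+               {
+                 if (p == NULL) {               -- NULL pbuf signals a FIN
+                   return tcp_close(tpcb);
+                 }
+                 tcp_recved(tpcb, p->tot_len);  -- re-open the receive window
+                 pbuf_free(p);                  -- drop our reference
+                 return ERR_OK;
+               }
+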
*/ + inseg.p = NULL; + } + if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: received FIN.\n")); + recv_flags |= TF_GOT_FIN; + } + +#if TCP_QUEUE_OOSEQ + /* We now check if we have segments on the ->ooseq queue that + are now in sequence. */ + while (pcb->ooseq != NULL && + pcb->ooseq->tcphdr->seqno == pcb->rcv_nxt) { + + struct tcp_seg *cseg = pcb->ooseq; + seqno = pcb->ooseq->tcphdr->seqno; + + pcb->rcv_nxt += TCP_TCPLEN(cseg); + LWIP_ASSERT("tcp_receive: ooseq tcplen > rcv_wnd\n", + pcb->rcv_wnd >= TCP_TCPLEN(cseg)); + pcb->rcv_wnd -= TCP_TCPLEN(cseg); + + tcp_update_rcv_ann_wnd(pcb); + + if (cseg->p->tot_len > 0) { + /* Chain this pbuf onto the pbuf that we will pass to + the application. */ + /* With window scaling, this can overflow recv_data->tot_len, but + that's not a problem since we explicitly fix that before passing + recv_data to the application. */ + if (recv_data) { + pbuf_cat(recv_data, cseg->p); + } else { + recv_data = cseg->p; + } + cseg->p = NULL; + } + if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: dequeued FIN.\n")); + recv_flags |= TF_GOT_FIN; + if (pcb->state == ESTABLISHED) { /* force passive close or we can move to active close */ + pcb->state = CLOSE_WAIT; + } + } + + pcb->ooseq = cseg->next; + tcp_seg_free(cseg); + } +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + if (pcb->ooseq != NULL) { + /* Some segments may have been removed from ooseq, let's remove all SACKs that + describe anything before the new beginning of that list. */ + tcp_remove_sacks_lt(pcb, pcb->ooseq->tcphdr->seqno); + } else if (LWIP_TCP_SACK_VALID(pcb, 0)) { + /* ooseq has been cleared. Nothing to SACK */ + memset(pcb->rcv_sacks, 0, sizeof(pcb->rcv_sacks)); + } + } +#endif /* LWIP_TCP_SACK_OUT */ +#endif /* TCP_QUEUE_OOSEQ */ + + + /* Acknowledge the segment(s). */ + tcp_ack(pcb); + +#if LWIP_TCP_SACK_OUT + if (LWIP_TCP_SACK_VALID(pcb, 0)) { + /* Normally the ACK for the data received could be piggy-backed on a data packet, + but lwIP currently does not support including SACKs in data packets. So we force + it to respond with an empty ACK packet (only if there is at least one SACK to be sent). + NOTE: tcp_send_empty_ack() on success clears the ACK flags (set by tcp_ack()) */ + tcp_send_empty_ack(pcb); + } +#endif /* LWIP_TCP_SACK_OUT */ + +#if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS + if (ip_current_is_v6()) { + /* Inform neighbor reachability of forward progress. */ + nd6_reachability_hint(ip6_current_src_addr()); + } +#endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/ + + } else { + /* We get here if the incoming segment is out-of-sequence. */ + +#if TCP_QUEUE_OOSEQ + /* We queue the segment on the ->ooseq queue. */ + if (pcb->ooseq == NULL) { + pcb->ooseq = tcp_seg_copy(&inseg); +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + /* All the SACKs should be invalid, so we can simply store the most recent one: */ + pcb->rcv_sacks[0].left = seqno; + pcb->rcv_sacks[0].right = seqno + inseg.len; + } +#endif /* LWIP_TCP_SACK_OUT */ + } else { + /* If the queue is not empty, we walk through the queue and + try to find a place where the sequence number of the + incoming segment is between the sequence numbers of the + previous and the next segment on the ->ooseq queue. That is + the place where we put the incoming segment. If needed, we + trim the second edges of the previous and the incoming + segment so that it will fit into the sequence. 
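+           For example (sketch): with ->ooseq holding [1000,1100) and
+           [1200,1300), an incoming segment [1050,1250) first trims the
+           previous segment to [1000,1050), is inserted after it, and is
+           itself trimmed to [1050,1200) against the following segment.
+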
+ + If the incoming segment has the same sequence number as a + segment on the ->ooseq queue, we discard the segment that + contains less data. */ + +#if LWIP_TCP_SACK_OUT + /* This is the left edge of the lowest possible SACK range. + It may start before the newly received segment (possibly adjusted below). */ + u32_t sackbeg = TCP_SEQ_LT(seqno, pcb->ooseq->tcphdr->seqno) ? seqno : pcb->ooseq->tcphdr->seqno; +#endif /* LWIP_TCP_SACK_OUT */ + struct tcp_seg *next, *prev = NULL; + for (next = pcb->ooseq; next != NULL; next = next->next) { + if (seqno == next->tcphdr->seqno) { + /* The sequence number of the incoming segment is the + same as the sequence number of the segment on + ->ooseq. We check the lengths to see which one to + discard. */ + if (inseg.len > next->len) { + /* The incoming segment is larger than the old + segment. We replace some segments with the new + one. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + if (prev != NULL) { + prev->next = cseg; + } else { + pcb->ooseq = cseg; + } + tcp_oos_insert_segment(cseg, next); + } + break; + } else { + /* Either the lengths are the same or the incoming + segment was smaller than the old one; in either + case, we ditch the incoming segment. */ + break; + } + } else { + if (prev == NULL) { + if (TCP_SEQ_LT(seqno, next->tcphdr->seqno)) { + /* The sequence number of the incoming segment is lower + than the sequence number of the first segment on the + queue. We put the incoming segment first on the + queue. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + pcb->ooseq = cseg; + tcp_oos_insert_segment(cseg, next); + } + break; + } + } else { + /*if (TCP_SEQ_LT(prev->tcphdr->seqno, seqno) && + TCP_SEQ_LT(seqno, next->tcphdr->seqno)) {*/ + if (TCP_SEQ_BETWEEN(seqno, prev->tcphdr->seqno + 1, next->tcphdr->seqno - 1)) { + /* The sequence number of the incoming segment is in + between the sequence numbers of the previous and + the next segment on ->ooseq. We trim trim the previous + segment, delete next segments that included in received segment + and trim received, if needed. */ + struct tcp_seg *cseg = tcp_seg_copy(&inseg); + if (cseg != NULL) { + if (TCP_SEQ_GT(prev->tcphdr->seqno + prev->len, seqno)) { + /* We need to trim the prev segment. */ + prev->len = (u16_t)(seqno - prev->tcphdr->seqno); + pbuf_realloc(prev->p, prev->len); + } + prev->next = cseg; + tcp_oos_insert_segment(cseg, next); + } + break; + } + } + +#if LWIP_TCP_SACK_OUT + /* The new segment goes after the 'next' one. If there is a "hole" in sequence numbers + between 'prev' and the beginning of 'next', we want to move sackbeg. */ + if (prev != NULL && prev->tcphdr->seqno + prev->len != next->tcphdr->seqno) { + sackbeg = next->tcphdr->seqno; + } +#endif /* LWIP_TCP_SACK_OUT */ + + /* We don't use 'prev' below, so let's set it to current 'next'. + This way even if we break the loop below, 'prev' will be pointing + at the segment right in front of the newly added one. */ + prev = next; + + /* If the "next" segment is the last segment on the + ooseq queue, we add the incoming segment to the end + of the list. */ + if (next->next == NULL && + TCP_SEQ_GT(seqno, next->tcphdr->seqno)) { + if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) { + /* segment "next" already contains all data */ + break; + } + next->next = tcp_seg_copy(&inseg); + if (next->next != NULL) { + if (TCP_SEQ_GT(next->tcphdr->seqno + next->len, seqno)) { + /* We need to trim the last segment. 
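+                     (the tail of 'next' overlaps the first byte of the new
+                     segment, so 'next' is shortened to end at 'seqno')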
*/ + next->len = (u16_t)(seqno - next->tcphdr->seqno); + pbuf_realloc(next->p, next->len); + } + /* check if the remote side overruns our receive window */ + if (TCP_SEQ_GT((u32_t)tcplen + seqno, pcb->rcv_nxt + (u32_t)pcb->rcv_wnd)) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, + ("tcp_receive: other end overran receive window" + "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n", + seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd)); + if (TCPH_FLAGS(next->next->tcphdr) & TCP_FIN) { + /* Must remove the FIN from the header as we're trimming + * that byte of sequence-space from the packet */ + TCPH_FLAGS_SET(next->next->tcphdr, TCPH_FLAGS(next->next->tcphdr) & ~TCP_FIN); + } + /* Adjust length of segment to fit in the window. */ + next->next->len = (u16_t)(pcb->rcv_nxt + pcb->rcv_wnd - seqno); + pbuf_realloc(next->next->p, next->next->len); + tcplen = TCP_TCPLEN(next->next); + LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n", + (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd)); + } + } + break; + } + } + } + +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + if (prev == NULL) { + /* The new segment is at the beginning. sackbeg should already be set properly. + We need to find the right edge. */ + next = pcb->ooseq; + } else if (prev->next != NULL) { + /* The new segment was added after 'prev'. If there is a "hole" between 'prev' and 'prev->next', + we need to move sackbeg. After that we should find the right edge. */ + next = prev->next; + if (prev->tcphdr->seqno + prev->len != next->tcphdr->seqno) { + sackbeg = next->tcphdr->seqno; + } + } else { + next = NULL; + } + if (next != NULL) { + u32_t sackend = next->tcphdr->seqno; + for ( ; (next != NULL) && (sackend == next->tcphdr->seqno); next = next->next) { + sackend += next->len; + } + tcp_add_sack(pcb, sackbeg, sackend); + } + } +#endif /* LWIP_TCP_SACK_OUT */ + } +#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT) + { + /* Check that the data on ooseq doesn't exceed one of the limits + and throw away everything above that limit. */ +#ifdef TCP_OOSEQ_BYTES_LIMIT + const u32_t ooseq_max_blen = TCP_OOSEQ_BYTES_LIMIT(pcb); + u32_t ooseq_blen = 0; +#endif +#ifdef TCP_OOSEQ_PBUFS_LIMIT + const u16_t ooseq_max_qlen = TCP_OOSEQ_PBUFS_LIMIT(pcb); + u16_t ooseq_qlen = 0; +#endif + struct tcp_seg *next, *prev = NULL; + for (next = pcb->ooseq; next != NULL; prev = next, next = next->next) { + struct pbuf *p = next->p; + int stop_here = 0; +#ifdef TCP_OOSEQ_BYTES_LIMIT + ooseq_blen += p->tot_len; + if (ooseq_blen > ooseq_max_blen) { + stop_here = 1; + } +#endif +#ifdef TCP_OOSEQ_PBUFS_LIMIT + ooseq_qlen += pbuf_clen(p); + if (ooseq_qlen > ooseq_max_qlen) { + stop_here = 1; + } +#endif + if (stop_here) { +#if LWIP_TCP_SACK_OUT + if (pcb->flags & TF_SACK) { + /* Let's remove all SACKs from next's seqno up. */ + tcp_remove_sacks_gt(pcb, next->tcphdr->seqno); + } +#endif /* LWIP_TCP_SACK_OUT */ + /* too much ooseq data, dump this and everything after it */ + tcp_segs_free(next); + if (prev == NULL) { + /* first ooseq segment is too much, dump the whole queue */ + pcb->ooseq = NULL; + } else { + /* just dump 'next' and everything after it */ + prev->next = NULL; + } + break; + } + } + } +#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */ +#endif /* TCP_QUEUE_OOSEQ */ + + /* We send the ACK packet after we've (potentially) dealt with SACKs, + so they can be included in the acknowledgment. */ + tcp_send_empty_ack(pcb); + } + } else { + /* The incoming segment is not within the window. 
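+           An empty ACK carrying the current rcv_nxt is sent so that the
+           peer can resynchronize (RFC 793: if an incoming segment is not
+           acceptable, an acknowledgment should be sent in reply).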
*/ + tcp_send_empty_ack(pcb); + } + } else { + /* Segments with length 0 is taken care of here. Segments that + fall out of the window are ACKed. */ + if (!TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd - 1)) { + tcp_ack_now(pcb); + } + } +} + +static u8_t +tcp_get_next_optbyte(void) +{ + u16_t optidx = tcp_optidx++; + if ((tcphdr_opt2 == NULL) || (optidx < tcphdr_opt1len)) { + u8_t *opts = (u8_t *)tcphdr + TCP_HLEN; + return opts[optidx]; + } else { + u8_t idx = (u8_t)(optidx - tcphdr_opt1len); + return tcphdr_opt2[idx]; + } +} + +/** + * Parses the options contained in the incoming segment. + * + * Called from tcp_listen_input() and tcp_process(). + * Currently, only the MSS option is supported! + * + * @param pcb the tcp_pcb for which a segment arrived + */ +static void +tcp_parseopt(struct tcp_pcb *pcb) +{ + u8_t data; + u16_t mss; +#if LWIP_TCP_TIMESTAMPS + u32_t tsval; +#endif + + LWIP_ASSERT("tcp_parseopt: invalid pcb", pcb != NULL); + + /* Parse the TCP MSS option, if present. */ + if (tcphdr_optlen != 0) { + for (tcp_optidx = 0; tcp_optidx < tcphdr_optlen; ) { + u8_t opt = tcp_get_next_optbyte(); + switch (opt) { + case LWIP_TCP_OPT_EOL: + /* End of options. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: EOL\n")); + return; + case LWIP_TCP_OPT_NOP: + /* NOP option. */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: NOP\n")); + break; + case LWIP_TCP_OPT_MSS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: MSS\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_MSS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_MSS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* An MSS option with the right option length. */ + mss = (u16_t)(tcp_get_next_optbyte() << 8); + mss |= tcp_get_next_optbyte(); + /* Limit the mss to the configured TCP_MSS and prevent division by zero */ + pcb->mss = ((mss > TCP_MSS) || (mss == 0)) ? TCP_MSS : mss; + break; +#if LWIP_WND_SCALE + case LWIP_TCP_OPT_WS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: WND_SCALE\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_WS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_WS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* An WND_SCALE option with the right option length. 
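+             The shift count is clamped to 14 below, as required by
+             RFC 7323, which bounds the scaled window to 65535 << 14
+             (about 1 GiB).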
*/ + data = tcp_get_next_optbyte(); + /* If syn was received with wnd scale option, + activate wnd scale opt, but only if this is not a retransmission */ + if ((flags & TCP_SYN) && !(pcb->flags & TF_WND_SCALE)) { + pcb->snd_scale = data; + if (pcb->snd_scale > 14U) { + pcb->snd_scale = 14U; + } + pcb->rcv_scale = TCP_RCV_SCALE; + tcp_set_flags(pcb, TF_WND_SCALE); + /* window scaling is enabled, we can use the full receive window */ + LWIP_ASSERT("window not at default value", pcb->rcv_wnd == TCPWND_MIN16(TCP_WND)); + LWIP_ASSERT("window not at default value", pcb->rcv_ann_wnd == TCPWND_MIN16(TCP_WND)); + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCP_WND; + } + break; +#endif /* LWIP_WND_SCALE */ +#if LWIP_TCP_TIMESTAMPS + case LWIP_TCP_OPT_TS: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: TS\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_TS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_TS) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* TCP timestamp option with valid length */ + tsval = tcp_get_next_optbyte(); + tsval |= (tcp_get_next_optbyte() << 8); + tsval |= (tcp_get_next_optbyte() << 16); + tsval |= (tcp_get_next_optbyte() << 24); + if (flags & TCP_SYN) { + pcb->ts_recent = lwip_ntohl(tsval); + /* Enable sending timestamps in every segment now that we know + the remote host supports it. */ + tcp_set_flags(pcb, TF_TIMESTAMP); + } else if (TCP_SEQ_BETWEEN(pcb->ts_lastacksent, seqno, seqno + tcplen)) { + pcb->ts_recent = lwip_ntohl(tsval); + } + /* Advance to next option (6 bytes already read) */ + tcp_optidx += LWIP_TCP_OPT_LEN_TS - 6; + break; +#endif /* LWIP_TCP_TIMESTAMPS */ +#if LWIP_TCP_SACK_OUT + case LWIP_TCP_OPT_SACK_PERM: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: SACK_PERM\n")); + if (tcp_get_next_optbyte() != LWIP_TCP_OPT_LEN_SACK_PERM || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_SACK_PERM) > tcphdr_optlen) { + /* Bad length */ + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + return; + } + /* TCP SACK_PERM option with valid length */ + if (flags & TCP_SYN) { + /* We only set it if we receive it in a SYN (or SYN+ACK) packet */ + tcp_set_flags(pcb, TF_SACK); + } + break; +#endif /* LWIP_TCP_SACK_OUT */ + default: + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: other\n")); + data = tcp_get_next_optbyte(); + if (data < 2) { + LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); + /* If the length field is zero, the options are malformed + and we don't process them further. */ + return; + } + /* All other options have a length field, so that we easily + can skip past them. */ + tcp_optidx += data - 2; + } + } + } +} + +void +tcp_trigger_input_pcb_close(void) +{ + recv_flags |= TF_CLOSED; +} + +#if LWIP_TCP_SACK_OUT +/** + * Called by tcp_receive() to add new SACK entry. + * + * The new SACK entry will be placed at the beginning of rcv_sacks[], as the newest one. + * Existing SACK entries will be "pushed back", to preserve their order. + * This is the behavior described in RFC 2018, section 4. 
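+ * For example (sketch): with rcv_sacks[] holding {[5,9), [1,3)}, adding
+ * [10,14) yields {[10,14), [5,9), [1,3)}, while adding the overlapping
+ * range [4,11) swallows [5,9) and yields {[4,11), [1,3)}.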
+ * + * @param pcb the tcp_pcb for which a segment arrived + * @param left the left side of the SACK (the first sequence number) + * @param right the right side of the SACK (the first sequence number past this SACK) + */ +static void +tcp_add_sack(struct tcp_pcb *pcb, u32_t left, u32_t right) +{ + u8_t i; + u8_t unused_idx; + + if ((pcb->flags & TF_SACK) == 0 || !TCP_SEQ_LT(left, right)) { + return; + } + + /* First, let's remove all SACKs that are no longer needed (because they overlap with the newest one), + while moving all other SACKs forward. + We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at [i] if it doesn't overlap with left:right range. + It does not overlap if its right side is before the newly added SACK, + or if its left side is after the newly added SACK. + NOTE: The equality should not really happen, but it doesn't hurt. */ + if (TCP_SEQ_LEQ(pcb->rcv_sacks[i].right, left) || TCP_SEQ_LEQ(right, pcb->rcv_sacks[i].left)) { + if (unused_idx != i) { + /* We don't need to copy if it's already in the right spot */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + ++unused_idx; + } + } + + /* Now 'unused_idx' is the index of the first invalid SACK entry, + anywhere between 0 (no valid entries) and LWIP_TCP_MAX_SACK_NUM (all entries are valid). + We want to clear this and all following SACKs. + However, we will be adding another one in the front (and shifting everything else back). + So let's just iterate from the back, and set each entry to the one to the left if it's valid, + or to 0 if it is not. */ + for (i = LWIP_TCP_MAX_SACK_NUM - 1; i > 0; --i) { + /* [i] is the index we are setting, and the value should be at index [i-1], + or 0 if that index is unused (>= unused_idx). */ + if (i - 1 >= unused_idx) { + /* [i-1] is unused. Let's clear [i]. */ + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } else { + pcb->rcv_sacks[i] = pcb->rcv_sacks[i - 1]; + } + } + + /* And now we can store the newest SACK */ + pcb->rcv_sacks[0].left = left; + pcb->rcv_sacks[0].right = right; +} + +/** + * Called to remove a range of SACKs. + * + * SACK entries will be removed or adjusted to not acknowledge any sequence + * numbers that are less than 'seq' passed. It not only invalidates entries, + * but also moves all entries that are still valid to the beginning. + * + * @param pcb the tcp_pcb to modify + * @param seq the lowest sequence number to keep in SACK entries + */ +static void +tcp_remove_sacks_lt(struct tcp_pcb *pcb, u32_t seq) +{ + u8_t i; + u8_t unused_idx; + + /* We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at index [i] if its right side is > 'seq'. */ + if (TCP_SEQ_GT(pcb->rcv_sacks[i].right, seq)) { + if (unused_idx != i) { + /* We only copy it if it's not in the right spot already. */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + /* NOTE: It is possible that its left side is < 'seq', in which case we should adjust it. 
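+        (e.g. with seq == 5, a kept SACK [3,8) is adjusted to [5,8))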
*/ + if (TCP_SEQ_LT(pcb->rcv_sacks[unused_idx].left, seq)) { + pcb->rcv_sacks[unused_idx].left = seq; + } + ++unused_idx; + } + } + + /* We also need to invalidate everything from 'unused_idx' till the end */ + for (i = unused_idx; i < LWIP_TCP_MAX_SACK_NUM; ++i) { + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } +} + +#if defined(TCP_OOSEQ_BYTES_LIMIT) || defined(TCP_OOSEQ_PBUFS_LIMIT) +/** + * Called to remove a range of SACKs. + * + * SACK entries will be removed or adjusted to not acknowledge any sequence + * numbers that are greater than (or equal to) 'seq' passed. It not only invalidates entries, + * but also moves all entries that are still valid to the beginning. + * + * @param pcb the tcp_pcb to modify + * @param seq the highest sequence number to keep in SACK entries + */ +static void +tcp_remove_sacks_gt(struct tcp_pcb *pcb, u32_t seq) +{ + u8_t i; + u8_t unused_idx; + + /* We run this loop for all entries, until we find the first invalid one. + There is no point checking after that. */ + for (i = unused_idx = 0; (i < LWIP_TCP_MAX_SACK_NUM) && LWIP_TCP_SACK_VALID(pcb, i); ++i) { + /* We only want to use SACK at index [i] if its left side is < 'seq'. */ + if (TCP_SEQ_LT(pcb->rcv_sacks[i].left, seq)) { + if (unused_idx != i) { + /* We only copy it if it's not in the right spot already. */ + pcb->rcv_sacks[unused_idx] = pcb->rcv_sacks[i]; + } + /* NOTE: It is possible that its right side is > 'seq', in which case we should adjust it. */ + if (TCP_SEQ_GT(pcb->rcv_sacks[unused_idx].right, seq)) { + pcb->rcv_sacks[unused_idx].right = seq; + } + ++unused_idx; + } + } + + /* We also need to invalidate everything from 'unused_idx' till the end */ + for (i = unused_idx; i < LWIP_TCP_MAX_SACK_NUM; ++i) { + pcb->rcv_sacks[i].left = pcb->rcv_sacks[i].right = 0; + } +} +#endif /* TCP_OOSEQ_BYTES_LIMIT || TCP_OOSEQ_PBUFS_LIMIT */ + +#endif /* LWIP_TCP_SACK_OUT */ + +#endif /* LWIP_TCP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp_out.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp_out.c new file mode 100644 index 0000000..375e18d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/tcp_out.c @@ -0,0 +1,2190 @@ +/** + * @file + * Transmission Control Protocol, outgoing traffic + * + * The output functions of TCP. + * + * There are two distinct ways for TCP segments to get sent: + * - queued data: these are segments transferring data or segments containing + * SYN or FIN (which both count as one sequence number). They are created as + * struct @ref pbuf together with a struct tcp_seg and enqueue to the + * unsent list of the pcb. They are sent by tcp_output: + * - @ref tcp_write : creates data segments + * - @ref tcp_split_unsent_seg : splits a data segment + * - @ref tcp_enqueue_flags : creates SYN-only or FIN-only segments + * - @ref tcp_output / tcp_output_segment : finalize the tcp header + * (e.g. sequence numbers, options, checksum) and output to IP + * - the various tcp_rexmit functions shuffle around segments between the + * unsent an unacked lists to retransmit them + * - tcp_create_segment and tcp_pbuf_prealloc allocate pbuf and + * segment for these functions + * - direct send: these segments don't contain data but control the connection + * behaviour. 
They are created as pbuf only and sent directly without + * enqueueing them: + * - @ref tcp_send_empty_ack sends an ACK-only segment + * - @ref tcp_rst sends a RST segment + * - @ref tcp_keepalive sends a keepalive segment + * - @ref tcp_zero_window_probe sends a window probe segment + * - tcp_output_alloc_header allocates a header-only pbuf for these functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels <adam@sics.se> + * + */ + +#include "lwip/opt.h" + +#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/priv/tcp_priv.h" +#include "lwip/def.h" +#include "lwip/mem.h" +#include "lwip/memp.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/inet_chksum.h" +#include "lwip/stats.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#if LWIP_TCP_TIMESTAMPS +#include "lwip/sys.h" +#endif + +#include <string.h> + +#ifdef LWIP_HOOK_FILENAME +#include LWIP_HOOK_FILENAME +#endif + +/* Allow to add custom TCP header options by defining this hook */ +#ifdef LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH +#define LWIP_TCP_OPT_LENGTH_SEGMENT(flags, pcb) LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH(pcb, LWIP_TCP_OPT_LENGTH(flags)) +#else +#define LWIP_TCP_OPT_LENGTH_SEGMENT(flags, pcb) LWIP_TCP_OPT_LENGTH(flags) +#endif + +/* Define some copy-macros for checksum-on-copy so that the code looks + nicer by preventing too many ifdef's. 
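+   With TCP_CHECKSUM_ON_COPY enabled, TCP_DATA_COPY() folds the checksum of the
+   copied bytes into the segment via tcp_seg_add_chksum() and marks the segment
+   TF_SEG_DATA_CHECKSUMMED; without it, both macros are plain MEMCPY().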
*/ +#if TCP_CHECKSUM_ON_COPY +#define TCP_DATA_COPY(dst, src, len, seg) do { \ + tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), \ + len, &seg->chksum, &seg->chksum_swapped); \ + seg->flags |= TF_SEG_DATA_CHECKSUMMED; } while(0) +#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) \ + tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), len, chksum, chksum_swapped); +#else /* TCP_CHECKSUM_ON_COPY*/ +#define TCP_DATA_COPY(dst, src, len, seg) MEMCPY(dst, src, len) +#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) MEMCPY(dst, src, len) +#endif /* TCP_CHECKSUM_ON_COPY*/ + +/** Define this to 1 for an extra check that the output checksum is valid + * (useful when the checksum is generated by the application, not the stack) */ +#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK +#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK 0 +#endif +/* Allow to override the failure of sanity check from warning to e.g. hard failure */ +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK +#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL +#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL(msg) LWIP_DEBUGF(TCP_DEBUG | LWIP_DBG_LEVEL_WARNING, msg) +#endif +#endif + +#if TCP_OVERSIZE +/** The size of segment pbufs created when TCP_OVERSIZE is enabled */ +#ifndef TCP_OVERSIZE_CALC_LENGTH +#define TCP_OVERSIZE_CALC_LENGTH(length) ((length) + TCP_OVERSIZE) +#endif +#endif + +/* Forward declarations.*/ +static err_t tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif); + +/* tcp_route: common code that returns a fixed bound netif or calls ip_route */ +static struct netif * +tcp_route(const struct tcp_pcb *pcb, const ip_addr_t *src, const ip_addr_t *dst) +{ + LWIP_UNUSED_ARG(src); /* in case IPv4-only and source-based routing is disabled */ + + if ((pcb != NULL) && (pcb->netif_idx != NETIF_NO_INDEX)) { + return netif_get_by_index(pcb->netif_idx); + } else { + return ip_route(src, dst); + } +} + +/** + * Create a TCP segment with prefilled header. + * + * Called by @ref tcp_write, @ref tcp_enqueue_flags and @ref tcp_split_unsent_seg + * + * @param pcb Protocol control block for the TCP connection. + * @param p pbuf that is used to hold the TCP header. + * @param hdrflags TCP flags for header. + * @param seqno TCP sequence number of this packet + * @param optflags options to include in TCP header + * @return a new tcp_seg pointing to p, or NULL. + * The TCP header is filled in except ackno and wnd. + * p is freed on failure. 
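+ *
+ * Example usage (illustrative sketch only, mirroring how tcp_enqueue_flags()
+ * calls this function; not code from this file):
+ *
+ *   struct pbuf *q = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM);
+ *   if (q != NULL) {
+ *     struct tcp_seg *s = tcp_create_segment(pcb, q, TCP_SYN, pcb->snd_lbb, TF_SEG_OPTS_MSS);
+ *     if (s == NULL) {
+ *       return ERR_MEM; -- q has already been freed here
+ *     }
+ *   }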
+ */ +static struct tcp_seg * +tcp_create_segment(const struct tcp_pcb *pcb, struct pbuf *p, u8_t hdrflags, u32_t seqno, u8_t optflags) +{ + struct tcp_seg *seg; + u8_t optlen; + + LWIP_ASSERT("tcp_create_segment: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_create_segment: invalid pbuf", p != NULL); + + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + + if ((seg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no memory.\n")); + pbuf_free(p); + return NULL; + } + seg->flags = optflags; + seg->next = NULL; + seg->p = p; + LWIP_ASSERT("p->tot_len >= optlen", p->tot_len >= optlen); + seg->len = p->tot_len - optlen; +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + seg->chksum = 0; + seg->chksum_swapped = 0; + /* check optflags */ + LWIP_ASSERT("invalid optflags passed: TF_SEG_DATA_CHECKSUMMED", + (optflags & TF_SEG_DATA_CHECKSUMMED) == 0); +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* build TCP header */ + if (pbuf_add_header(p, TCP_HLEN)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no room for TCP header in pbuf.\n")); + TCP_STATS_INC(tcp.err); + tcp_seg_free(seg); + return NULL; + } + seg->tcphdr = (struct tcp_hdr *)seg->p->payload; + seg->tcphdr->src = lwip_htons(pcb->local_port); + seg->tcphdr->dest = lwip_htons(pcb->remote_port); + seg->tcphdr->seqno = lwip_htonl(seqno); + /* ackno is set in tcp_output */ + TCPH_HDRLEN_FLAGS_SET(seg->tcphdr, (5 + optlen / 4), hdrflags); + /* wnd and chksum are set in tcp_output */ + seg->tcphdr->urgp = 0; + return seg; +} + +/** + * Allocate a PBUF_RAM pbuf, perhaps with extra space at the end. + * + * This function is like pbuf_alloc(layer, length, PBUF_RAM) except + * there may be extra bytes available at the end. + * + * Called by @ref tcp_write + * + * @param layer flag to define header size. + * @param length size of the pbuf's payload. + * @param max_length maximum usable size of payload+oversize. + * @param oversize pointer to a u16_t that will receive the number of usable tail bytes. + * @param pcb The TCP connection that will enqueue the pbuf. + * @param apiflags API flags given to tcp_write. + * @param first_seg true when this pbuf will be used in the first enqueued segment. + */ +#if TCP_OVERSIZE +static struct pbuf * +tcp_pbuf_prealloc(pbuf_layer layer, u16_t length, u16_t max_length, + u16_t *oversize, const struct tcp_pcb *pcb, u8_t apiflags, + u8_t first_seg) +{ + struct pbuf *p; + u16_t alloc = length; + + LWIP_ASSERT("tcp_pbuf_prealloc: invalid oversize", oversize != NULL); + LWIP_ASSERT("tcp_pbuf_prealloc: invalid pcb", pcb != NULL); + +#if LWIP_NETIF_TX_SINGLE_PBUF + LWIP_UNUSED_ARG(max_length); + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(apiflags); + LWIP_UNUSED_ARG(first_seg); + alloc = max_length; +#else /* LWIP_NETIF_TX_SINGLE_PBUF */ + if (length < max_length) { + /* Should we allocate an oversized pbuf, or just the minimum + * length required? If tcp_write is going to be called again + * before this segment is transmitted, we want the oversized + * buffer. If the segment will be transmitted immediately, we can + * save memory by allocating only length. We use a simple + * heuristic based on the following information: + * + * Did the user set TCP_WRITE_FLAG_MORE? + * + * Will the Nagle algorithm defer transmission of this segment? 
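+       *
+       * If either holds, more data is likely to follow, so we allocate
+       * TCP_OVERSIZE extra bytes up front: e.g. with length == 100 and
+       * TCP_OVERSIZE == 536, alloc becomes LWIP_MIN(max_length, aligned 636).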
+ */ + if ((apiflags & TCP_WRITE_FLAG_MORE) || + (!(pcb->flags & TF_NODELAY) && + (!first_seg || + pcb->unsent != NULL || + pcb->unacked != NULL))) { + alloc = LWIP_MIN(max_length, LWIP_MEM_ALIGN_SIZE(TCP_OVERSIZE_CALC_LENGTH(length))); + } + } +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + p = pbuf_alloc(layer, alloc, PBUF_RAM); + if (p == NULL) { + return NULL; + } + LWIP_ASSERT("need unchained pbuf", p->next == NULL); + *oversize = p->len - length; + /* trim p->len to the currently used size */ + p->len = p->tot_len = length; + return p; +} +#else /* TCP_OVERSIZE */ +#define tcp_pbuf_prealloc(layer, length, mx, os, pcb, api, fst) pbuf_alloc((layer), (length), PBUF_RAM) +#endif /* TCP_OVERSIZE */ + +#if TCP_CHECKSUM_ON_COPY +/** Add a checksum of newly added data to the segment. + * + * Called by tcp_write and tcp_split_unsent_seg. + */ +static void +tcp_seg_add_chksum(u16_t chksum, u16_t len, u16_t *seg_chksum, + u8_t *seg_chksum_swapped) +{ + u32_t helper; + /* add chksum to old chksum and fold to u16_t */ + helper = chksum + *seg_chksum; + chksum = FOLD_U32T(helper); + if ((len & 1) != 0) { + *seg_chksum_swapped = 1 - *seg_chksum_swapped; + chksum = SWAP_BYTES_IN_WORD(chksum); + } + *seg_chksum = chksum; +} +#endif /* TCP_CHECKSUM_ON_COPY */ + +/** Checks if tcp_write is allowed or not (checks state, snd_buf and snd_queuelen). + * + * @param pcb the tcp pcb to check for + * @param len length of data to send (checked against snd_buf) + * @return ERR_OK if tcp_write is allowed to proceed, another err_t otherwise + */ +static err_t +tcp_write_checks(struct tcp_pcb *pcb, u16_t len) +{ + LWIP_ASSERT("tcp_write_checks: invalid pcb", pcb != NULL); + + /* connection is in invalid state for data transmission? */ + if ((pcb->state != ESTABLISHED) && + (pcb->state != CLOSE_WAIT) && + (pcb->state != SYN_SENT) && + (pcb->state != SYN_RCVD)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_STATE | LWIP_DBG_LEVEL_SEVERE, ("tcp_write() called in invalid state\n")); + return ERR_CONN; + } else if (len == 0) { + return ERR_OK; + } + + /* fail on too much data */ + if (len > pcb->snd_buf) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too much data (len=%"U16_F" > snd_buf=%"TCPWNDSIZE_F")\n", + len, pcb->snd_buf)); + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return ERR_MEM; + } + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: queuelen: %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen)); + + /* If total number of pbufs on the unsent/unacked queues exceeds the + * configured maximum, return an error */ + /* check for configured max queuelen and possible overflow */ + if (pcb->snd_queuelen >= LWIP_MIN(TCP_SND_QUEUELEN, (TCP_SNDQUEUELEN_OVERFLOW + 1))) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too long queue %"U16_F" (max %"U16_F")\n", + pcb->snd_queuelen, (u16_t)TCP_SND_QUEUELEN)); + TCP_STATS_INC(tcp.memerr); + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return ERR_MEM; + } + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: pbufs on queue => at least one queue non-empty", + pcb->unacked != NULL || pcb->unsent != NULL); + } else { + LWIP_ASSERT("tcp_write: no pbufs on queue => both queues empty", + pcb->unacked == NULL && pcb->unsent == NULL); + } + return ERR_OK; +} + +/** + * @ingroup tcp_raw + * Write data for sending (but does not send it immediately). + * + * It waits in the expectation of more data being sent soon (as + * it can send them more efficiently by combining them together). 
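+ *
+ * A typical call sequence (illustrative sketch, error handling elided):
+ *
+ *   if (tcp_sndbuf(pcb) >= len) {
+ *     if (tcp_write(pcb, data, len, TCP_WRITE_FLAG_COPY) == ERR_OK) {
+ *       tcp_output(pcb);
+ *     }
+ *   }
+ *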
+ * To prompt the system to send data now, call tcp_output() after + * calling tcp_write(). + * + * This function enqueues the data pointed to by the argument dataptr. The length of + * the data is passed as the len parameter. The apiflags can be one or more of: + * - TCP_WRITE_FLAG_COPY: indicates whether the new memory should be allocated + * for the data to be copied into. If this flag is not given, no new memory + * should be allocated and the data should only be referenced by pointer. This + * also means that the memory behind dataptr must not change until the data is + * ACKed by the remote host + * - TCP_WRITE_FLAG_MORE: indicates that more data follows. If this is omitted, + * the PSH flag is set in the last segment created by this call to tcp_write. + * If this flag is given, the PSH flag is not set. + * + * The tcp_write() function will fail and return ERR_MEM if the length + * of the data exceeds the current send buffer size or if the length of + * the queue of outgoing segment is larger than the upper limit defined + * in lwipopts.h. The number of bytes available in the output queue can + * be retrieved with the tcp_sndbuf() function. + * + * The proper way to use this function is to call the function with at + * most tcp_sndbuf() bytes of data. If the function returns ERR_MEM, + * the application should wait until some of the currently enqueued + * data has been successfully received by the other host and try again. + * + * @param pcb Protocol control block for the TCP connection to enqueue data for. + * @param arg Pointer to the data to be enqueued for sending. + * @param len Data length in bytes + * @param apiflags combination of following flags : + * - TCP_WRITE_FLAG_COPY (0x01) data will be copied into memory belonging to the stack + * - TCP_WRITE_FLAG_MORE (0x02) for TCP connection, PSH flag will not be set on last segment sent, + * @return ERR_OK if enqueued, another err_t on error + */ +err_t +tcp_write(struct tcp_pcb *pcb, const void *arg, u16_t len, u8_t apiflags) +{ + struct pbuf *concat_p = NULL; + struct tcp_seg *last_unsent = NULL, *seg = NULL, *prev_seg = NULL, *queue = NULL; + u16_t pos = 0; /* position in 'arg' data */ + u16_t queuelen; + u8_t optlen; + u8_t optflags = 0; +#if TCP_OVERSIZE + u16_t oversize = 0; + u16_t oversize_used = 0; +#if TCP_OVERSIZE_DBGCHECK + u16_t oversize_add = 0; +#endif /* TCP_OVERSIZE_DBGCHECK*/ +#endif /* TCP_OVERSIZE */ + u16_t extendlen = 0; +#if TCP_CHECKSUM_ON_COPY + u16_t concat_chksum = 0; + u8_t concat_chksum_swapped = 0; + u16_t concat_chksummed = 0; +#endif /* TCP_CHECKSUM_ON_COPY */ + err_t err; + u16_t mss_local; + + LWIP_ERROR("tcp_write: invalid pcb", pcb != NULL, return ERR_ARG); + + /* don't allocate segments bigger than half the maximum window we ever received */ + mss_local = LWIP_MIN(pcb->mss, TCPWND_MIN16(pcb->snd_wnd_max / 2)); + mss_local = mss_local ? 
mss_local : pcb->mss; + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_NETIF_TX_SINGLE_PBUF + /* Always copy to try to create single pbufs for TX */ + apiflags |= TCP_WRITE_FLAG_COPY; +#endif /* LWIP_NETIF_TX_SINGLE_PBUF */ + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_write(pcb=%p, data=%p, len=%"U16_F", apiflags=%"U16_F")\n", + (void *)pcb, arg, len, (u16_t)apiflags)); + LWIP_ERROR("tcp_write: arg == NULL (programmer violates API)", + arg != NULL, return ERR_ARG;); + + err = tcp_write_checks(pcb, len); + if (err != ERR_OK) { + return err; + } + queuelen = pcb->snd_queuelen; + +#if LWIP_TCP_TIMESTAMPS + if ((pcb->flags & TF_TIMESTAMP)) { + /* Make sure the timestamp option is only included in data segments if we + agreed about it with the remote host. */ + optflags = TF_SEG_OPTS_TS; + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(TF_SEG_OPTS_TS, pcb); + /* ensure that segments can hold at least one data byte... */ + mss_local = LWIP_MAX(mss_local, LWIP_TCP_OPT_LEN_TS + 1); + } else +#endif /* LWIP_TCP_TIMESTAMPS */ + { + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + } + + + /* + * TCP segmentation is done in three phases with increasing complexity: + * + * 1. Copy data directly into an oversized pbuf. + * 2. Chain a new pbuf to the end of pcb->unsent. + * 3. Create new segments. + * + * We may run out of memory at any point. In that case we must + * return ERR_MEM and not change anything in pcb. Therefore, all + * changes are recorded in local variables and committed at the end + * of the function. Some pcb fields are maintained in local copies: + * + * queuelen = pcb->snd_queuelen + * oversize = pcb->unsent_oversize + * + * These variables are set consistently by the phases: + * + * seg points to the last segment tampered with. + * + * pos records progress as data is segmented. + */ + + /* Find the tail of the unsent queue. */ + if (pcb->unsent != NULL) { + u16_t space; + u16_t unsent_optlen; + + /* @todo: this could be sped up by keeping last_unsent in the pcb */ + for (last_unsent = pcb->unsent; last_unsent->next != NULL; + last_unsent = last_unsent->next); + + /* Usable space at the end of the last unsent segment */ + unsent_optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(last_unsent->flags, pcb); + LWIP_ASSERT("mss_local is too small", mss_local >= last_unsent->len + unsent_optlen); + space = mss_local - (last_unsent->len + unsent_optlen); + + /* + * Phase 1: Copy data directly into an oversized pbuf. + * + * The number of bytes copied is recorded in the oversize_used + * variable. The actual copying is done at the bottom of the + * function. + */ +#if TCP_OVERSIZE +#if TCP_OVERSIZE_DBGCHECK + /* check that pcb->unsent_oversize matches last_unsent->oversize_left */ + LWIP_ASSERT("unsent_oversize mismatch (pcb vs. last_unsent)", + pcb->unsent_oversize == last_unsent->oversize_left); +#endif /* TCP_OVERSIZE_DBGCHECK */ + oversize = pcb->unsent_oversize; + if (oversize > 0) { + LWIP_ASSERT("inconsistent oversize vs. space", oversize <= space); + seg = last_unsent; + oversize_used = LWIP_MIN(space, LWIP_MIN(oversize, len)); + pos += oversize_used; + oversize -= oversize_used; + space -= oversize_used; + } + /* now we are either finished or oversize is zero */ + LWIP_ASSERT("inconsistent oversize vs. len", (oversize == 0) || (pos == len)); +#endif /* TCP_OVERSIZE */ + +#if !LWIP_NETIF_TX_SINGLE_PBUF + /* + * Phase 2: Chain a new pbuf to the end of pcb->unsent. 
+ * + * As an exception when NOT copying the data, if the given data buffer + * directly follows the last unsent data buffer in memory, extend the last + * ROM pbuf reference to the buffer, thus saving a ROM pbuf allocation. + * + * We don't extend segments containing SYN/FIN flags or options + * (len==0). The new pbuf is kept in concat_p and pbuf_cat'ed at + * the end. + * + * This phase is skipped for LWIP_NETIF_TX_SINGLE_PBUF as we could only execute + * it after rexmit puts a segment from unacked to unsent and at this point, + * oversize info is lost. + */ + if ((pos < len) && (space > 0) && (last_unsent->len > 0)) { + u16_t seglen = LWIP_MIN(space, len - pos); + seg = last_unsent; + + /* Create a pbuf with a copy or reference to seglen bytes. We + * can use PBUF_RAW here since the data appears in the middle of + * a segment. A header will never be prepended. */ + if (apiflags & TCP_WRITE_FLAG_COPY) { + /* Data is copied */ + if ((concat_p = tcp_pbuf_prealloc(PBUF_RAW, seglen, space, &oversize, pcb, apiflags, 1)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", + seglen)); + goto memerr; + } +#if TCP_OVERSIZE_DBGCHECK + oversize_add = oversize; +#endif /* TCP_OVERSIZE_DBGCHECK */ + TCP_DATA_COPY2(concat_p->payload, (const u8_t *)arg + pos, seglen, &concat_chksum, &concat_chksum_swapped); +#if TCP_CHECKSUM_ON_COPY + concat_chksummed += seglen; +#endif /* TCP_CHECKSUM_ON_COPY */ + queuelen += pbuf_clen(concat_p); + } else { + /* Data is not copied */ + /* If the last unsent pbuf is of type PBUF_ROM, try to extend it. */ + struct pbuf *p; + for (p = last_unsent->p; p->next != NULL; p = p->next); + if (((p->type_internal & (PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_FLAG_DATA_VOLATILE)) == 0) && + (const u8_t *)p->payload + p->len == (const u8_t *)arg) { + LWIP_ASSERT("tcp_write: ROM pbufs cannot be oversized", pos == 0); + extendlen = seglen; + } else { + if ((concat_p = pbuf_alloc(PBUF_RAW, seglen, PBUF_ROM)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_write: could not allocate memory for zero-copy pbuf\n")); + goto memerr; + } + /* reference the non-volatile payload data */ + ((struct pbuf_rom *)concat_p)->payload = (const u8_t *)arg + pos; + queuelen += pbuf_clen(concat_p); + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum of nocopy-data */ + tcp_seg_add_chksum(~inet_chksum((const u8_t *)arg + pos, seglen), seglen, + &concat_chksum, &concat_chksum_swapped); + concat_chksummed += seglen; +#endif /* TCP_CHECKSUM_ON_COPY */ + } + + pos += seglen; + } +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + } else { +#if TCP_OVERSIZE + LWIP_ASSERT("unsent_oversize mismatch (pcb->unsent is NULL)", + pcb->unsent_oversize == 0); +#endif /* TCP_OVERSIZE */ + } + + /* + * Phase 3: Create new segments. + * + * The new segments are chained together in the local 'queue' + * variable, ready to be appended to pcb->unsent. 
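+   *
+   * For example (assuming phases 1 and 2 consumed nothing): with len == 3000,
+   * mss_local == 1460 and optlen == 0, the loop below creates three segments
+   * of 1460, 1460 and 80 bytes, chained on 'queue' in order.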
+ */ + while (pos < len) { + struct pbuf *p; + u16_t left = len - pos; + u16_t max_len = mss_local - optlen; + u16_t seglen = LWIP_MIN(left, max_len); +#if TCP_CHECKSUM_ON_COPY + u16_t chksum = 0; + u8_t chksum_swapped = 0; +#endif /* TCP_CHECKSUM_ON_COPY */ + + if (apiflags & TCP_WRITE_FLAG_COPY) { + /* If copy is set, memory should be allocated and data copied + * into pbuf */ + if ((p = tcp_pbuf_prealloc(PBUF_TRANSPORT, seglen + optlen, mss_local, &oversize, pcb, apiflags, queue == NULL)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", seglen)); + goto memerr; + } + LWIP_ASSERT("tcp_write: check that first pbuf can hold the complete seglen", + (p->len >= seglen)); + TCP_DATA_COPY2((char *)p->payload + optlen, (const u8_t *)arg + pos, seglen, &chksum, &chksum_swapped); + } else { + /* Copy is not set: First allocate a pbuf for holding the data. + * Since the referenced data is available at least until it is + * sent out on the link (as it has to be ACKed by the remote + * party) we can safely use PBUF_ROM instead of PBUF_REF here. + */ + struct pbuf *p2; +#if TCP_OVERSIZE + LWIP_ASSERT("oversize == 0", oversize == 0); +#endif /* TCP_OVERSIZE */ + if ((p2 = pbuf_alloc(PBUF_TRANSPORT, seglen, PBUF_ROM)) == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for zero-copy pbuf\n")); + goto memerr; + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum of nocopy-data */ + chksum = ~inet_chksum((const u8_t *)arg + pos, seglen); + if (seglen & 1) { + chksum_swapped = 1; + chksum = SWAP_BYTES_IN_WORD(chksum); + } +#endif /* TCP_CHECKSUM_ON_COPY */ + /* reference the non-volatile payload data */ + ((struct pbuf_rom *)p2)->payload = (const u8_t *)arg + pos; + + /* Second, allocate a pbuf for the headers. */ + if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) { + /* If allocation fails, we have to deallocate the data pbuf as + * well. */ + pbuf_free(p2); + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for header pbuf\n")); + goto memerr; + } + /* Concatenate the headers and data pbufs together. */ + pbuf_cat(p/*header*/, p2/*data*/); + } + + queuelen += pbuf_clen(p); + + /* Now that there are more segments queued, we check again if the + * length of the queue exceeds the configured maximum or + * overflows. */ + if (queuelen > LWIP_MIN(TCP_SND_QUEUELEN, TCP_SNDQUEUELEN_OVERFLOW)) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: queue too long %"U16_F" (%d)\n", + queuelen, (int)TCP_SND_QUEUELEN)); + pbuf_free(p); + goto memerr; + } + + if ((seg = tcp_create_segment(pcb, p, 0, pcb->snd_lbb + pos, optflags)) == NULL) { + goto memerr; + } +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = oversize; +#endif /* TCP_OVERSIZE_DBGCHECK */ +#if TCP_CHECKSUM_ON_COPY + seg->chksum = chksum; + seg->chksum_swapped = chksum_swapped; + seg->flags |= TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* first segment of to-be-queued data? 
*/ + if (queue == NULL) { + queue = seg; + } else { + /* Attach the segment to the end of the queued segments */ + LWIP_ASSERT("prev_seg != NULL", prev_seg != NULL); + prev_seg->next = seg; + } + /* remember last segment of to-be-queued data for next iteration */ + prev_seg = seg; + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, ("tcp_write: queueing %"U32_F":%"U32_F"\n", + lwip_ntohl(seg->tcphdr->seqno), + lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg))); + + pos += seglen; + } + + /* + * All three segmentation phases were successful. We can commit the + * transaction. + */ +#if TCP_OVERSIZE_DBGCHECK + if ((last_unsent != NULL) && (oversize_add != 0)) { + last_unsent->oversize_left += oversize_add; + } +#endif /* TCP_OVERSIZE_DBGCHECK */ + + /* + * Phase 1: If data has been added to the preallocated tail of + * last_unsent, we update the length fields of the pbuf chain. + */ +#if TCP_OVERSIZE + if (oversize_used > 0) { + struct pbuf *p; + /* Bump tot_len of whole chain, len of tail */ + for (p = last_unsent->p; p; p = p->next) { + p->tot_len += oversize_used; + if (p->next == NULL) { + TCP_DATA_COPY((char *)p->payload + p->len, arg, oversize_used, last_unsent); + p->len += oversize_used; + } + } + last_unsent->len += oversize_used; +#if TCP_OVERSIZE_DBGCHECK + LWIP_ASSERT("last_unsent->oversize_left >= oversize_used", + last_unsent->oversize_left >= oversize_used); + last_unsent->oversize_left -= oversize_used; +#endif /* TCP_OVERSIZE_DBGCHECK */ + } + pcb->unsent_oversize = oversize; +#endif /* TCP_OVERSIZE */ + + /* + * Phase 2: concat_p can be concatenated onto last_unsent->p, unless we + * determined that the last ROM pbuf can be extended to include the new data. + */ + if (concat_p != NULL) { + LWIP_ASSERT("tcp_write: cannot concatenate when pcb->unsent is empty", + (last_unsent != NULL)); + pbuf_cat(last_unsent->p, concat_p); + last_unsent->len += concat_p->tot_len; + } else if (extendlen > 0) { + struct pbuf *p; + LWIP_ASSERT("tcp_write: extension of reference requires reference", + last_unsent != NULL && last_unsent->p != NULL); + for (p = last_unsent->p; p->next != NULL; p = p->next) { + p->tot_len += extendlen; + } + p->tot_len += extendlen; + p->len += extendlen; + last_unsent->len += extendlen; + } + +#if TCP_CHECKSUM_ON_COPY + if (concat_chksummed) { + LWIP_ASSERT("tcp_write: concat checksum needs concatenated data", + concat_p != NULL || extendlen > 0); + /* if concat checksum was swapped, swap it back */ + if (concat_chksum_swapped) { + concat_chksum = SWAP_BYTES_IN_WORD(concat_chksum); + } + tcp_seg_add_chksum(concat_chksum, concat_chksummed, &last_unsent->chksum, + &last_unsent->chksum_swapped); + last_unsent->flags |= TF_SEG_DATA_CHECKSUMMED; + } +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* + * Phase 3: Append queue to pcb->unsent. Queue may be NULL, but that + * is harmless + */ + if (last_unsent == NULL) { + pcb->unsent = queue; + } else { + last_unsent->next = queue; + } + + /* + * Finally update the pcb state. + */ + pcb->snd_lbb += len; + pcb->snd_buf -= len; + pcb->snd_queuelen = queuelen; + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: %"S16_F" (after enqueued)\n", + pcb->snd_queuelen)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: valid queue length", + pcb->unacked != NULL || pcb->unsent != NULL); + } + + /* Set the PSH flag in the last segment that we enqueued. 
*/ + if (seg != NULL && seg->tcphdr != NULL && ((apiflags & TCP_WRITE_FLAG_MORE) == 0)) { + TCPH_SET_FLAG(seg->tcphdr, TCP_PSH); + } + + return ERR_OK; +memerr: + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + + if (concat_p != NULL) { + pbuf_free(concat_p); + } + if (queue != NULL) { + tcp_segs_free(queue); + } + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_write: valid queue length", pcb->unacked != NULL || + pcb->unsent != NULL); + } + LWIP_DEBUGF(TCP_QLEN_DEBUG | LWIP_DBG_STATE, ("tcp_write: %"S16_F" (with mem err)\n", pcb->snd_queuelen)); + return ERR_MEM; +} + +/** + * Split segment on the head of the unsent queue. If return is not + * ERR_OK, existing head remains intact + * + * The split is accomplished by creating a new TCP segment and pbuf + * which holds the remainder payload after the split. The original + * pbuf is trimmed to new length. This allows splitting of read-only + * pbufs + * + * @param pcb the tcp_pcb for which to split the unsent head + * @param split the amount of payload to remain in the head + */ +err_t +tcp_split_unsent_seg(struct tcp_pcb *pcb, u16_t split) +{ + struct tcp_seg *seg = NULL, *useg = NULL; + struct pbuf *p = NULL; + u8_t optlen; + u8_t optflags; + u8_t split_flags; + u8_t remainder_flags; + u16_t remainder; + u16_t offset; +#if TCP_CHECKSUM_ON_COPY + u16_t chksum = 0; + u8_t chksum_swapped = 0; + struct pbuf *q; +#endif /* TCP_CHECKSUM_ON_COPY */ + + LWIP_ASSERT("tcp_split_unsent_seg: invalid pcb", pcb != NULL); + + useg = pcb->unsent; + if (useg == NULL) { + return ERR_MEM; + } + + if (split == 0) { + LWIP_ASSERT("Can't split segment into length 0", 0); + return ERR_VAL; + } + + if (useg->len <= split) { + return ERR_OK; + } + + LWIP_ASSERT("split <= mss", split <= pcb->mss); + LWIP_ASSERT("useg->len > 0", useg->len > 0); + + /* We should check that we don't exceed TCP_SND_QUEUELEN but we need + * to split this packet so we may actually exceed the max value by + * one! 
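+   * (The split turns one queued segment into two, which is why snd_queuelen
+   * may temporarily end up one entry above the configured limit.)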
+ */ + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue: split_unsent_seg: %u\n", (unsigned int)pcb->snd_queuelen)); + + optflags = useg->flags; +#if TCP_CHECKSUM_ON_COPY + /* Remove since checksum is not stored until after tcp_create_segment() */ + optflags &= ~TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + optlen = LWIP_TCP_OPT_LENGTH(optflags); + remainder = useg->len - split; + + /* Create new pbuf for the remainder of the split */ + p = pbuf_alloc(PBUF_TRANSPORT, remainder + optlen, PBUF_RAM); + if (p == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not allocate memory for pbuf remainder %u\n", remainder)); + goto memerr; + } + + /* Offset into the original pbuf is past TCP/IP headers, options, and split amount */ + offset = useg->p->tot_len - useg->len + split; + /* Copy remainder into new pbuf, headers and options will not be filled out */ + if (pbuf_copy_partial(useg->p, (u8_t *)p->payload + optlen, remainder, offset ) != remainder) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not copy pbuf remainder %u\n", remainder)); + goto memerr; + } +#if TCP_CHECKSUM_ON_COPY + /* calculate the checksum on remainder data */ + tcp_seg_add_chksum(~inet_chksum((const u8_t *)p->payload + optlen, remainder), remainder, + &chksum, &chksum_swapped); +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Options are created when calling tcp_output() */ + + /* Migrate flags from original segment */ + split_flags = TCPH_FLAGS(useg->tcphdr); + remainder_flags = 0; /* ACK added in tcp_output() */ + + if (split_flags & TCP_PSH) { + split_flags &= ~TCP_PSH; + remainder_flags |= TCP_PSH; + } + if (split_flags & TCP_FIN) { + split_flags &= ~TCP_FIN; + remainder_flags |= TCP_FIN; + } + /* SYN should be left on split, RST should not be present with data */ + + seg = tcp_create_segment(pcb, p, remainder_flags, lwip_ntohl(useg->tcphdr->seqno) + split, optflags); + if (seg == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("tcp_split_unsent_seg: could not create new TCP segment\n")); + goto memerr; + } + +#if TCP_CHECKSUM_ON_COPY + seg->chksum = chksum; + seg->chksum_swapped = chksum_swapped; + seg->flags |= TF_SEG_DATA_CHECKSUMMED; +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Remove this segment from the queue since trimming it may free pbufs */ + pcb->snd_queuelen -= pbuf_clen(useg->p); + + /* Trim the original pbuf into our split size. At this point our remainder segment must be setup + successfully because we are modifying the original segment */ + pbuf_realloc(useg->p, useg->p->tot_len - remainder); + useg->len -= remainder; + TCPH_SET_FLAG(useg->tcphdr, split_flags); +#if TCP_OVERSIZE_DBGCHECK + /* By trimming, realloc may have actually shrunk the pbuf, so clear oversize_left */ + useg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ + + /* Add back to the queue with new trimmed pbuf */ + pcb->snd_queuelen += pbuf_clen(useg->p); + +#if TCP_CHECKSUM_ON_COPY + /* The checksum on the split segment is now incorrect. 
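+     (Its stored chksum still covers bytes that have just moved into the remainder segment.)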
We need to re-run it over the split */ + useg->chksum = 0; + useg->chksum_swapped = 0; + q = useg->p; + offset = q->tot_len - useg->len; /* Offset due to exposed headers */ + + /* Advance to the pbuf where the offset ends */ + while (q != NULL && offset > q->len) { + offset -= q->len; + q = q->next; + } + LWIP_ASSERT("Found start of payload pbuf", q != NULL); + /* Checksum the first payload pbuf accounting for offset, then other pbufs are all payload */ + for (; q != NULL; offset = 0, q = q->next) { + tcp_seg_add_chksum(~inet_chksum((const u8_t *)q->payload + offset, q->len - offset), q->len - offset, + &useg->chksum, &useg->chksum_swapped); + } +#endif /* TCP_CHECKSUM_ON_COPY */ + + /* Update number of segments on the queues. Note that length now may + * exceed TCP_SND_QUEUELEN! We don't have to touch pcb->snd_buf + * because the total amount of data is constant when packet is split */ + pcb->snd_queuelen += pbuf_clen(seg->p); + + /* Finally insert remainder into queue after split (which stays head) */ + seg->next = useg->next; + useg->next = seg; + +#if TCP_OVERSIZE + /* If remainder is last segment on the unsent, ensure we clear the oversize amount + * because the remainder is always sized to the exact remaining amount */ + if (seg->next == NULL) { + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + + return ERR_OK; +memerr: + TCP_STATS_INC(tcp.memerr); + + LWIP_ASSERT("seg == NULL", seg == NULL); + if (p != NULL) { + pbuf_free(p); + } + + return ERR_MEM; +} + +/** + * Called by tcp_close() to send a segment including FIN flag but not data. + * This FIN may be added to an existing segment or a new, otherwise empty + * segment is enqueued. + * + * @param pcb the tcp_pcb over which to send a segment + * @return ERR_OK if sent, another err_t otherwise + */ +err_t +tcp_send_fin(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_send_fin: invalid pcb", pcb != NULL); + + /* first, try to add the fin to the last unsent segment */ + if (pcb->unsent != NULL) { + struct tcp_seg *last_unsent; + for (last_unsent = pcb->unsent; last_unsent->next != NULL; + last_unsent = last_unsent->next); + + if ((TCPH_FLAGS(last_unsent->tcphdr) & (TCP_SYN | TCP_FIN | TCP_RST)) == 0) { + /* no SYN/FIN/RST flag in the header, we can add the FIN flag */ + TCPH_SET_FLAG(last_unsent->tcphdr, TCP_FIN); + tcp_set_flags(pcb, TF_FIN); + return ERR_OK; + } + } + /* no data, no length, flags, copy=1, no optdata */ + return tcp_enqueue_flags(pcb, TCP_FIN); +} + +/** + * Enqueue SYN or FIN for transmission. + * + * Called by @ref tcp_connect, tcp_listen_input, and @ref tcp_close + * (via @ref tcp_send_fin) + * + * @param pcb Protocol control block for the TCP connection. + * @param flags TCP header flags to set in the outgoing segment. + */ +err_t +tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags) +{ + struct pbuf *p; + struct tcp_seg *seg; + u8_t optflags = 0; + u8_t optlen = 0; + + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen)); + + LWIP_ASSERT("tcp_enqueue_flags: need either TCP_SYN or TCP_FIN in flags (programmer violates API)", + (flags & (TCP_SYN | TCP_FIN)) != 0); + LWIP_ASSERT("tcp_enqueue_flags: invalid pcb", pcb != NULL); + + /* No need to check pcb->snd_queuelen if only SYN or FIN are allowed! */ + + /* Get options for this segment. This is a special case since this is the + only place where a SYN can be sent. 
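+     MSS is always offered on a SYN; window scale, SACK_PERM and (for active
+     opens) timestamp options are added conditionally below.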
*/ + if (flags & TCP_SYN) { + optflags = TF_SEG_OPTS_MSS; +#if LWIP_WND_SCALE + if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_WND_SCALE)) { + /* In a <SYN,ACK> (sent in state SYN_RCVD), the window scale option may only + be sent if we received a window scale option from the remote host. */ + optflags |= TF_SEG_OPTS_WND_SCALE; + } +#endif /* LWIP_WND_SCALE */ +#if LWIP_TCP_SACK_OUT + if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_SACK)) { + /* In a <SYN,ACK> (sent in state SYN_RCVD), the SACK_PERM option may only + be sent if we received a SACK_PERM option from the remote host. */ + optflags |= TF_SEG_OPTS_SACK_PERM; + } +#endif /* LWIP_TCP_SACK_OUT */ + } +#if LWIP_TCP_TIMESTAMPS + if ((pcb->flags & TF_TIMESTAMP) || ((flags & TCP_SYN) && (pcb->state != SYN_RCVD))) { + /* Make sure the timestamp option is only included in data segments if we + agreed about it with the remote host (and in active open SYN segments). */ + optflags |= TF_SEG_OPTS_TS; + } +#endif /* LWIP_TCP_TIMESTAMPS */ + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + + /* Allocate pbuf with room for TCP header + options */ + if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) { + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("tcp_enqueue_flags: check that first pbuf can hold optlen", + (p->len >= optlen)); + + /* Allocate memory for tcp_seg, and fill in fields. */ + if ((seg = tcp_create_segment(pcb, p, flags, pcb->snd_lbb, optflags)) == NULL) { + tcp_set_flags(pcb, TF_NAGLEMEMERR); + TCP_STATS_INC(tcp.memerr); + return ERR_MEM; + } + LWIP_ASSERT("seg->tcphdr not aligned", ((mem_ptr_t)seg->tcphdr % LWIP_MIN(MEM_ALIGNMENT, 4)) == 0); + LWIP_ASSERT("tcp_enqueue_flags: invalid segment length", seg->len == 0); + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, + ("tcp_enqueue_flags: queueing %"U32_F":%"U32_F" (0x%"X16_F")\n", + lwip_ntohl(seg->tcphdr->seqno), + lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg), + (u16_t)flags)); + + /* Now append seg to pcb->unsent queue */ + if (pcb->unsent == NULL) { + pcb->unsent = seg; + } else { + struct tcp_seg *useg; + for (useg = pcb->unsent; useg->next != NULL; useg = useg->next); + useg->next = seg; + } +#if TCP_OVERSIZE + /* The new unsent tail has no space */ + pcb->unsent_oversize = 0; +#endif /* TCP_OVERSIZE */ + + /* SYN and FIN bump the sequence number */ + if ((flags & TCP_SYN) || (flags & TCP_FIN)) { + pcb->snd_lbb++; + /* optlen does not influence snd_buf */ + } + if (flags & TCP_FIN) { + tcp_set_flags(pcb, TF_FIN); + } + + /* update number of segments on the queues */ + pcb->snd_queuelen += pbuf_clen(seg->p); + LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: %"S16_F" (after enqueued)\n", pcb->snd_queuelen)); + if (pcb->snd_queuelen != 0) { + LWIP_ASSERT("tcp_enqueue_flags: invalid queue length", + pcb->unacked != NULL || pcb->unsent != NULL); + } + + return ERR_OK; +} + +#if LWIP_TCP_TIMESTAMPS +/* Build a timestamp option (12 bytes long) at the specified options pointer + * + * @param pcb tcp_pcb + * @param opts option pointer where to store the timestamp option + */ +static void +tcp_build_timestamp_option(const struct tcp_pcb *pcb, u32_t *opts) +{ + LWIP_ASSERT("tcp_build_timestamp_option: invalid pcb", pcb != NULL); + + /* Pad with two NOP options to make everything nicely aligned */ + opts[0] = PP_HTONL(0x0101080A); + opts[1] = lwip_htonl(sys_now()); + opts[2] = lwip_htonl(pcb->ts_recent); +} +#endif + +#if LWIP_TCP_SACK_OUT +/** + * Calculates the number of SACK entries that should be generated. 
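+ * For example, with a 12-byte timestamp option already present, the first
+ * SACK costs 12 more bytes and each further one 8, so 12 + 12 + 8 + 8 = 40
+ * bytes allows at most three SACK ranges.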
+ * It takes into account whether TF_SACK flag is set, + * the number of SACK entries in tcp_pcb that are valid, + * as well as the available options size. + * + * @param pcb tcp_pcb + * @param optlen the length of other TCP options (in bytes) + * @return the number of SACK ranges that can be used + */ +static u8_t +tcp_get_num_sacks(const struct tcp_pcb *pcb, u8_t optlen) +{ + u8_t num_sacks = 0; + + LWIP_ASSERT("tcp_get_num_sacks: invalid pcb", pcb != NULL); + + if (pcb->flags & TF_SACK) { + u8_t i; + + /* The first SACK takes up 12 bytes (it includes SACK header and two NOP options), + each additional one - 8 bytes. */ + optlen += 12; + + /* Max options size = 40, number of SACK array entries = LWIP_TCP_MAX_SACK_NUM */ + for (i = 0; (i < LWIP_TCP_MAX_SACK_NUM) && (optlen <= TCP_MAX_OPTION_BYTES) && + LWIP_TCP_SACK_VALID(pcb, i); ++i) { + ++num_sacks; + optlen += 8; + } + } + + return num_sacks; +} + +/** Build a SACK option (12 or more bytes long) at the specified options pointer) + * + * @param pcb tcp_pcb + * @param opts option pointer where to store the SACK option + * @param num_sacks the number of SACKs to store + */ +static void +tcp_build_sack_option(const struct tcp_pcb *pcb, u32_t *opts, u8_t num_sacks) +{ + u8_t i; + + LWIP_ASSERT("tcp_build_sack_option: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_build_sack_option: invalid opts", opts != NULL); + + /* Pad with two NOP options to make everything nicely aligned. + We add the length (of just the SACK option, not the NOPs in front of it), + which is 2B of header, plus 8B for each SACK. */ + *(opts++) = PP_HTONL(0x01010500 + 2 + num_sacks * 8); + + for (i = 0; i < num_sacks; ++i) { + *(opts++) = lwip_htonl(pcb->rcv_sacks[i].left); + *(opts++) = lwip_htonl(pcb->rcv_sacks[i].right); + } +} + +#endif + +#if LWIP_WND_SCALE +/** Build a window scale option (3 bytes long) at the specified options pointer) + * + * @param opts option pointer where to store the window scale option + */ +static void +tcp_build_wnd_scale_option(u32_t *opts) +{ + LWIP_ASSERT("tcp_build_wnd_scale_option: invalid opts", opts != NULL); + + /* Pad with one NOP option to make everything nicely aligned */ + opts[0] = PP_HTONL(0x01030300 | TCP_RCV_SCALE); +} +#endif + +/** + * @ingroup tcp_raw + * Find out what we can send and send it + * + * @param pcb Protocol control block for the TCP connection to send data + * @return ERR_OK if data has been sent or nothing to send + * another err_t on error + */ +err_t +tcp_output(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg, *useg; + u32_t wnd, snd_nxt; + err_t err; + struct netif *netif; +#if TCP_CWND_DEBUG + s16_t i = 0; +#endif /* TCP_CWND_DEBUG */ + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("tcp_output: invalid pcb", pcb != NULL); + /* pcb->state LISTEN not allowed here */ + LWIP_ASSERT("don't call tcp_output for listen-pcbs", + pcb->state != LISTEN); + + /* First, check if we are invoked by the TCP input processing + code. If so, we do not output anything. Instead, we rely on the + input processing code to call us when input processing is done + with. 
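+     (Once input processing finishes, tcp_input() invokes tcp_output() again,
+     so nothing is lost by returning early here.)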
*/ + if (tcp_input_pcb == pcb) { + return ERR_OK; + } + + wnd = LWIP_MIN(pcb->snd_wnd, pcb->cwnd); + + seg = pcb->unsent; + + if (seg == NULL) { + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: nothing to send (%p)\n", + (void *)pcb->unsent)); + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F + ", cwnd %"TCPWNDSIZE_F", wnd %"U32_F + ", seg == NULL, ack %"U32_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, pcb->lastack)); + + /* If the TF_ACK_NOW flag is set and the ->unsent queue is empty, construct + * an empty ACK segment and send it. */ + if (pcb->flags & TF_ACK_NOW) { + return tcp_send_empty_ack(pcb); + } + /* nothing to send: shortcut out of here */ + goto output_done; + } else { + LWIP_DEBUGF(TCP_CWND_DEBUG, + ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F + ", effwnd %"U32_F", seq %"U32_F", ack %"U32_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, + lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len, + lwip_ntohl(seg->tcphdr->seqno), pcb->lastack)); + } + + netif = tcp_route(pcb, &pcb->local_ip, &pcb->remote_ip); + if (netif == NULL) { + return ERR_RTE; + } + + /* If we don't have a local IP address, we get one from netif */ + if (ip_addr_isany(&pcb->local_ip)) { + const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, &pcb->remote_ip); + if (local_ip == NULL) { + return ERR_RTE; + } + ip_addr_copy(pcb->local_ip, *local_ip); + } + + /* Handle the current segment not fitting within the window */ + if (lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd) { + /* We need to start the persistent timer when the next unsent segment does not fit + * within the remaining (could be 0) send window and RTO timer is not running (we + * have no in-flight data). If window is still too small after persist timer fires, + * then we split the segment. We don't consider the congestion window since a cwnd + * smaller than 1 SMSS implies in-flight data + */ + if (wnd == pcb->snd_wnd && pcb->unacked == NULL && pcb->persist_backoff == 0) { + pcb->persist_cnt = 0; + pcb->persist_backoff = 1; + pcb->persist_probe = 0; + } + /* We need an ACK, but can't send data now, so send an empty ACK */ + if (pcb->flags & TF_ACK_NOW) { + return tcp_send_empty_ack(pcb); + } + goto output_done; + } + /* Stop persist timer, above conditions are not active */ + pcb->persist_backoff = 0; + + /* useg should point to last segment on unacked queue */ + useg = pcb->unacked; + if (useg != NULL) { + for (; useg->next != NULL; useg = useg->next); + } + /* data available and window allows it to be sent? */ + while (seg != NULL && + lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len <= wnd) { + LWIP_ASSERT("RST not expected here!", + (TCPH_FLAGS(seg->tcphdr) & TCP_RST) == 0); + /* Stop sending if the nagle algorithm would prevent it + * Don't stop: + * - if tcp_write had a memory error before (prevent delayed ACK timeout) or + * - if FIN was already enqueued for this PCB (SYN is always alone in a segment - + * either seg->next != NULL or pcb->unacked == NULL; + * RST is not sent using tcp_write/tcp_output. 
+ */ + if ((tcp_do_output_nagle(pcb) == 0) && + ((pcb->flags & (TF_NAGLEMEMERR | TF_FIN)) == 0)) { + break; + } +#if TCP_CWND_DEBUG + LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F", effwnd %"U32_F", seq %"U32_F", ack %"U32_F", i %"S16_F"\n", + pcb->snd_wnd, pcb->cwnd, wnd, + lwip_ntohl(seg->tcphdr->seqno) + seg->len - + pcb->lastack, + lwip_ntohl(seg->tcphdr->seqno), pcb->lastack, i)); + ++i; +#endif /* TCP_CWND_DEBUG */ + + if (pcb->state != SYN_SENT) { + TCPH_SET_FLAG(seg->tcphdr, TCP_ACK); + } + + err = tcp_output_segment(seg, pcb, netif); + if (err != ERR_OK) { + /* segment could not be sent, for whatever reason */ + tcp_set_flags(pcb, TF_NAGLEMEMERR); + return err; + } +#if TCP_OVERSIZE_DBGCHECK + seg->oversize_left = 0; +#endif /* TCP_OVERSIZE_DBGCHECK */ + pcb->unsent = seg->next; + if (pcb->state != SYN_SENT) { + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg); + if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) { + pcb->snd_nxt = snd_nxt; + } + /* put segment on unacknowledged list if length > 0 */ + if (TCP_TCPLEN(seg) > 0) { + seg->next = NULL; + /* unacked list is empty? */ + if (pcb->unacked == NULL) { + pcb->unacked = seg; + useg = seg; + /* unacked list is not empty? */ + } else { + /* In the case of fast retransmit, the packet should not go to the tail + * of the unacked queue, but rather somewhere before it. We need to check for + * this case. -STJ Jul 27, 2004 */ + if (TCP_SEQ_LT(lwip_ntohl(seg->tcphdr->seqno), lwip_ntohl(useg->tcphdr->seqno))) { + /* add segment to before tail of unacked list, keeping the list sorted */ + struct tcp_seg **cur_seg = &(pcb->unacked); + while (*cur_seg && + TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) { + cur_seg = &((*cur_seg)->next ); + } + seg->next = (*cur_seg); + (*cur_seg) = seg; + } else { + /* add segment to tail of unacked list */ + useg->next = seg; + useg = useg->next; + } + } + /* do not queue empty segments on the unacked list */ + } else { + tcp_seg_free(seg); + } + seg = pcb->unsent; + } +#if TCP_OVERSIZE + if (pcb->unsent == NULL) { + /* last unsent has been removed, reset unsent_oversize */ + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + +output_done: + tcp_clear_flags(pcb, TF_NAGLEMEMERR); + return ERR_OK; +} + +/** Check if a segment's pbufs are used by someone else than TCP. + * This can happen on retransmission if the pbuf of this segment is still + * referenced by the netif driver due to deferred transmission. + * This is the case (only!) if someone down the TX call path called + * pbuf_ref() on one of the pbufs! + * + * @arg seg the tcp segment to check + * @return 1 if ref != 1, 0 if ref == 1 + */ +static int +tcp_output_segment_busy(const struct tcp_seg *seg) +{ + LWIP_ASSERT("tcp_output_segment_busy: invalid seg", seg != NULL); + + /* We only need to check the first pbuf here: + If a pbuf is queued for transmission, a driver calls pbuf_ref(), + which only changes the ref count of the first pbuf */ + if (seg->p->ref != 1) { + /* other reference found */ + return 1; + } + /* no other references found */ + return 0; +} + +/** + * Called by tcp_output() to actually send a TCP segment over IP. 
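+ * It fills in the remaining header fields (ackno, window, options and
+ * checksum) and passes the segment to ip_output_if() on the given netif.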
+ * + * @param seg the tcp_seg to send + * @param pcb the tcp_pcb for the TCP connection used to send the segment + * @param netif the netif used to send the segment + */ +static err_t +tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif) +{ + err_t err; + u16_t len; + u32_t *opts; +#if TCP_CHECKSUM_ON_COPY + int seg_chksum_was_swapped = 0; +#endif + + LWIP_ASSERT("tcp_output_segment: invalid seg", seg != NULL); + LWIP_ASSERT("tcp_output_segment: invalid pcb", pcb != NULL); + LWIP_ASSERT("tcp_output_segment: invalid netif", netif != NULL); + + if (tcp_output_segment_busy(seg)) { + /* This should not happen: rexmit functions should have checked this. + However, since this function modifies p->len, we must not continue in this case. */ + LWIP_DEBUGF(TCP_RTO_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_output_segment: segment busy\n")); + return ERR_OK; + } + + /* The TCP header has already been constructed, but the ackno and + wnd fields remain. */ + seg->tcphdr->ackno = lwip_htonl(pcb->rcv_nxt); + + /* advertise our receive window size in this TCP segment */ +#if LWIP_WND_SCALE + if (seg->flags & TF_SEG_OPTS_WND_SCALE) { + /* The Window field in a SYN segment itself (the only type where we send + the window scale option) is never scaled. */ + seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(pcb->rcv_ann_wnd)); + } else +#endif /* LWIP_WND_SCALE */ + { + seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd))); + } + + pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd; + + /* Add any requested options. NB MSS option is only set on SYN + packets, so ignore it here */ + /* cast through void* to get rid of alignment warnings */ + opts = (u32_t *)(void *)(seg->tcphdr + 1); + if (seg->flags & TF_SEG_OPTS_MSS) { + u16_t mss; +#if TCP_CALCULATE_EFF_SEND_MSS + mss = tcp_eff_send_mss_netif(TCP_MSS, netif, &pcb->remote_ip); +#else /* TCP_CALCULATE_EFF_SEND_MSS */ + mss = TCP_MSS; +#endif /* TCP_CALCULATE_EFF_SEND_MSS */ + *opts = TCP_BUILD_MSS_OPTION(mss); + opts += 1; + } +#if LWIP_TCP_TIMESTAMPS + pcb->ts_lastacksent = pcb->rcv_nxt; + + if (seg->flags & TF_SEG_OPTS_TS) { + tcp_build_timestamp_option(pcb, opts); + opts += 3; + } +#endif +#if LWIP_WND_SCALE + if (seg->flags & TF_SEG_OPTS_WND_SCALE) { + tcp_build_wnd_scale_option(opts); + opts += 1; + } +#endif +#if LWIP_TCP_SACK_OUT + if (seg->flags & TF_SEG_OPTS_SACK_PERM) { + /* Pad with two NOP options to make everything nicely aligned + * NOTE: When we send both timestamp and SACK_PERM options, + * we could use the first two NOPs before the timestamp to store SACK_PERM option, + * but that would complicate the code. + */ + *(opts++) = PP_HTONL(0x01010402); + } +#endif + + /* Set retransmission timer running if it is not currently enabled + This must be set before checking the route. */ + if (pcb->rtime < 0) { + pcb->rtime = 0; + } + + if (pcb->rttest == 0) { + pcb->rttest = tcp_ticks; + pcb->rtseq = lwip_ntohl(seg->tcphdr->seqno); + + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_output_segment: rtseq %"U32_F"\n", pcb->rtseq)); + } + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output_segment: %"U32_F":%"U32_F"\n", + lwip_htonl(seg->tcphdr->seqno), lwip_htonl(seg->tcphdr->seqno) + + seg->len)); + + len = (u16_t)((u8_t *)seg->tcphdr - (u8_t *)seg->p->payload); + if (len == 0) { + /** Exclude retransmitted segments from this count. 
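+         (On a retransmission, p->payload still points at the IP header left
+         over from the previous send, so len computed above is non-zero.)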
*/ + MIB2_STATS_INC(mib2.tcpoutsegs); + } + + seg->p->len -= len; + seg->p->tot_len -= len; + + seg->p->payload = seg->tcphdr; + + seg->tcphdr->chksum = 0; + +#ifdef LWIP_HOOK_TCP_OUT_ADD_TCPOPTS + opts = LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(seg->p, seg->tcphdr, pcb, opts); +#endif + LWIP_ASSERT("options not filled", (u8_t *)opts == ((u8_t *)(seg->tcphdr + 1)) + LWIP_TCP_OPT_LENGTH_SEGMENT(seg->flags, pcb)); + +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { +#if TCP_CHECKSUM_ON_COPY + u32_t acc; +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK + u16_t chksum_slow = ip_chksum_pseudo(seg->p, IP_PROTO_TCP, + seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */ + if ((seg->flags & TF_SEG_DATA_CHECKSUMMED) == 0) { + LWIP_ASSERT("data included but not checksummed", + seg->p->tot_len == TCPH_HDRLEN_BYTES(seg->tcphdr)); + } + + /* rebuild TCP header checksum (TCP header changes for retransmissions!) */ + acc = ip_chksum_pseudo_partial(seg->p, IP_PROTO_TCP, + seg->p->tot_len, TCPH_HDRLEN_BYTES(seg->tcphdr), &pcb->local_ip, &pcb->remote_ip); + /* add payload checksum */ + if (seg->chksum_swapped) { + seg_chksum_was_swapped = 1; + seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum); + seg->chksum_swapped = 0; + } + acc = (u16_t)~acc + seg->chksum; + seg->tcphdr->chksum = (u16_t)~FOLD_U32T(acc); +#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK + if (chksum_slow != seg->tcphdr->chksum) { + TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL( + ("tcp_output_segment: calculated checksum is %"X16_F" instead of %"X16_F"\n", + seg->tcphdr->chksum, chksum_slow)); + seg->tcphdr->chksum = chksum_slow; + } +#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */ +#else /* TCP_CHECKSUM_ON_COPY */ + seg->tcphdr->chksum = ip_chksum_pseudo(seg->p, IP_PROTO_TCP, + seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip); +#endif /* TCP_CHECKSUM_ON_COPY */ + } +#endif /* CHECKSUM_GEN_TCP */ + TCP_STATS_INC(tcp.xmit); + + NETIF_SET_HINTS(netif, &(pcb->netif_hints)); + err = ip_output_if(seg->p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, + pcb->tos, IP_PROTO_TCP, netif); + NETIF_RESET_HINTS(netif); + +#if TCP_CHECKSUM_ON_COPY + if (seg_chksum_was_swapped) { + /* if data is added to this segment later, chksum needs to be swapped, + so restore this now */ + seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum); + seg->chksum_swapped = 1; + } +#endif + + return err; +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_slowtmr() for slow retransmission. + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +err_t +tcp_rexmit_rto_prepare(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg; + + LWIP_ASSERT("tcp_rexmit_rto_prepare: invalid pcb", pcb != NULL); + + if (pcb->unacked == NULL) { + return ERR_VAL; + } + + /* Move all unacked segments to the head of the unsent queue. + However, give up if any of the unsent pbufs are still referenced by the + netif driver due to deferred transmission. No point loading the link further + if it is struggling to flush its buffered writes. 
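+      (A pbuf whose ref count is above 1 is still held by the driver; see
+      tcp_output_segment_busy().)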
*/ + for (seg = pcb->unacked; seg->next != NULL; seg = seg->next) { + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit_rto: segment busy\n")); + return ERR_VAL; + } + } + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit_rto: segment busy\n")); + return ERR_VAL; + } + /* concatenate unsent queue after unacked queue */ + seg->next = pcb->unsent; +#if TCP_OVERSIZE_DBGCHECK + /* if last unsent changed, we need to update unsent_oversize */ + if (pcb->unsent == NULL) { + pcb->unsent_oversize = seg->oversize_left; + } +#endif /* TCP_OVERSIZE_DBGCHECK */ + /* unsent queue is the concatenated queue (of unacked, unsent) */ + pcb->unsent = pcb->unacked; + /* unacked queue is now empty */ + pcb->unacked = NULL; + + /* Mark RTO in-progress */ + tcp_set_flags(pcb, TF_RTO); + /* Record the next byte following retransmit */ + pcb->rto_end = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg); + /* Don't take any RTT measurements after retransmitting. */ + pcb->rttest = 0; + + return ERR_OK; +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_slowtmr() for slow retransmission. + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +void +tcp_rexmit_rto_commit(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_rto_commit: invalid pcb", pcb != NULL); + + /* increment number of retransmissions */ + if (pcb->nrtx < 0xFF) { + ++pcb->nrtx; + } + /* Do the actual retransmission */ + tcp_output(pcb); +} + +/** + * Requeue all unacked segments for retransmission + * + * Called by tcp_process() only, tcp_slowtmr() needs to do some things between + * "prepare" and "commit". + * + * @param pcb the tcp_pcb for which to re-enqueue all unacked segments + */ +void +tcp_rexmit_rto(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_rto: invalid pcb", pcb != NULL); + + if (tcp_rexmit_rto_prepare(pcb) == ERR_OK) { + tcp_rexmit_rto_commit(pcb); + } +} + +/** + * Requeue the first unacked segment for retransmission + * + * Called by tcp_receive() for fast retransmit. + * + * @param pcb the tcp_pcb for which to retransmit the first unacked segment + */ +err_t +tcp_rexmit(struct tcp_pcb *pcb) +{ + struct tcp_seg *seg; + struct tcp_seg **cur_seg; + + LWIP_ASSERT("tcp_rexmit: invalid pcb", pcb != NULL); + + if (pcb->unacked == NULL) { + return ERR_VAL; + } + + seg = pcb->unacked; + + /* Give up if the segment is still referenced by the netif driver + due to deferred transmission. */ + if (tcp_output_segment_busy(seg)) { + LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit busy\n")); + return ERR_VAL; + } + + /* Move the first unacked segment to the unsent queue */ + /* Keep the unsent queue sorted. */ + pcb->unacked = seg->next; + + cur_seg = &(pcb->unsent); + while (*cur_seg && + TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) { + cur_seg = &((*cur_seg)->next ); + } + seg->next = *cur_seg; + *cur_seg = seg; +#if TCP_OVERSIZE + if (seg->next == NULL) { + /* the retransmitted segment is last in unsent, so reset unsent_oversize */ + pcb->unsent_oversize = 0; + } +#endif /* TCP_OVERSIZE */ + + if (pcb->nrtx < 0xFF) { + ++pcb->nrtx; + } + + /* Don't take any rtt measurements after retransmitting. */ + pcb->rttest = 0; + + /* Do the actual retransmission. */ + MIB2_STATS_INC(mib2.tcpretranssegs); + /* No need to call tcp_output: we are always called from tcp_input() + and thus tcp_output directly returns. 
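+     (tcp_output() checks tcp_input_pcb and returns ERR_OK immediately while
+     input processing is still in progress.)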
*/ + return ERR_OK; +} + + +/** + * Handle retransmission after three dupacks received + * + * @param pcb the tcp_pcb for which to retransmit the first unacked segment + */ +void +tcp_rexmit_fast(struct tcp_pcb *pcb) +{ + LWIP_ASSERT("tcp_rexmit_fast: invalid pcb", pcb != NULL); + + if (pcb->unacked != NULL && !(pcb->flags & TF_INFR)) { + /* This is fast retransmit. Retransmit the first unacked segment. */ + LWIP_DEBUGF(TCP_FR_DEBUG, + ("tcp_receive: dupacks %"U16_F" (%"U32_F + "), fast retransmit %"U32_F"\n", + (u16_t)pcb->dupacks, pcb->lastack, + lwip_ntohl(pcb->unacked->tcphdr->seqno))); + if (tcp_rexmit(pcb) == ERR_OK) { + /* Set ssthresh to half of the minimum of the current + * cwnd and the advertised window */ + pcb->ssthresh = LWIP_MIN(pcb->cwnd, pcb->snd_wnd) / 2; + + /* The minimum value for ssthresh should be 2 MSS */ + if (pcb->ssthresh < (2U * pcb->mss)) { + LWIP_DEBUGF(TCP_FR_DEBUG, + ("tcp_receive: The minimum value for ssthresh %"TCPWNDSIZE_F + " should be min 2 mss %"U16_F"...\n", + pcb->ssthresh, (u16_t)(2 * pcb->mss))); + pcb->ssthresh = 2 * pcb->mss; + } + + pcb->cwnd = pcb->ssthresh + 3 * pcb->mss; + tcp_set_flags(pcb, TF_INFR); + + /* Reset the retransmission timer to prevent immediate rto retransmissions */ + pcb->rtime = 0; + } + } +} + +static struct pbuf * +tcp_output_alloc_header_common(u32_t ackno, u16_t optlen, u16_t datalen, + u32_t seqno_be /* already in network byte order */, + u16_t src_port, u16_t dst_port, u8_t flags, u16_t wnd) +{ + struct tcp_hdr *tcphdr; + struct pbuf *p; + + p = pbuf_alloc(PBUF_IP, TCP_HLEN + optlen + datalen, PBUF_RAM); + if (p != NULL) { + LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr", + (p->len >= TCP_HLEN + optlen)); + tcphdr = (struct tcp_hdr *)p->payload; + tcphdr->src = lwip_htons(src_port); + tcphdr->dest = lwip_htons(dst_port); + tcphdr->seqno = seqno_be; + tcphdr->ackno = lwip_htonl(ackno); + TCPH_HDRLEN_FLAGS_SET(tcphdr, (5 + optlen / 4), flags); + tcphdr->wnd = lwip_htons(wnd); + tcphdr->chksum = 0; + tcphdr->urgp = 0; + } + return p; +} + +/** Allocate a pbuf and create a tcphdr at p->payload, used for output + * functions other than the default tcp_output -> tcp_output_segment + * (e.g. tcp_send_empty_ack, etc.) + * + * @param pcb tcp pcb for which to send a packet (used to initialize tcp_hdr) + * @param optlen length of header-options + * @param datalen length of tcp data to reserve in pbuf + * @param seqno_be seqno in network byte order (big-endian) + * @return pbuf with p->payload being the tcp_hdr + */ +static struct pbuf * +tcp_output_alloc_header(struct tcp_pcb *pcb, u16_t optlen, u16_t datalen, + u32_t seqno_be /* already in network byte order */) +{ + struct pbuf *p; + + LWIP_ASSERT("tcp_output_alloc_header: invalid pcb", pcb != NULL); + + p = tcp_output_alloc_header_common(pcb->rcv_nxt, optlen, datalen, + seqno_be, pcb->local_port, pcb->remote_port, TCP_ACK, + TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd))); + if (p != NULL) { + /* If we're sending a packet, update the announced right window edge */ + pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd; + } + return p; +} + +/* Fill in options for control segments */ +static void +tcp_output_fill_options(const struct tcp_pcb *pcb, struct pbuf *p, u8_t optflags, u8_t num_sacks) +{ + struct tcp_hdr *tcphdr; + u32_t *opts; + u16_t sacks_len = 0; + + LWIP_ASSERT("tcp_output_fill_options: invalid pbuf", p != NULL); + + tcphdr = (struct tcp_hdr *)p->payload; + opts = (u32_t *)(void *)(tcphdr + 1); + + /* NB. 
MSS and window scale options are only sent on SYNs, so ignore them here */ + +#if LWIP_TCP_TIMESTAMPS + if (optflags & TF_SEG_OPTS_TS) { + tcp_build_timestamp_option(pcb, opts); + opts += 3; + } +#endif + +#if LWIP_TCP_SACK_OUT + if (pcb && (num_sacks > 0)) { + tcp_build_sack_option(pcb, opts, num_sacks); + /* 1 word for SACKs header (including 2xNOP), and 2 words for each SACK */ + sacks_len = 1 + num_sacks * 2; + opts += sacks_len; + } +#else + LWIP_UNUSED_ARG(num_sacks); +#endif + +#ifdef LWIP_HOOK_TCP_OUT_ADD_TCPOPTS + opts = LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(p, tcphdr, pcb, opts); +#endif + + LWIP_UNUSED_ARG(pcb); + LWIP_UNUSED_ARG(sacks_len); + LWIP_ASSERT("options not filled", (u8_t *)opts == ((u8_t *)(tcphdr + 1)) + sacks_len * 4 + LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb)); + LWIP_UNUSED_ARG(optflags); /* for LWIP_NOASSERT */ + LWIP_UNUSED_ARG(opts); /* for LWIP_NOASSERT */ +} + +/** Output a control segment pbuf to IP. + * + * Called from tcp_rst, tcp_send_empty_ack, tcp_keepalive and tcp_zero_window_probe, + * this function combines selecting a netif for transmission, generating the tcp + * header checksum and calling ip_output_if while handling netif hints and stats. + */ +static err_t +tcp_output_control_segment(const struct tcp_pcb *pcb, struct pbuf *p, + const ip_addr_t *src, const ip_addr_t *dst) +{ + err_t err; + struct netif *netif; + + LWIP_ASSERT("tcp_output_control_segment: invalid pbuf", p != NULL); + + netif = tcp_route(pcb, src, dst); + if (netif == NULL) { + err = ERR_RTE; + } else { + u8_t ttl, tos; +#if CHECKSUM_GEN_TCP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { + struct tcp_hdr *tcphdr = (struct tcp_hdr *)p->payload; + tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, + src, dst); + } +#endif + if (pcb != NULL) { + NETIF_SET_HINTS(netif, LWIP_CONST_CAST(struct netif_hint*, &(pcb->netif_hints))); + ttl = pcb->ttl; + tos = pcb->tos; + } else { + /* Send output with hardcoded TTL/HL since we have no access to the pcb */ + ttl = TCP_TTL; + tos = 0; + } + TCP_STATS_INC(tcp.xmit); + err = ip_output_if(p, src, dst, ttl, tos, IP_PROTO_TCP, netif); + NETIF_RESET_HINTS(netif); + } + pbuf_free(p); + return err; +} + +/** + * Send a TCP RESET packet (empty segment with RST flag set) either to + * abort a connection or to show that there is no matching local connection + * for a received segment. + * + * Called by tcp_abort() (to abort a local connection), tcp_input() (if no + * matching local pcb was found), tcp_listen_input() (if incoming segment + * has ACK flag set) and tcp_process() (received segment in the wrong state) + * + * Since a RST segment is in most cases not sent for an active connection, + * tcp_rst() has a number of arguments that are taken from a tcp_pcb for + * most other segment output functions. 
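+ *
+ * Hedged usage sketch (hypothetical values, host byte order; the segment
+ * built below always carries RST|ACK, and pcb may be NULL when no matching
+ * connection exists):
+ *
+ *   tcp_rst(NULL, seqno, ackno, &local_ip, &remote_ip, 80, 54321);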
+ * + * @param pcb TCP pcb (may be NULL if no pcb is available) + * @param seqno the sequence number to use for the outgoing segment + * @param ackno the acknowledge number to use for the outgoing segment + * @param local_ip the local IP address to send the segment from + * @param remote_ip the remote IP address to send the segment to + * @param local_port the local TCP port to send the segment from + * @param remote_port the remote TCP port to send the segment to + */ +void +tcp_rst(const struct tcp_pcb *pcb, u32_t seqno, u32_t ackno, + const ip_addr_t *local_ip, const ip_addr_t *remote_ip, + u16_t local_port, u16_t remote_port) +{ + struct pbuf *p; + u16_t wnd; + u8_t optlen; + + LWIP_ASSERT("tcp_rst: invalid local_ip", local_ip != NULL); + LWIP_ASSERT("tcp_rst: invalid remote_ip", remote_ip != NULL); + + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + +#if LWIP_WND_SCALE + wnd = PP_HTONS(((TCP_WND >> TCP_RCV_SCALE) & 0xFFFF)); +#else + wnd = PP_HTONS(TCP_WND); +#endif + + p = tcp_output_alloc_header_common(ackno, optlen, 0, lwip_htonl(seqno), local_port, + remote_port, TCP_RST | TCP_ACK, wnd); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_rst: could not allocate memory for pbuf\n")); + return; + } + tcp_output_fill_options(pcb, p, 0, optlen); + + MIB2_STATS_INC(mib2.tcpoutrsts); + + tcp_output_control_segment(pcb, p, local_ip, remote_ip); + LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_rst: seqno %"U32_F" ackno %"U32_F".\n", seqno, ackno)); +} + +/** + * Send an ACK without data. + * + * @param pcb Protocol control block for the TCP connection to send the ACK + */ +err_t +tcp_send_empty_ack(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + u8_t optlen, optflags = 0; + u8_t num_sacks = 0; + + LWIP_ASSERT("tcp_send_empty_ack: invalid pcb", pcb != NULL); + +#if LWIP_TCP_TIMESTAMPS + if (pcb->flags & TF_TIMESTAMP) { + optflags = TF_SEG_OPTS_TS; + } +#endif + optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb); + +#if LWIP_TCP_SACK_OUT + /* For now, SACKs are only sent with empty ACKs */ + if ((num_sacks = tcp_get_num_sacks(pcb, optlen)) > 0) { + optlen += 4 + num_sacks * 8; /* 4 bytes for header (including 2*NOP), plus 8B for each SACK */ + } +#endif + + p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt)); + if (p == NULL) { + /* let tcp_fasttmr retry sending this ACK */ + tcp_set_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: (ACK) could not allocate pbuf\n")); + return ERR_BUF; + } + tcp_output_fill_options(pcb, p, optflags, num_sacks); + +#if LWIP_TCP_TIMESTAMPS + pcb->ts_lastacksent = pcb->rcv_nxt; +#endif + + LWIP_DEBUGF(TCP_OUTPUT_DEBUG, + ("tcp_output: sending ACK for %"U32_F"\n", pcb->rcv_nxt)); + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + if (err != ERR_OK) { + /* let tcp_fasttmr retry sending this ACK */ + tcp_set_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } else { + /* remove ACK flags from the PCB, as we sent an empty ACK now */ + tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW); + } + + return err; +} + +/** + * Send keepalive packets to keep a connection active although + * no data is sent over it. 
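+ *
+ * The probe built below is an empty segment with seqno snd_nxt - 1, a
+ * sequence number the peer has already acknowledged, so a live peer
+ * answers with an ACK without any payload being re-sent.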
+ * + * Called by tcp_slowtmr() + * + * @param pcb the tcp_pcb for which to send a keepalive packet + */ +err_t +tcp_keepalive(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + u8_t optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + + LWIP_ASSERT("tcp_keepalive: invalid pcb", pcb != NULL); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: sending KEEPALIVE probe to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: tcp_ticks %"U32_F" pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n", + tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent)); + + p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt - 1)); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, + ("tcp_keepalive: could not allocate memory for pbuf\n")); + return ERR_MEM; + } + tcp_output_fill_options(pcb, p, 0, optlen); + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: seqno %"U32_F" ackno %"U32_F" err %d.\n", + pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err)); + return err; +} + +/** + * Send persist timer zero-window probes to keep a connection active + * when a window update is lost. + * + * Called by tcp_slowtmr() + * + * @param pcb the tcp_pcb for which to send a zero-window probe packet + */ +err_t +tcp_zero_window_probe(struct tcp_pcb *pcb) +{ + err_t err; + struct pbuf *p; + struct tcp_hdr *tcphdr; + struct tcp_seg *seg; + u16_t len; + u8_t is_fin; + u32_t snd_nxt; + u8_t optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb); + + LWIP_ASSERT("tcp_zero_window_probe: invalid pcb", pcb != NULL); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: sending ZERO WINDOW probe to ")); + ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(TCP_DEBUG, ("\n")); + + LWIP_DEBUGF(TCP_DEBUG, + ("tcp_zero_window_probe: tcp_ticks %"U32_F + " pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n", + tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent)); + + /* Only consider unsent, persist timer should be off when there is data in-flight */ + seg = pcb->unsent; + if (seg == NULL) { + /* Not expected, persist timer should be off when the send buffer is empty */ + return ERR_OK; + } + + /* increment probe count. NOTE: we record probe even if it fails + to actually transmit due to an error. This ensures memory exhaustion/ + routing problem doesn't leave a zero-window pcb as an indefinite zombie. + RTO mechanism has similar behavior, see pcb->nrtx */ + if (pcb->persist_probe < 0xFF) { + ++pcb->persist_probe; + } + + is_fin = ((TCPH_FLAGS(seg->tcphdr) & TCP_FIN) != 0) && (seg->len == 0); + /* we want to send one seqno: either FIN or data (no options) */ + len = is_fin ? 0 : 1; + + p = tcp_output_alloc_header(pcb, optlen, len, seg->tcphdr->seqno); + if (p == NULL) { + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: no memory for pbuf\n")); + return ERR_MEM; + } + tcphdr = (struct tcp_hdr *)p->payload; + + if (is_fin) { + /* FIN segment, no data */ + TCPH_FLAGS_SET(tcphdr, TCP_ACK | TCP_FIN); + } else { + /* Data segment, copy in one byte from the head of the unacked queue */ + char *d = ((char *)p->payload + TCP_HLEN); + /* Depending on whether the segment has already been sent (unacked) or not + (unsent), seg->p->payload points to the IP header or TCP header. + Ensure we copy the first TCP data byte: */ + pbuf_copy_partial(seg->p, d, 1, seg->p->tot_len - seg->len); + } + + /* The byte may be acknowledged without the window being opened. 
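+     In that case snd_nxt is advanced past the probe byte below so the same
+     sequence number is not reused for new data. Hypothetical trace: probe
+     seq=1000 len=1, the peer ACKs 1001 while still advertising window 0,
+     and snd_nxt becomes 1001.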
*/ + snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + 1; + if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) { + pcb->snd_nxt = snd_nxt; + } + tcp_output_fill_options(pcb, p, 0, optlen); + + err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip); + + LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: seqno %"U32_F + " ackno %"U32_F" err %d.\n", + pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err)); + return err; +} +#endif /* LWIP_TCP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/timeouts.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/timeouts.c new file mode 100644 index 0000000..c3431e8 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/timeouts.c @@ -0,0 +1,451 @@ +/** + * @file + * Stack-internal timers implementation. + * This file includes timer callbacks for stack-internal timers as well as + * functions to set up or stop timers and check for expired timers. + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * Simon Goldschmidt + * + */ + +#include "lwip/opt.h" + +#include "lwip/timeouts.h" +#include "lwip/priv/tcp_priv.h" + +#include "lwip/def.h" +#include "lwip/memp.h" +#include "lwip/priv/tcpip_priv.h" + +#include "lwip/ip4_frag.h" +#include "lwip/etharp.h" +#include "lwip/dhcp.h" +#include "lwip/autoip.h" +#include "lwip/igmp.h" +#include "lwip/dns.h" +#include "lwip/nd6.h" +#include "lwip/ip6_frag.h" +#include "lwip/mld6.h" +#include "lwip/dhcp6.h" +#include "lwip/sys.h" +#include "lwip/pbuf.h" + +#if LWIP_DEBUG_TIMERNAMES +#define HANDLER(x) x, #x +#else /* LWIP_DEBUG_TIMERNAMES */ +#define HANDLER(x) x +#endif /* LWIP_DEBUG_TIMERNAMES */ + +#define LWIP_MAX_TIMEOUT 0x7fffffff + +/* Check if timer's expiry time is greater than time and care about u32_t wraparounds */ +#define TIME_LESS_THAN(t, compare_to) ( (((u32_t)((t)-(compare_to))) > LWIP_MAX_TIMEOUT) ? 
1 : 0 ) + +/** This array contains all stack-internal cyclic timers. To get the number of + * timers, use LWIP_ARRAYSIZE() */ +const struct lwip_cyclic_timer lwip_cyclic_timers[] = { +#if LWIP_TCP + /* The TCP timer is a special case: it does not have to run always and + is triggered to start from TCP using tcp_timer_needed() */ + {TCP_TMR_INTERVAL, HANDLER(tcp_tmr)}, +#endif /* LWIP_TCP */ +#if LWIP_IPV4 +#if IP_REASSEMBLY + {IP_TMR_INTERVAL, HANDLER(ip_reass_tmr)}, +#endif /* IP_REASSEMBLY */ +#if LWIP_ARP + {ARP_TMR_INTERVAL, HANDLER(etharp_tmr)}, +#endif /* LWIP_ARP */ +#if LWIP_DHCP + {DHCP_COARSE_TIMER_MSECS, HANDLER(dhcp_coarse_tmr)}, + {DHCP_FINE_TIMER_MSECS, HANDLER(dhcp_fine_tmr)}, +#endif /* LWIP_DHCP */ +#if LWIP_AUTOIP + {AUTOIP_TMR_INTERVAL, HANDLER(autoip_tmr)}, +#endif /* LWIP_AUTOIP */ +#if LWIP_IGMP + {IGMP_TMR_INTERVAL, HANDLER(igmp_tmr)}, +#endif /* LWIP_IGMP */ +#endif /* LWIP_IPV4 */ +#if LWIP_DNS + {DNS_TMR_INTERVAL, HANDLER(dns_tmr)}, +#endif /* LWIP_DNS */ +#if LWIP_IPV6 + {ND6_TMR_INTERVAL, HANDLER(nd6_tmr)}, +#if LWIP_IPV6_REASS + {IP6_REASS_TMR_INTERVAL, HANDLER(ip6_reass_tmr)}, +#endif /* LWIP_IPV6_REASS */ +#if LWIP_IPV6_MLD + {MLD6_TMR_INTERVAL, HANDLER(mld6_tmr)}, +#endif /* LWIP_IPV6_MLD */ +#if LWIP_IPV6_DHCP6 + {DHCP6_TIMER_MSECS, HANDLER(dhcp6_tmr)}, +#endif /* LWIP_IPV6_DHCP6 */ +#endif /* LWIP_IPV6 */ +}; +const int lwip_num_cyclic_timers = LWIP_ARRAYSIZE(lwip_cyclic_timers); + +#if LWIP_TIMERS && !LWIP_TIMERS_CUSTOM + +/** The one and only timeout list */ +static struct sys_timeo *next_timeout; + +static u32_t current_timeout_due_time; + +#if LWIP_TESTMODE +struct sys_timeo** +sys_timeouts_get_next_timeout(void) +{ + return &next_timeout; +} +#endif + +#if LWIP_TCP +/** global variable that shows if the tcp timer is currently scheduled or not */ +static int tcpip_tcp_timer_active; + +/** + * Timer callback function that calls tcp_tmr() and reschedules itself. + * + * @param arg unused argument + */ +static void +tcpip_tcp_timer(void *arg) +{ + LWIP_UNUSED_ARG(arg); + + /* call TCP timer handler */ + tcp_tmr(); + /* timer still needed? */ + if (tcp_active_pcbs || tcp_tw_pcbs) { + /* restart timer */ + sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL); + } else { + /* disable timer */ + tcpip_tcp_timer_active = 0; + } +} + +/** + * Called from TCP_REG when registering a new PCB: + * the reason is to have the TCP timer only running when + * there are active (or time-wait) PCBs. + */ +void +tcp_timer_needed(void) +{ + LWIP_ASSERT_CORE_LOCKED(); + + /* timer is off but needed again? 
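+     tcpip_tcp_timer() keeps rescheduling itself while tcp_active_pcbs or
+     tcp_tw_pcbs is non-empty and clears tcpip_tcp_timer_active once both
+     lists are empty, so the cycle only ever needs to be restarted here.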
*/ + if (!tcpip_tcp_timer_active && (tcp_active_pcbs || tcp_tw_pcbs)) { + /* enable and start timer */ + tcpip_tcp_timer_active = 1; + sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL); + } +} +#endif /* LWIP_TCP */ + +static void +#if LWIP_DEBUG_TIMERNAMES +sys_timeout_abs(u32_t abs_time, sys_timeout_handler handler, void *arg, const char *handler_name) +#else /* LWIP_DEBUG_TIMERNAMES */ +sys_timeout_abs(u32_t abs_time, sys_timeout_handler handler, void *arg) +#endif +{ + struct sys_timeo *timeout, *t; + + timeout = (struct sys_timeo *)memp_malloc(MEMP_SYS_TIMEOUT); + if (timeout == NULL) { + LWIP_ASSERT("sys_timeout: timeout != NULL, pool MEMP_SYS_TIMEOUT is empty", timeout != NULL); + return; + } + + timeout->next = NULL; + timeout->h = handler; + timeout->arg = arg; + timeout->time = abs_time; + +#if LWIP_DEBUG_TIMERNAMES + timeout->handler_name = handler_name; + LWIP_DEBUGF(TIMERS_DEBUG, ("sys_timeout: %p abs_time=%"U32_F" handler=%s arg=%p\n", + (void *)timeout, abs_time, handler_name, (void *)arg)); +#endif /* LWIP_DEBUG_TIMERNAMES */ + + if (next_timeout == NULL) { + next_timeout = timeout; + return; + } + if (TIME_LESS_THAN(timeout->time, next_timeout->time)) { + timeout->next = next_timeout; + next_timeout = timeout; + } else { + for (t = next_timeout; t != NULL; t = t->next) { + if ((t->next == NULL) || TIME_LESS_THAN(timeout->time, t->next->time)) { + timeout->next = t->next; + t->next = timeout; + break; + } + } + } +} + +/** + * Timer callback function that calls cyclic->handler() and reschedules itself. + * + * @param arg unused argument + */ +#if !LWIP_TESTMODE +static +#endif +void +lwip_cyclic_timer(void *arg) +{ + u32_t now; + u32_t next_timeout_time; + const struct lwip_cyclic_timer *cyclic = (const struct lwip_cyclic_timer *)arg; + +#if LWIP_DEBUG_TIMERNAMES + LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: %s()\n", cyclic->handler_name)); +#endif + cyclic->handler(); + + now = sys_now(); + next_timeout_time = (u32_t)(current_timeout_due_time + cyclic->interval_ms); /* overflow handled by TIME_LESS_THAN macro */ + if (TIME_LESS_THAN(next_timeout_time, now)) { + /* timer would immediately expire again -> "overload" -> restart without any correction */ +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs((u32_t)(now + cyclic->interval_ms), lwip_cyclic_timer, arg, cyclic->handler_name); +#else + sys_timeout_abs((u32_t)(now + cyclic->interval_ms), lwip_cyclic_timer, arg); +#endif + + } else { + /* correct cyclic interval with handler execution delay and sys_check_timeouts jitter */ +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs(next_timeout_time, lwip_cyclic_timer, arg, cyclic->handler_name); +#else + sys_timeout_abs(next_timeout_time, lwip_cyclic_timer, arg); +#endif + } +} + +/** Initialize this module */ +void sys_timeouts_init(void) +{ + size_t i; + /* tcp_tmr() at index 0 is started on demand */ + for (i = (LWIP_TCP ? 1 : 0); i < LWIP_ARRAYSIZE(lwip_cyclic_timers); i++) { + /* we have to cast via size_t to get rid of const warning + (this is OK as cyclic_timer() casts back to const* */ + sys_timeout(lwip_cyclic_timers[i].interval_ms, lwip_cyclic_timer, LWIP_CONST_CAST(void *, &lwip_cyclic_timers[i])); + } +} + +/** + * Create a one-shot timer (aka timeout). 
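+ * A hedged usage sketch (hypothetical handler; the timeout is one-shot, so
+ * a handler wanting periodic behaviour must re-arm itself):
+ *
+ *   static void my_timeout(void *arg) { LWIP_UNUSED_ARG(arg); }
+ *   sys_timeout(1000, my_timeout, NULL);
+ *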
Timeouts are processed in the + * following cases: + * - while waiting for a message using sys_timeouts_mbox_fetch() + * - by calling sys_check_timeouts() (NO_SYS==1 only) + * + * @param msecs time in milliseconds after that the timer should expire + * @param handler callback function to call when msecs have elapsed + * @param arg argument to pass to the callback function + */ +#if LWIP_DEBUG_TIMERNAMES +void +sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char *handler_name) +#else /* LWIP_DEBUG_TIMERNAMES */ +void +sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg) +#endif /* LWIP_DEBUG_TIMERNAMES */ +{ + u32_t next_timeout_time; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("Timeout time too long, max is LWIP_UINT32_MAX/4 msecs", msecs <= (LWIP_UINT32_MAX / 4)); + + next_timeout_time = (u32_t)(sys_now() + msecs); /* overflow handled by TIME_LESS_THAN macro */ + +#if LWIP_DEBUG_TIMERNAMES + sys_timeout_abs(next_timeout_time, handler, arg, handler_name); +#else + sys_timeout_abs(next_timeout_time, handler, arg); +#endif +} + +/** + * Go through timeout list (for this task only) and remove the first matching + * entry (subsequent entries remain untouched), even though the timeout has not + * triggered yet. + * + * @param handler callback function that would be called by the timeout + * @param arg callback argument that would be passed to handler +*/ +void +sys_untimeout(sys_timeout_handler handler, void *arg) +{ + struct sys_timeo *prev_t, *t; + + LWIP_ASSERT_CORE_LOCKED(); + + if (next_timeout == NULL) { + return; + } + + for (t = next_timeout, prev_t = NULL; t != NULL; prev_t = t, t = t->next) { + if ((t->h == handler) && (t->arg == arg)) { + /* We have a match */ + /* Unlink from previous in list */ + if (prev_t == NULL) { + next_timeout = t->next; + } else { + prev_t->next = t->next; + } + memp_free(MEMP_SYS_TIMEOUT, t); + return; + } + } + return; +} + +/** + * @ingroup lwip_nosys + * Handle timeouts for NO_SYS==1 (i.e. without using + * tcpip_thread/sys_timeouts_mbox_fetch(). Uses sys_now() to call timeout + * handler functions when timeouts expire. + * + * Must be called periodically from your main loop. + */ +void +sys_check_timeouts(void) +{ + u32_t now; + + LWIP_ASSERT_CORE_LOCKED(); + + /* Process only timers expired at the start of the function. */ + now = sys_now(); + + do { + struct sys_timeo *tmptimeout; + sys_timeout_handler handler; + void *arg; + + PBUF_CHECK_FREE_OOSEQ(); + + tmptimeout = next_timeout; + if (tmptimeout == NULL) { + return; + } + + if (TIME_LESS_THAN(now, tmptimeout->time)) { + return; + } + + /* Timeout has expired */ + next_timeout = tmptimeout->next; + handler = tmptimeout->h; + arg = tmptimeout->arg; + current_timeout_due_time = tmptimeout->time; +#if LWIP_DEBUG_TIMERNAMES + if (handler != NULL) { + LWIP_DEBUGF(TIMERS_DEBUG, ("sct calling h=%s t=%"U32_F" arg=%p\n", + tmptimeout->handler_name, sys_now() - tmptimeout->time, arg)); + } +#endif /* LWIP_DEBUG_TIMERNAMES */ + memp_free(MEMP_SYS_TIMEOUT, tmptimeout); + if (handler != NULL) { + handler(arg); + } + LWIP_TCPIP_THREAD_ALIVE(); + + /* Repeat until all expired timers have been called */ + } while (1); +} + +/** Rebase the timeout times to the current time. + * This is necessary if sys_check_timeouts() hasn't been called for a long + * time (e.g. while saving energy) to prevent all timer functions of that + * period being called. 
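+ * Hypothetical numbers: if the system slept for 60 s while three timeouts
+ * fell due, rebasing each entry via t->time = (t->time - base) + now keeps
+ * their relative spacing but anchors the earliest at "now", instead of
+ * firing all three back to back.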
+ */ +void +sys_restart_timeouts(void) +{ + u32_t now; + u32_t base; + struct sys_timeo *t; + + if (next_timeout == NULL) { + return; + } + + now = sys_now(); + base = next_timeout->time; + + for (t = next_timeout; t != NULL; t = t->next) { + t->time = (t->time - base) + now; + } +} + +/** Return the time left before the next timeout is due. If no timeouts are + * enqueued, returns 0xffffffff + */ +u32_t +sys_timeouts_sleeptime(void) +{ + u32_t now; + + LWIP_ASSERT_CORE_LOCKED(); + + if (next_timeout == NULL) { + return SYS_TIMEOUTS_SLEEPTIME_INFINITE; + } + now = sys_now(); + if (TIME_LESS_THAN(next_timeout->time, now)) { + return 0; + } else { + u32_t ret = (u32_t)(next_timeout->time - now); + LWIP_ASSERT("invalid sleeptime", ret <= LWIP_MAX_TIMEOUT); + return ret; + } +} + +#else /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ +/* Satisfy the TCP code which calls this function */ +void +tcp_timer_needed(void) +{ +} +#endif /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/udp.c b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/udp.c new file mode 100644 index 0000000..4f45f8d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/core/udp.c @@ -0,0 +1,1314 @@ +/** + * @file + * User Datagram Protocol module\n + * The code for the User Datagram Protocol UDP & UDPLite (RFC 3828).\n + * See also @ref udp_raw + * + * @defgroup udp_raw UDP + * @ingroup callbackstyle_api + * User Datagram Protocol module\n + * @see @ref api + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ + +/* @todo Check the use of '(struct udp_pcb).chksum_len_rx'! 
+ */
+
+#include "lwip/opt.h"
+
+#if LWIP_UDP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/udp.h"
+#include "lwip/def.h"
+#include "lwip/memp.h"
+#include "lwip/inet_chksum.h"
+#include "lwip/ip_addr.h"
+#include "lwip/ip6.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/netif.h"
+#include "lwip/icmp.h"
+#include "lwip/icmp6.h"
+#include "lwip/stats.h"
+#include "lwip/snmp.h"
+#include "lwip/dhcp.h"
+
+#include <string.h>
+
+#ifndef UDP_LOCAL_PORT_RANGE_START
+/* From http://www.iana.org/assignments/port-numbers:
+   "The Dynamic and/or Private Ports are those from 49152 through 65535" */
+#define UDP_LOCAL_PORT_RANGE_START 0xc000
+#define UDP_LOCAL_PORT_RANGE_END 0xffff
+#define UDP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & (u16_t)~UDP_LOCAL_PORT_RANGE_START) + UDP_LOCAL_PORT_RANGE_START))
+#endif
+
+/* last local UDP port */
+static u16_t udp_port = UDP_LOCAL_PORT_RANGE_START;
+
+/* The list of UDP PCBs */
+/* exported in udp.h (was static) */
+struct udp_pcb *udp_pcbs;
+
+/**
+ * Initialize this module.
+ */
+void
+udp_init(void)
+{
+#ifdef LWIP_RAND
+  udp_port = UDP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND());
+#endif /* LWIP_RAND */
+}
+
+/**
+ * Allocate a new local UDP port.
+ *
+ * @return a new (free) local UDP port number
+ */
+static u16_t
+udp_new_port(void)
+{
+  u16_t n = 0;
+  struct udp_pcb *pcb;
+
+again:
+  if (udp_port++ == UDP_LOCAL_PORT_RANGE_END) {
+    udp_port = UDP_LOCAL_PORT_RANGE_START;
+  }
+  /* Check all PCBs. */
+  for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) {
+    if (pcb->local_port == udp_port) {
+      if (++n > (UDP_LOCAL_PORT_RANGE_END - UDP_LOCAL_PORT_RANGE_START)) {
+        return 0;
+      }
+      goto again;
+    }
+  }
+  return udp_port;
+}
+
+/** Common code to see if the current input packet matches the pcb
+ * (current input packet is accessed via ip(4/6)_current_* macros)
+ *
+ * @param pcb pcb to check
+ * @param inp network interface on which the datagram was received (only used for IPv4)
+ * @param broadcast 1 if this is an IPv4 broadcast (global or subnet-only), 0 otherwise (only used for IPv4)
+ * @return 1 on match, 0 otherwise
+ */
+static u8_t
+udp_input_local_match(struct udp_pcb *pcb, struct netif *inp, u8_t broadcast)
+{
+  LWIP_UNUSED_ARG(inp); /* in IPv6 only case */
+  LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */
+
+  LWIP_ASSERT("udp_input_local_match: invalid pcb", pcb != NULL);
+  LWIP_ASSERT("udp_input_local_match: invalid netif", inp != NULL);
+
+  /* check if PCB is bound to specific netif */
+  if ((pcb->netif_idx != NETIF_NO_INDEX) &&
+      (pcb->netif_idx != netif_get_index(ip_data.current_input_netif))) {
+    return 0;
+  }
+
+  /* Dual-stack: PCBs listening to any IP type also listen to any IP address */
+  if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) {
+#if LWIP_IPV4 && IP_SOF_BROADCAST_RECV
+    if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) {
+      return 0;
+    }
+#endif /* LWIP_IPV4 && IP_SOF_BROADCAST_RECV */
+    return 1;
+  }
+
+  /* Only need to check PCB if incoming IP version matches PCB IP version */
+  if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) {
+#if LWIP_IPV4
+    /* Special case: IPv4 broadcast: all or broadcasts in my subnet
+     * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */
+    if (broadcast != 0) {
+#if IP_SOF_BROADCAST_RECV
+      if (ip_get_option(pcb, SOF_BROADCAST))
+#endif /* IP_SOF_BROADCAST_RECV */
+      {
+        if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) ||
+            ((ip4_current_dest_addr()->addr == IPADDR_BROADCAST)) ||
+            ip4_addr_netcmp(ip_2_ip4(&pcb->local_ip), ip4_current_dest_addr(),
netif_ip4_netmask(inp))) { + return 1; + } + } + } else +#endif /* LWIP_IPV4 */ + /* Handle IPv4 and IPv6: all or exact match */ + if (ip_addr_isany(&pcb->local_ip) || ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { + return 1; + } + } + + return 0; +} + +/** + * Process an incoming UDP datagram. + * + * Given an incoming UDP datagram (as a chain of pbufs) this function + * finds a corresponding UDP PCB and hands over the pbuf to the pcbs + * recv function. If no pcb is found or the datagram is incorrect, the + * pbuf is freed. + * + * @param p pbuf to be demultiplexed to a UDP PCB (p->payload pointing to the UDP header) + * @param inp network interface on which the datagram was received. + * + */ +void +udp_input(struct pbuf *p, struct netif *inp) +{ + struct udp_hdr *udphdr; + struct udp_pcb *pcb, *prev; + struct udp_pcb *uncon_pcb; + u16_t src, dest; + u8_t broadcast; + u8_t for_us = 0; + + LWIP_UNUSED_ARG(inp); + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ASSERT("udp_input: invalid pbuf", p != NULL); + LWIP_ASSERT("udp_input: invalid netif", inp != NULL); + + PERF_START; + + UDP_STATS_INC(udp.recv); + + /* Check minimum length (UDP header) */ + if (p->len < UDP_HLEN) { + /* drop short packets */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_input: short UDP datagram (%"U16_F" bytes) discarded\n", p->tot_len)); + UDP_STATS_INC(udp.lenerr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + goto end; + } + + udphdr = (struct udp_hdr *)p->payload; + + /* is broadcast packet ? */ + broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()); + + LWIP_DEBUGF(UDP_DEBUG, ("udp_input: received datagram of length %"U16_F"\n", p->tot_len)); + + /* convert src and dest ports to host byte order */ + src = lwip_ntohs(udphdr->src); + dest = lwip_ntohs(udphdr->dest); + + udp_debug_print(udphdr); + + /* print the UDP source and destination */ + LWIP_DEBUGF(UDP_DEBUG, ("udp (")); + ip_addr_debug_print_val(UDP_DEBUG, *ip_current_dest_addr()); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", lwip_ntohs(udphdr->dest))); + ip_addr_debug_print_val(UDP_DEBUG, *ip_current_src_addr()); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", lwip_ntohs(udphdr->src))); + + pcb = NULL; + prev = NULL; + uncon_pcb = NULL; + /* Iterate through the UDP pcb list for a matching pcb. + * 'Perfect match' pcbs (connected to the remote port & ip address) are + * preferred. If no perfect match is found, the first unconnected pcb that + * matches the local port and ip address gets the datagram. 
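+ * Hypothetical example: a datagram 10.0.0.1:6000 -> 192.168.1.2:5000 is
+ * handed to a pcb connected to 10.0.0.1:6000 (perfect match) even when an
+ * unconnected pcb bound to port 5000 sits earlier in the list.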
*/ + for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) { + /* print the PCB local and remote address */ + LWIP_DEBUGF(UDP_DEBUG, ("pcb (")); + ip_addr_debug_print_val(UDP_DEBUG, pcb->local_ip); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", pcb->local_port)); + ip_addr_debug_print_val(UDP_DEBUG, pcb->remote_ip); + LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", pcb->remote_port)); + + /* compare PCB local addr+port to UDP destination addr+port */ + if ((pcb->local_port == dest) && + (udp_input_local_match(pcb, inp, broadcast) != 0)) { + if ((pcb->flags & UDP_FLAGS_CONNECTED) == 0) { + if (uncon_pcb == NULL) { + /* the first unconnected matching PCB */ + uncon_pcb = pcb; +#if LWIP_IPV4 + } else if (broadcast && ip4_current_dest_addr()->addr == IPADDR_BROADCAST) { + /* global broadcast address (only valid for IPv4; match was checked before) */ + if (!IP_IS_V4_VAL(uncon_pcb->local_ip) || !ip4_addr_cmp(ip_2_ip4(&uncon_pcb->local_ip), netif_ip4_addr(inp))) { + /* uncon_pcb does not match the input netif, check this pcb */ + if (IP_IS_V4_VAL(pcb->local_ip) && ip4_addr_cmp(ip_2_ip4(&pcb->local_ip), netif_ip4_addr(inp))) { + /* better match */ + uncon_pcb = pcb; + } + } +#endif /* LWIP_IPV4 */ + } +#if SO_REUSE + else if (!ip_addr_isany(&pcb->local_ip)) { + /* prefer specific IPs over catch-all */ + uncon_pcb = pcb; + } +#endif /* SO_REUSE */ + } + + /* compare PCB remote addr+port to UDP source addr+port */ + if ((pcb->remote_port == src) && + (ip_addr_isany_val(pcb->remote_ip) || + ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) { + /* the first fully matching PCB */ + if (prev != NULL) { + /* move the pcb to the front of udp_pcbs so that is + found faster next time */ + prev->next = pcb->next; + pcb->next = udp_pcbs; + udp_pcbs = pcb; + } else { + UDP_STATS_INC(udp.cachehit); + } + break; + } + } + + prev = pcb; + } + /* no fully matching pcb found? then look for an unconnected pcb */ + if (pcb == NULL) { + pcb = uncon_pcb; + } + + /* Check checksum if this is a match or if it was directed at us. */ + if (pcb != NULL) { + for_us = 1; + } else { +#if LWIP_IPV6 + if (ip_current_is_v6()) { + for_us = netif_get_ip6_addr_match(inp, ip6_current_dest_addr()) >= 0; + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 + if (!ip_current_is_v6()) { + for_us = ip4_addr_cmp(netif_ip4_addr(inp), ip4_current_dest_addr()); + } +#endif /* LWIP_IPV4 */ + } + + if (for_us) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: calculating checksum\n")); +#if CHECKSUM_CHECK_UDP + IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_UDP) { +#if LWIP_UDPLITE + if (ip_current_header_proto() == IP_PROTO_UDPLITE) { + /* Do the UDP Lite checksum */ + u16_t chklen = lwip_ntohs(udphdr->len); + if (chklen < sizeof(struct udp_hdr)) { + if (chklen == 0) { + /* For UDP-Lite, checksum length of 0 means checksum + over the complete packet (See RFC 3828 chap. 3.1) */ + chklen = p->tot_len; + } else { + /* At least the UDP-Lite header must be covered by the + checksum! (Again, see RFC 3828 chap. 3.1) */ + goto chkerr; + } + } + if (ip_chksum_pseudo_partial(p, IP_PROTO_UDPLITE, + p->tot_len, chklen, + ip_current_src_addr(), ip_current_dest_addr()) != 0) { + goto chkerr; + } + } else +#endif /* LWIP_UDPLITE */ + { + if (udphdr->chksum != 0) { + if (ip_chksum_pseudo(p, IP_PROTO_UDP, p->tot_len, + ip_current_src_addr(), + ip_current_dest_addr()) != 0) { + goto chkerr; + } + } + } + } +#endif /* CHECKSUM_CHECK_UDP */ + if (pbuf_remove_header(p, UDP_HLEN)) { + /* Can we cope with this failing? 
Just assert for now */ + LWIP_ASSERT("pbuf_remove_header failed\n", 0); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + goto end; + } + + if (pcb != NULL) { + MIB2_STATS_INC(mib2.udpindatagrams); +#if SO_REUSE && SO_REUSE_RXTOALL + if (ip_get_option(pcb, SOF_REUSEADDR) && + (broadcast || ip_addr_ismulticast(ip_current_dest_addr()))) { + /* pass broadcast- or multicast packets to all multicast pcbs + if SOF_REUSEADDR is set on the first match */ + struct udp_pcb *mpcb; + for (mpcb = udp_pcbs; mpcb != NULL; mpcb = mpcb->next) { + if (mpcb != pcb) { + /* compare PCB local addr+port to UDP destination addr+port */ + if ((mpcb->local_port == dest) && + (udp_input_local_match(mpcb, inp, broadcast) != 0)) { + /* pass a copy of the packet to all local matches */ + if (mpcb->recv != NULL) { + struct pbuf *q; + q = pbuf_clone(PBUF_RAW, PBUF_POOL, p); + if (q != NULL) { + mpcb->recv(mpcb->recv_arg, mpcb, q, ip_current_src_addr(), src); + } + } + } + } + } + } +#endif /* SO_REUSE && SO_REUSE_RXTOALL */ + /* callback */ + if (pcb->recv != NULL) { + /* now the recv function is responsible for freeing p */ + pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr(), src); + } else { + /* no recv function registered? then we have to free the pbuf! */ + pbuf_free(p); + goto end; + } + } else { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: not for us.\n")); + +#if LWIP_ICMP || LWIP_ICMP6 + /* No match was found, send ICMP destination port unreachable unless + destination address was broadcast/multicast. */ + if (!broadcast && !ip_addr_ismulticast(ip_current_dest_addr())) { + /* move payload pointer back to ip header */ + pbuf_header_force(p, (s16_t)(ip_current_header_tot_len() + UDP_HLEN)); + icmp_port_unreach(ip_current_is_v6(), p); + } +#endif /* LWIP_ICMP || LWIP_ICMP6 */ + UDP_STATS_INC(udp.proterr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpnoports); + pbuf_free(p); + } + } else { + pbuf_free(p); + } +end: + PERF_STOP("udp_input"); + return; +#if CHECKSUM_CHECK_UDP +chkerr: + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("udp_input: UDP (or UDP Lite) datagram discarded due to failing checksum\n")); + UDP_STATS_INC(udp.chkerr); + UDP_STATS_INC(udp.drop); + MIB2_STATS_INC(mib2.udpinerrors); + pbuf_free(p); + PERF_STOP("udp_input"); +#endif /* CHECKSUM_CHECK_UDP */ +} + +/** + * @ingroup udp_raw + * Sends the pbuf p using UDP. The pbuf is not deallocated. + * + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * + * The datagram will be sent to the current remote_ip & remote_port + * stored in pcb. If the pcb is not bound to a port, it will + * automatically be bound to a random port. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_MEM. Out of memory. + * - ERR_RTE. Could not find route to destination address. + * - ERR_VAL. No PCB or PCB is dual-stack + * - More errors could be returned by lower protocol layers. 
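+ *
+ * A hedged usage sketch (hypothetical buffer; udp_send() never frees p, so
+ * the caller releases it afterwards):
+ *
+ *   struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM);
+ *   if (p != NULL) {
+ *     memcpy(p->payload, data, len);
+ *     err = udp_send(pcb, p);  // pcb was set up via udp_connect()
+ *     pbuf_free(p);
+ *   }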
+ * + * @see udp_disconnect() udp_sendto() + */ +err_t +udp_send(struct udp_pcb *pcb, struct pbuf *p) +{ + LWIP_ERROR("udp_send: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_send: invalid pbuf", p != NULL, return ERR_ARG); + + if (IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) { + return ERR_VAL; + } + + /* send to the packet using remote ip and port stored in the pcb */ + return udp_sendto(pcb, p, &pcb->remote_ip, pcb->remote_port); +} + +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP +/** @ingroup udp_raw + * Same as udp_send() but with checksum + */ +err_t +udp_send_chksum(struct udp_pcb *pcb, struct pbuf *p, + u8_t have_chksum, u16_t chksum) +{ + LWIP_ERROR("udp_send_chksum: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_send_chksum: invalid pbuf", p != NULL, return ERR_ARG); + + if (IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) { + return ERR_VAL; + } + + /* send to the packet using remote ip and port stored in the pcb */ + return udp_sendto_chksum(pcb, p, &pcb->remote_ip, pcb->remote_port, + have_chksum, chksum); +} +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + +/** + * @ingroup udp_raw + * Send data to a specified address using UDP. + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * @param dst_ip Destination IP address. + * @param dst_port Destination UDP port. + * + * dst_ip & dst_port are expected to be in the same byte order as in the pcb. + * + * If the PCB already has a remote address association, it will + * be restored after the data is sent. + * + * @return lwIP error code (@see udp_send for possible error codes) + * + * @see udp_disconnect() udp_send() + */ +err_t +udp_sendto(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_chksum(pcb, p, dst_ip, dst_port, 0, 0); +} + +/** @ingroup udp_raw + * Same as udp_sendto(), but with checksum */ +err_t +udp_sendto_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, u8_t have_chksum, u16_t chksum) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + struct netif *netif; + + LWIP_ERROR("udp_sendto: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send\n")); + + if (pcb->netif_idx != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->netif_idx); + } else { +#if LWIP_MULTICAST_TX_OPTIONS + netif = NULL; + if (ip_addr_ismulticast(dst_ip)) { + /* For IPv6, the interface to use for packets with a multicast destination + * is specified using an interface index. The same approach may be used for + * IPv4 as well, in which case it overrides the IPv4 multicast override + * address below. Here we have to look up the netif by going through the + * list, but by doing so we skip a route lookup. If the interface index has + * gone stale, we fall through and do the regular route lookup after all. */ + if (pcb->mcast_ifindex != NETIF_NO_INDEX) { + netif = netif_get_by_index(pcb->mcast_ifindex); + } +#if LWIP_IPV4 + else +#if LWIP_IPV6 + if (IP_IS_V4(dst_ip)) +#endif /* LWIP_IPV6 */ + { + /* IPv4 does not use source-based routing by default, so we use an + administratively selected interface for multicast by default. 
+ However, this can be overridden by setting an interface address + in pcb->mcast_ip4 that is used for routing. If this routing lookup + fails, we try regular routing as though no override was set. */ + if (!ip4_addr_isany_val(pcb->mcast_ip4) && + !ip4_addr_cmp(&pcb->mcast_ip4, IP4_ADDR_BROADCAST)) { + netif = ip4_route_src(ip_2_ip4(&pcb->local_ip), &pcb->mcast_ip4); + } + } +#endif /* LWIP_IPV4 */ + } + + if (netif == NULL) +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + { + /* find the outgoing network interface for this packet */ + netif = ip_route(&pcb->local_ip, dst_ip); + } + } + + /* no outgoing network interface could be found? */ + if (netif == NULL) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: No route to ")); + ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, dst_ip); + LWIP_DEBUGF(UDP_DEBUG, ("\n")); + UDP_STATS_INC(udp.rterr); + return ERR_RTE; + } +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum); +#else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + return udp_sendto_if(pcb, p, dst_ip, dst_port, netif); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ +} + +/** + * @ingroup udp_raw + * Send data to a specified address using UDP. + * The netif used for sending can be specified. + * + * This function exists mainly for DHCP, to be able to send UDP packets + * on a netif that is still down. + * + * @param pcb UDP PCB used to send the data. + * @param p chain of pbuf's to be sent. + * @param dst_ip Destination IP address. + * @param dst_port Destination UDP port. + * @param netif the netif used for sending. + * + * dst_ip & dst_port are expected to be in the same byte order as in the pcb. + * + * @return lwIP error code (@see udp_send for possible error codes) + * + * @see udp_disconnect() udp_send() + */ +err_t +udp_sendto_if(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0); +} + +/** Same as udp_sendto_if(), but with checksum */ +err_t +udp_sendto_if_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, struct netif *netif, u8_t have_chksum, + u16_t chksum) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + const ip_addr_t *src_ip; + + LWIP_ERROR("udp_sendto_if: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if: invalid netif", netif != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + + /* PCB local address is IP_ANY_ADDR or multicast? */ +#if LWIP_IPV6 + if (IP_IS_V6(dst_ip)) { + if (ip6_addr_isany(ip_2_ip6(&pcb->local_ip)) || + ip6_addr_ismulticast(ip_2_ip6(&pcb->local_ip))) { + src_ip = ip6_select_source_address(netif, ip_2_ip6(dst_ip)); + if (src_ip == NULL) { + /* No suitable source address was found. */ + return ERR_RTE; + } + } else { + /* use UDP PCB local IPv6 address as source address, if still valid. */ + if (netif_get_ip6_addr_match(netif, ip_2_ip6(&pcb->local_ip)) < 0) { + /* Address isn't valid anymore. 
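+        The bound IPv6 address may have been removed from the netif or its
+        lifetime may have expired; fail with ERR_RTE rather than send from
+        a stale source address.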
*/ + return ERR_RTE; + } + src_ip = &pcb->local_ip; + } + } +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 && LWIP_IPV6 + else +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#if LWIP_IPV4 + if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) || + ip4_addr_ismulticast(ip_2_ip4(&pcb->local_ip))) { + /* if the local_ip is any or multicast + * use the outgoing network interface IP address as source address */ + src_ip = netif_ip_addr4(netif); + } else { + /* check if UDP PCB local IP address is correct + * this could be an old address if netif->ip_addr has changed */ + if (!ip4_addr_cmp(ip_2_ip4(&(pcb->local_ip)), netif_ip4_addr(netif))) { + /* local_ip doesn't match, drop the packet */ + return ERR_RTE; + } + /* use UDP PCB local IP address as source address */ + src_ip = &pcb->local_ip; + } +#endif /* LWIP_IPV4 */ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum, src_ip); +#else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + return udp_sendto_if_src(pcb, p, dst_ip, dst_port, netif, src_ip); +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ +} + +/** @ingroup udp_raw + * Same as @ref udp_sendto_if, but with source address */ +err_t +udp_sendto_if_src(struct udp_pcb *pcb, struct pbuf *p, + const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, const ip_addr_t *src_ip) +{ +#if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP + return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0, src_ip); +} + +/** Same as udp_sendto_if_src(), but with checksum */ +err_t +udp_sendto_if_src_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, + u16_t dst_port, struct netif *netif, u8_t have_chksum, + u16_t chksum, const ip_addr_t *src_ip) +{ +#endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ + struct udp_hdr *udphdr; + err_t err; + struct pbuf *q; /* q will be sent down the stack */ + u8_t ip_proto; + u8_t ttl; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_sendto_if_src: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid pbuf", p != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid dst_ip", dst_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid src_ip", src_ip != NULL, return ERR_ARG); + LWIP_ERROR("udp_sendto_if_src: invalid netif", netif != NULL, return ERR_ARG); + + if (!IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) || + !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { + return ERR_VAL; + } + +#if LWIP_IPV4 && IP_SOF_BROADCAST + /* broadcast filter? */ + if (!ip_get_option(pcb, SOF_BROADCAST) && +#if LWIP_IPV6 + IP_IS_V4(dst_ip) && +#endif /* LWIP_IPV6 */ + ip_addr_isbroadcast(dst_ip, netif)) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, + ("udp_sendto_if: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb)); + return ERR_VAL; + } +#endif /* LWIP_IPV4 && IP_SOF_BROADCAST */ + + /* if the PCB is not yet bound to a port, bind it here */ + if (pcb->local_port == 0) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send: not yet bound to a port, binding now\n")); + err = udp_bind(pcb, &pcb->local_ip, pcb->local_port); + if (err != ERR_OK) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: forced port bind failed\n")); + return err; + } + } + + /* packet too large to add a UDP header without causing an overflow? */ + if ((u16_t)(p->tot_len + UDP_HLEN) < p->tot_len) { + return ERR_MEM; + } + /* not enough space to add an UDP header to first pbuf in given p chain? 
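+     If so, a separate UDP_HLEN (8 byte) header pbuf q is allocated below
+     and chained in front of the caller's data, q -> p; only q is freed
+     again after sending.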
*/ + if (pbuf_add_header(p, UDP_HLEN)) { + /* allocate header in a separate new pbuf */ + q = pbuf_alloc(PBUF_IP, UDP_HLEN, PBUF_RAM); + /* new header pbuf could not be allocated? */ + if (q == NULL) { + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: could not allocate header\n")); + return ERR_MEM; + } + if (p->tot_len != 0) { + /* chain header q in front of given pbuf p (only if p contains data) */ + pbuf_chain(q, p); + } + /* first pbuf q points to header pbuf */ + LWIP_DEBUGF(UDP_DEBUG, + ("udp_send: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p)); + } else { + /* adding space for header within p succeeded */ + /* first pbuf q equals given pbuf */ + q = p; + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: added header in given pbuf %p\n", (void *)p)); + } + LWIP_ASSERT("check that first pbuf can hold struct udp_hdr", + (q->len >= sizeof(struct udp_hdr))); + /* q now represents the packet to be sent */ + udphdr = (struct udp_hdr *)q->payload; + udphdr->src = lwip_htons(pcb->local_port); + udphdr->dest = lwip_htons(dst_port); + /* in UDP, 0 checksum means 'no checksum' */ + udphdr->chksum = 0x0000; + + /* Multicast Loop? */ +#if LWIP_MULTICAST_TX_OPTIONS + if (((pcb->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) { + q->flags |= PBUF_FLAG_MCASTLOOP; + } +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: sending datagram of length %"U16_F"\n", q->tot_len)); + +#if LWIP_UDPLITE + /* UDP Lite protocol? */ + if (pcb->flags & UDP_FLAGS_UDPLITE) { + u16_t chklen, chklen_hdr; + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE packet length %"U16_F"\n", q->tot_len)); + /* set UDP message length in UDP header */ + chklen_hdr = chklen = pcb->chksum_len_tx; + if ((chklen < sizeof(struct udp_hdr)) || (chklen > q->tot_len)) { + if (chklen != 0) { + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE pcb->chksum_len is illegal: %"U16_F"\n", chklen)); + } + /* For UDP-Lite, checksum length of 0 means checksum + over the complete packet. (See RFC 3828 chap. 3.1) + At least the UDP-Lite header must be covered by the + checksum, therefore, if chksum_len has an illegal + value, we generate the checksum over the complete + packet to be safe. */ + chklen_hdr = 0; + chklen = q->tot_len; + } + udphdr->len = lwip_htons(chklen_hdr); + /* calculate checksum */ +#if CHECKSUM_GEN_UDP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) { +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + chklen = UDP_HLEN; + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + udphdr->chksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDPLITE, + q->tot_len, chklen, src_ip, dst_ip); +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + u32_t acc; + acc = udphdr->chksum + (u16_t)~(chksum); + udphdr->chksum = FOLD_U32T(acc); + } +#endif /* LWIP_CHECKSUM_ON_COPY */ + + /* chksum zero must become 0xffff, as zero means 'no checksum' */ + if (udphdr->chksum == 0x0000) { + udphdr->chksum = 0xffff; + } + } +#endif /* CHECKSUM_GEN_UDP */ + + ip_proto = IP_PROTO_UDPLITE; + } else +#endif /* LWIP_UDPLITE */ + { /* UDP */ + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP packet length %"U16_F"\n", q->tot_len)); + udphdr->len = lwip_htons(q->tot_len); + /* calculate checksum */ +#if CHECKSUM_GEN_UDP + IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) { + /* Checksum is mandatory over IPv6. 
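+          A zero checksum means "no checksum" on the wire, which IPv6
+          forbids (RFC 8200), so the UDP_FLAGS_NOCHKSUM shortcut in the
+          test below can only take effect for IPv4 destinations.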
*/ + if (IP_IS_V6(dst_ip) || (pcb->flags & UDP_FLAGS_NOCHKSUM) == 0) { + u16_t udpchksum; +#if LWIP_CHECKSUM_ON_COPY + if (have_chksum) { + u32_t acc; + udpchksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDP, + q->tot_len, UDP_HLEN, src_ip, dst_ip); + acc = udpchksum + (u16_t)~(chksum); + udpchksum = FOLD_U32T(acc); + } else +#endif /* LWIP_CHECKSUM_ON_COPY */ + { + udpchksum = ip_chksum_pseudo(q, IP_PROTO_UDP, q->tot_len, + src_ip, dst_ip); + } + + /* chksum zero must become 0xffff, as zero means 'no checksum' */ + if (udpchksum == 0x0000) { + udpchksum = 0xffff; + } + udphdr->chksum = udpchksum; + } + } +#endif /* CHECKSUM_GEN_UDP */ + ip_proto = IP_PROTO_UDP; + } + + /* Determine TTL to use */ +#if LWIP_MULTICAST_TX_OPTIONS + ttl = (ip_addr_ismulticast(dst_ip) ? udp_get_multicast_ttl(pcb) : pcb->ttl); +#else /* LWIP_MULTICAST_TX_OPTIONS */ + ttl = pcb->ttl; +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP checksum 0x%04"X16_F"\n", udphdr->chksum)); + LWIP_DEBUGF(UDP_DEBUG, ("udp_send: ip_output_if (,,,,0x%02"X16_F",)\n", (u16_t)ip_proto)); + /* output to IP */ + NETIF_SET_HINTS(netif, &(pcb->netif_hints)); + err = ip_output_if_src(q, src_ip, dst_ip, ttl, pcb->tos, ip_proto, netif); + NETIF_RESET_HINTS(netif); + + /* @todo: must this be increased even if error occurred? */ + MIB2_STATS_INC(mib2.udpoutdatagrams); + + /* did we chain a separate header pbuf earlier? */ + if (q != p) { + /* free the header pbuf */ + pbuf_free(q); + q = NULL; + /* p is still referenced by the caller, and will live on */ + } + + UDP_STATS_INC(udp.xmit); + return err; +} + +/** + * @ingroup udp_raw + * Bind an UDP PCB. + * + * @param pcb UDP PCB to be bound with a local address ipaddr and port. + * @param ipaddr local IP address to bind with. Use IP_ANY_TYPE to + * bind to all local interfaces. + * @param port local UDP port to bind with. Use 0 to automatically bind + * to a random port between UDP_LOCAL_PORT_RANGE_START and + * UDP_LOCAL_PORT_RANGE_END. + * + * ipaddr & port are expected to be in the same byte order as in the pcb. + * + * @return lwIP error code. + * - ERR_OK. Successful. No error occurred. + * - ERR_USE. The specified ipaddr and port are already bound to by + * another UDP PCB. + * + * @see udp_disconnect() + */ +err_t +udp_bind(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + struct udp_pcb *ipcb; + u8_t rebind; +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + ip_addr_t zoned_ipaddr; +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + LWIP_ASSERT_CORE_LOCKED(); + +#if LWIP_IPV4 + /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ + if (ipaddr == NULL) { + ipaddr = IP4_ADDR_ANY; + } +#else /* LWIP_IPV4 */ + LWIP_ERROR("udp_bind: invalid ipaddr", ipaddr != NULL, return ERR_ARG); +#endif /* LWIP_IPV4 */ + + LWIP_ERROR("udp_bind: invalid pcb", pcb != NULL, return ERR_ARG); + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_bind(ipaddr = ")); + ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_TRACE, ipaddr); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, (", port = %"U16_F")\n", port)); + + rebind = 0; + /* Check for double bind and rebind of the same pcb */ + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + /* is this UDP PCB already on active list? */ + if (pcb == ipcb) { + rebind = 1; + break; + } + } + +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now. + * This is legacy support: scope-aware callers should always provide properly + * zoned source addresses. 
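+   * (Hypothetical example: binding to the link-local address fe80::1
+   * without a zone gets one assigned here, much as if fe80::1%1 had
+   * been given explicitly.)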
Do the zone selection before the address-in-use
+ * check below; as such we have to make a temporary copy of the address. */
+  if (IP_IS_V6(ipaddr) && ip6_addr_lacks_zone(ip_2_ip6(ipaddr), IP6_UNKNOWN)) {
+    ip_addr_copy(zoned_ipaddr, *ipaddr);
+    ip6_addr_select_zone(ip_2_ip6(&zoned_ipaddr), ip_2_ip6(&zoned_ipaddr));
+    ipaddr = &zoned_ipaddr;
+  }
+#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */
+
+  /* no port specified? */
+  if (port == 0) {
+    port = udp_new_port();
+    if (port == 0) {
+      /* no more ports available in local range */
+      LWIP_DEBUGF(UDP_DEBUG, ("udp_bind: out of free UDP ports\n"));
+      return ERR_USE;
+    }
+  } else {
+    for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) {
+      if (pcb != ipcb) {
+        /* By default, we don't allow binding to a port that any other udp
+           PCB is already bound to, unless *all* PCBs with that port have the
+           REUSEADDR flag set. */
+#if SO_REUSE
+        if (!ip_get_option(pcb, SOF_REUSEADDR) ||
+            !ip_get_option(ipcb, SOF_REUSEADDR))
+#endif /* SO_REUSE */
+        {
+          /* port matches that of PCB in list and REUSEADDR not set -> reject */
+          if ((ipcb->local_port == port) &&
+              /* IP address matches or any IP used? */
+              (ip_addr_cmp(&ipcb->local_ip, ipaddr) || ip_addr_isany(ipaddr) ||
+               ip_addr_isany(&ipcb->local_ip))) {
+            /* other PCB already binds to this local IP and port */
+            LWIP_DEBUGF(UDP_DEBUG,
+                        ("udp_bind: local port %"U16_F" already bound by another pcb\n", port));
+            return ERR_USE;
+          }
+        }
+      }
+    }
+  }
+
+  ip_addr_set_ipaddr(&pcb->local_ip, ipaddr);
+
+  pcb->local_port = port;
+  mib2_udp_bind(pcb);
+  /* pcb not active yet? */
+  if (rebind == 0) {
+    /* place the PCB on the active list if not already there */
+    pcb->next = udp_pcbs;
+    udp_pcbs = pcb;
+  }
+  LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_bind: bound to "));
+  ip_addr_debug_print_val(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, pcb->local_ip);
+  LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->local_port));
+  return ERR_OK;
+}
+
+/**
+ * @ingroup udp_raw
+ * Bind an UDP PCB to a specific netif.
+ * After calling this function, all packets received via this PCB
+ * are guaranteed to have come in via the specified netif, and all
+ * outgoing packets will go out via the specified netif.
+ *
+ * @param pcb UDP PCB to be bound.
+ * @param netif netif to bind udp pcb to. Can be NULL.
+ *
+ * @see udp_disconnect()
+ */
+void
+udp_bind_netif(struct udp_pcb *pcb, const struct netif *netif)
+{
+  LWIP_ASSERT_CORE_LOCKED();
+
+  if (netif != NULL) {
+    pcb->netif_idx = netif_get_index(netif);
+  } else {
+    pcb->netif_idx = NETIF_NO_INDEX;
+  }
+}
+
+/**
+ * @ingroup udp_raw
+ * Sets the remote end of the pcb. This function does not generate any
+ * network traffic, but only sets the remote address of the pcb.
+ *
+ * @param pcb UDP PCB to be connected with remote address ipaddr and port.
+ * @param ipaddr remote IP address to connect with.
+ * @param port remote UDP port to connect with.
+ *
+ * @return lwIP error code
+ *
+ * ipaddr & port are expected to be in the same byte order as in the pcb.
+ *
+ * The udp pcb is bound to a random local port if not already bound.
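+ *
+ * Example usage (an illustrative sketch added in this edit, not upstream
+ * lwIP documentation; the address 192.168.0.10, port 4242 and the pbuf p
+ * are placeholders):
+ *
+ *   struct udp_pcb *pcb = udp_new();
+ *   ip_addr_t dst;
+ *   IP_ADDR4(&dst, 192, 168, 0, 10);
+ *   if ((pcb != NULL) && (udp_connect(pcb, &dst, 4242) == ERR_OK)) {
+ *     udp_send(pcb, p);  // p is a previously allocated pbuf; datagrams now
+ *                        // go to, and are accepted only from, 192.168.0.10:4242
+ *   }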
+ * + * @see udp_disconnect() + */ +err_t +udp_connect(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) +{ + struct udp_pcb *ipcb; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_connect: invalid pcb", pcb != NULL, return ERR_ARG); + LWIP_ERROR("udp_connect: invalid ipaddr", ipaddr != NULL, return ERR_ARG); + + if (pcb->local_port == 0) { + err_t err = udp_bind(pcb, &pcb->local_ip, pcb->local_port); + if (err != ERR_OK) { + return err; + } + } + + ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr); +#if LWIP_IPV6 && LWIP_IPV6_SCOPES + /* If the given IP address should have a zone but doesn't, assign one now, + * using the bound address to make a more informed decision when possible. */ + if (IP_IS_V6(&pcb->remote_ip) && + ip6_addr_lacks_zone(ip_2_ip6(&pcb->remote_ip), IP6_UNKNOWN)) { + ip6_addr_select_zone(ip_2_ip6(&pcb->remote_ip), ip_2_ip6(&pcb->local_ip)); + } +#endif /* LWIP_IPV6 && LWIP_IPV6_SCOPES */ + + pcb->remote_port = port; + pcb->flags |= UDP_FLAGS_CONNECTED; + + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_connect: connected to ")); + ip_addr_debug_print_val(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, + pcb->remote_ip); + LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->remote_port)); + + /* Insert UDP PCB into the list of active UDP PCBs. */ + for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { + if (pcb == ipcb) { + /* already on the list, just return */ + return ERR_OK; + } + } + /* PCB not yet on the list, add PCB now */ + pcb->next = udp_pcbs; + udp_pcbs = pcb; + return ERR_OK; +} + +/** + * @ingroup udp_raw + * Remove the remote end of the pcb. This function does not generate + * any network traffic, but only removes the remote address of the pcb. + * + * @param pcb the udp pcb to disconnect. + */ +void +udp_disconnect(struct udp_pcb *pcb) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_disconnect: invalid pcb", pcb != NULL, return); + + /* reset remote address association */ +#if LWIP_IPV4 && LWIP_IPV6 + if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { + ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE); + } else { +#endif + ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip); +#if LWIP_IPV4 && LWIP_IPV6 + } +#endif + pcb->remote_port = 0; + pcb->netif_idx = NETIF_NO_INDEX; + /* mark PCB as unconnected */ + udp_clear_flags(pcb, UDP_FLAGS_CONNECTED); +} + +/** + * @ingroup udp_raw + * Set a receive callback for a UDP PCB. + * This callback will be called when receiving a datagram for the pcb. + * + * @param pcb the pcb for which to set the recv callback + * @param recv function pointer of the callback function + * @param recv_arg additional argument to pass to the callback function + */ +void +udp_recv(struct udp_pcb *pcb, udp_recv_fn recv, void *recv_arg) +{ + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_recv: invalid pcb", pcb != NULL, return); + + /* remember recv() callback and user data */ + pcb->recv = recv; + pcb->recv_arg = recv_arg; +} + +/** + * @ingroup udp_raw + * Removes and deallocates the pcb. + * + * @param pcb UDP PCB to be removed. The PCB is removed from the list of + * UDP PCB's and the data structure is freed from memory. + * + * @see udp_new() + */ +void +udp_remove(struct udp_pcb *pcb) +{ + struct udp_pcb *pcb2; + + LWIP_ASSERT_CORE_LOCKED(); + + LWIP_ERROR("udp_remove: invalid pcb", pcb != NULL, return); + + mib2_udp_unbind(pcb); + /* pcb to be removed is first in list? 
*/
+  if (udp_pcbs == pcb) {
+    /* make list start at 2nd pcb */
+    udp_pcbs = udp_pcbs->next;
+    /* pcb not 1st in list */
+  } else {
+    for (pcb2 = udp_pcbs; pcb2 != NULL; pcb2 = pcb2->next) {
+      /* find pcb in udp_pcbs list */
+      if (pcb2->next != NULL && pcb2->next == pcb) {
+        /* remove pcb from list */
+        pcb2->next = pcb->next;
+        break;
+      }
+    }
+  }
+  memp_free(MEMP_UDP_PCB, pcb);
+}
+
+/**
+ * @ingroup udp_raw
+ * Creates a new UDP pcb which can be used for UDP communication. The
+ * pcb is not active until it has either been bound to a local address
+ * or connected to a remote address.
+ *
+ * @return The UDP PCB which was created. NULL if the PCB data structure
+ * could not be allocated.
+ *
+ * @see udp_remove()
+ */
+struct udp_pcb *
+udp_new(void)
+{
+  struct udp_pcb *pcb;
+
+  LWIP_ASSERT_CORE_LOCKED();
+
+  pcb = (struct udp_pcb *)memp_malloc(MEMP_UDP_PCB);
+  /* could allocate UDP PCB? */
+  if (pcb != NULL) {
+    /* UDP Lite: by initializing to all zeroes, chksum_len is set to 0
+     * which means checksum is generated over the whole datagram by default
+     * (recommended as default by RFC 3828). */
+    /* initialize PCB to all zeroes */
+    memset(pcb, 0, sizeof(struct udp_pcb));
+    pcb->ttl = UDP_TTL;
+#if LWIP_MULTICAST_TX_OPTIONS
+    udp_set_multicast_ttl(pcb, UDP_TTL);
+#endif /* LWIP_MULTICAST_TX_OPTIONS */
+  }
+  return pcb;
+}
+
+/**
+ * @ingroup udp_raw
+ * Create a UDP PCB for specific IP type.
+ * The pcb is not active until it has either been bound to a local address
+ * or connected to a remote address.
+ *
+ * @param type IP address type, see @ref lwip_ip_addr_type definitions.
+ * If you want to listen to IPv4 and IPv6 (dual-stack) packets,
+ * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE.
+ * @return The UDP PCB which was created. NULL if the PCB data structure
+ * could not be allocated.
+ *
+ * @see udp_remove()
+ */
+struct udp_pcb *
+udp_new_ip_type(u8_t type)
+{
+  struct udp_pcb *pcb;
+
+  LWIP_ASSERT_CORE_LOCKED();
+
+  pcb = udp_new();
+#if LWIP_IPV4 && LWIP_IPV6
+  if (pcb != NULL) {
+    IP_SET_TYPE_VAL(pcb->local_ip, type);
+    IP_SET_TYPE_VAL(pcb->remote_ip, type);
+  }
+#else
+  LWIP_UNUSED_ARG(type);
+#endif /* LWIP_IPV4 && LWIP_IPV6 */
+  return pcb;
+}
+
+/** This function is called from netif.c when address is changed
+ *
+ * @param old_addr IP address of the netif before change
+ * @param new_addr IP address of the netif after change
+ */
+void udp_netif_ip_addr_changed(const ip_addr_t *old_addr, const ip_addr_t *new_addr)
+{
+  struct udp_pcb *upcb;
+
+  if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) {
+    for (upcb = udp_pcbs; upcb != NULL; upcb = upcb->next) {
+      /* PCB bound to current local interface address? */
+      if (ip_addr_cmp(&upcb->local_ip, old_addr)) {
+        /* The PCB is bound to the old ipaddr and
+         * is rebound to the new one instead */
+        ip_addr_copy(upcb->local_ip, *new_addr);
+      }
+    }
+  }
+}
+
+#if UDP_DEBUG
+/**
+ * Print UDP header information for debug purposes.
+ *
+ * @param udphdr pointer to the udp header in memory.
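+ *               Fields are assumed to still be in network byte order;
+ *               lwip_ntohs() is applied only for printing.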
+ */ +void +udp_debug_print(struct udp_hdr *udphdr) +{ + LWIP_DEBUGF(UDP_DEBUG, ("UDP header:\n")); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n", + lwip_ntohs(udphdr->src), lwip_ntohs(udphdr->dest))); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); + LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | 0x%04"X16_F" | (len, chksum)\n", + lwip_ntohs(udphdr->len), lwip_ntohs(udphdr->chksum))); + LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); +} +#endif /* UDP_DEBUG */ + +#endif /* LWIP_UDP */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h new file mode 100644 index 0000000..68267e5 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/arpa/inet.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/sockets.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/sockets.h" diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h new file mode 100644 index 0000000..a17e8ad --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/net/if.h @@ -0,0 +1,36 @@ +/** + * @file + * This file is a posix wrapper for lwip/if_api.h. + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/if_api.h" diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h new file mode 100644 index 0000000..29abbaf --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/netdb.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/netdb.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + */ + +#include "lwip/netdb.h" diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h new file mode 100644 index 0000000..68267e5 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/posix/sys/socket.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix wrapper for lwip/sockets.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/sockets.h" diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h new file mode 100644 index 0000000..a3bbfd2 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/compat/stdc/errno.h @@ -0,0 +1,33 @@ +/** + * @file + * This file is a posix/stdc wrapper for lwip/errno.h. + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ + +#include "lwip/errno.h" diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h new file mode 100644 index 0000000..08bb8d5 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp.h @@ -0,0 +1,201 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * + * This file contains the generic API. + * For more details see @ref altcp_api. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_H +#define LWIP_HDR_ALTCP_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/tcpbase.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_pcb; +struct altcp_functions; + +typedef err_t (*altcp_accept_fn)(void *arg, struct altcp_pcb *new_conn, err_t err); +typedef err_t (*altcp_connected_fn)(void *arg, struct altcp_pcb *conn, err_t err); +typedef err_t (*altcp_recv_fn)(void *arg, struct altcp_pcb *conn, struct pbuf *p, err_t err); +typedef err_t (*altcp_sent_fn)(void *arg, struct altcp_pcb *conn, u16_t len); +typedef err_t (*altcp_poll_fn)(void *arg, struct altcp_pcb *conn); +typedef void (*altcp_err_fn)(void *arg, err_t err); + +typedef struct altcp_pcb* (*altcp_new_fn)(void *arg, u8_t ip_type); + +struct altcp_pcb { + const struct altcp_functions *fns; + struct altcp_pcb *inner_conn; + void *arg; + void *state; + /* application callbacks */ + altcp_accept_fn accept; + altcp_connected_fn connected; + altcp_recv_fn recv; + altcp_sent_fn sent; + altcp_poll_fn poll; + altcp_err_fn err; + u8_t pollinterval; +}; + +/** @ingroup altcp */ +typedef struct altcp_allocator_s { + /** Allocator function */ + altcp_new_fn alloc; + /** Argument to allocator function */ + void *arg; +} altcp_allocator_t; + +struct altcp_pcb *altcp_new(altcp_allocator_t *allocator); +struct altcp_pcb *altcp_new_ip6(altcp_allocator_t *allocator); +struct altcp_pcb *altcp_new_ip_type(altcp_allocator_t *allocator, u8_t ip_type); + +void altcp_arg(struct altcp_pcb *conn, void *arg); +void altcp_accept(struct altcp_pcb *conn, altcp_accept_fn accept); +void altcp_recv(struct altcp_pcb *conn, altcp_recv_fn recv); +void altcp_sent(struct altcp_pcb *conn, altcp_sent_fn sent); +void altcp_poll(struct altcp_pcb *conn, altcp_poll_fn poll, u8_t interval); +void altcp_err(struct altcp_pcb *conn, altcp_err_fn err); + +void altcp_recved(struct altcp_pcb *conn, u16_t len); +err_t altcp_bind(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port); +err_t altcp_connect(struct altcp_pcb *conn, const ip_addr_t *ipaddr, u16_t port, altcp_connected_fn connected); + +/* return conn for source code compatibility to tcp callback API only */ +struct altcp_pcb *altcp_listen_with_backlog_and_err(struct altcp_pcb *conn, u8_t backlog, err_t *err); +#define altcp_listen_with_backlog(conn, backlog) altcp_listen_with_backlog_and_err(conn, backlog, NULL) +/** @ingroup altcp */ +#define altcp_listen(conn) altcp_listen_with_backlog_and_err(conn, TCP_DEFAULT_LISTEN_BACKLOG, NULL) + +void altcp_abort(struct altcp_pcb *conn); +err_t altcp_close(struct altcp_pcb *conn); +err_t altcp_shutdown(struct altcp_pcb *conn, int shut_rx, int shut_tx); + +err_t altcp_write(struct altcp_pcb *conn, const void *dataptr, u16_t len, u8_t apiflags); +err_t altcp_output(struct altcp_pcb *conn); + +u16_t altcp_mss(struct altcp_pcb *conn); +u16_t altcp_sndbuf(struct altcp_pcb *conn); +u16_t altcp_sndqueuelen(struct altcp_pcb *conn); +void altcp_nagle_disable(struct altcp_pcb *conn); +void altcp_nagle_enable(struct altcp_pcb *conn); +int altcp_nagle_disabled(struct altcp_pcb *conn); + +void altcp_setprio(struct altcp_pcb *conn, u8_t prio); + +err_t altcp_get_tcp_addrinfo(struct altcp_pcb *conn, int local, ip_addr_t *addr, u16_t *port); +ip_addr_t *altcp_get_ip(struct altcp_pcb *conn, int local); +u16_t altcp_get_port(struct altcp_pcb *conn, int local); + 
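+
+/* Example (an illustrative sketch added in this edit, not part of the
+ * upstream header): opening a plain-TCP altcp connection through the
+ * allocator interface. altcp_tcp_alloc() is declared in lwip/altcp_tcp.h;
+ * remote_ip, the port and my_connected_cb (an altcp_connected_fn) are
+ * placeholders.
+ *
+ *   altcp_allocator_t tcp_alloc = { altcp_tcp_alloc, NULL };
+ *   struct altcp_pcb *conn = altcp_new(&tcp_alloc);
+ *   if (conn != NULL) {
+ *     altcp_connect(conn, &remote_ip, 80, my_connected_cb);
+ *   }
+ */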
+#ifdef LWIP_DEBUG +enum tcp_state altcp_dbg_get_tcp_state(struct altcp_pcb *conn); +#endif + +#ifdef __cplusplus +} +#endif + +#else /* LWIP_ALTCP */ + +/* ALTCP disabled, define everything to link against tcp callback API (e.g. to get a small non-ssl httpd) */ + +#include "lwip/tcp.h" + +#define altcp_accept_fn tcp_accept_fn +#define altcp_connected_fn tcp_connected_fn +#define altcp_recv_fn tcp_recv_fn +#define altcp_sent_fn tcp_sent_fn +#define altcp_poll_fn tcp_poll_fn +#define altcp_err_fn tcp_err_fn + +#define altcp_pcb tcp_pcb +#define altcp_tcp_new_ip_type tcp_new_ip_type +#define altcp_tcp_new tcp_new +#define altcp_tcp_new_ip6 tcp_new_ip6 + +#define altcp_new(allocator) tcp_new() +#define altcp_new_ip6(allocator) tcp_new_ip6() +#define altcp_new_ip_type(allocator, ip_type) tcp_new_ip_type(ip_type) + +#define altcp_arg tcp_arg +#define altcp_accept tcp_accept +#define altcp_recv tcp_recv +#define altcp_sent tcp_sent +#define altcp_poll tcp_poll +#define altcp_err tcp_err + +#define altcp_recved tcp_recved +#define altcp_bind tcp_bind +#define altcp_connect tcp_connect + +#define altcp_listen_with_backlog_and_err tcp_listen_with_backlog_and_err +#define altcp_listen_with_backlog tcp_listen_with_backlog +#define altcp_listen tcp_listen + +#define altcp_abort tcp_abort +#define altcp_close tcp_close +#define altcp_shutdown tcp_shutdown + +#define altcp_write tcp_write +#define altcp_output tcp_output + +#define altcp_mss tcp_mss +#define altcp_sndbuf tcp_sndbuf +#define altcp_sndqueuelen tcp_sndqueuelen +#define altcp_nagle_disable tcp_nagle_disable +#define altcp_nagle_enable tcp_nagle_enable +#define altcp_nagle_disabled tcp_nagle_disabled +#define altcp_setprio tcp_setprio + +#define altcp_get_tcp_addrinfo tcp_get_tcp_addrinfo +#define altcp_get_ip(pcb, local) ((local) ? (&(pcb)->local_ip) : (&(pcb)->remote_ip)) + +#ifdef LWIP_DEBUG +#define altcp_dbg_get_tcp_state tcp_dbg_get_tcp_state +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h new file mode 100644 index 0000000..53550c7 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tcp.h @@ -0,0 +1,72 @@ +/** + * @file + * Application layered TCP connection API (to be used from TCPIP thread)\n + * This interface mimics the tcp callback API to the application while preventing + * direct linking (much like virtual functions). + * This way, an application can make use of other application layer protocols + * on top of TCP without knowing the details (e.g. TLS, proxy connection). + * + * This file contains the base implementation calling into tcp. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TCP_H +#define LWIP_HDR_ALTCP_TCP_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_pcb *altcp_tcp_new_ip_type(u8_t ip_type); + +#define altcp_tcp_new() altcp_tcp_new_ip_type(IPADDR_TYPE_V4) +#define altcp_tcp_new_ip6() altcp_tcp_new_ip_type(IPADDR_TYPE_V6) + +struct altcp_pcb *altcp_tcp_alloc(void *arg, u8_t ip_type); + +struct tcp_pcb; +struct altcp_pcb *altcp_tcp_wrap(struct tcp_pcb *tpcb); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_TCP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h new file mode 100644 index 0000000..c5a6820 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/altcp_tls.h @@ -0,0 +1,117 @@ +/** + * @file + * Application layered TCP/TLS connection API (to be used from TCPIP thread) + * + * @defgroup altcp_tls TLS layer + * @ingroup altcp + * This file contains function prototypes for a TLS layer. + * A port to ARM mbedtls is provided in the apps/ tree + * (LWIP_ALTCP_TLS_MBEDTLS option). + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TLS_H +#define LWIP_HDR_ALTCP_TLS_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#if LWIP_ALTCP_TLS + +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup altcp_tls + * ALTCP_TLS configuration handle, content depends on port (e.g. mbedtls) + */ +struct altcp_tls_config; + +/** @ingroup altcp_tls + * Create an ALTCP_TLS server configuration handle + */ +struct altcp_tls_config *altcp_tls_create_config_server_privkey_cert(const u8_t *privkey, size_t privkey_len, + const u8_t *privkey_pass, size_t privkey_pass_len, + const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Create an ALTCP_TLS client configuration handle + */ +struct altcp_tls_config *altcp_tls_create_config_client(const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Create an ALTCP_TLS client configuration handle with two-way server/client authentication + */ +struct altcp_tls_config *altcp_tls_create_config_client_2wayauth(const u8_t *ca, size_t ca_len, const u8_t *privkey, size_t privkey_len, + const u8_t *privkey_pass, size_t privkey_pass_len, + const u8_t *cert, size_t cert_len); + +/** @ingroup altcp_tls + * Free an ALTCP_TLS configuration handle + */ +void altcp_tls_free_config(struct altcp_tls_config *conf); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS layer wrapping an existing pcb as inner connection (e.g. TLS over TCP) + */ +struct altcp_pcb *altcp_tls_wrap(struct altcp_tls_config *config, struct altcp_pcb *inner_pcb); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS pcb and its inner tcp pcb + */ +struct altcp_pcb *altcp_tls_new(struct altcp_tls_config *config, u8_t ip_type); + +/** @ingroup altcp_tls + * Create new ALTCP_TLS layer pcb and its inner tcp pcb. + * Same as @ref altcp_tls_new but this allocator function fits to + * @ref altcp_allocator_t / @ref altcp_new.\n + 'arg' must contain a struct altcp_tls_config *. + */ +struct altcp_pcb *altcp_tls_alloc(void *arg, u8_t ip_type); + +/** @ingroup altcp_tls + * Return pointer to internal TLS context so application can tweak it. + * Real type depends on port (e.g. mbedtls) + */ +void *altcp_tls_context(struct altcp_pcb *conn); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP_TLS */ +#endif /* LWIP_ALTCP */ +#endif /* LWIP_HDR_ALTCP_TLS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/api.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/api.h new file mode 100644 index 0000000..eca6ac2 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/api.h @@ -0,0 +1,431 @@ +/** + * @file + * netconn API (to be used from non-TCPIP threads) + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels
+ *
+ */
+#ifndef LWIP_HDR_API_H
+#define LWIP_HDR_API_H
+
+#include "lwip/opt.h"
+
+#if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
+/* Note: Netconn API is always available when sockets are enabled -
+ * sockets are implemented on top of them */
+
+#include "lwip/arch.h"
+#include "lwip/netbuf.h"
+#include "lwip/sys.h"
+#include "lwip/ip_addr.h"
+#include "lwip/err.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Throughout this file, IP addresses and port numbers are expected to be in
+ * the same byte order as in the corresponding pcb.
+ */
+
+/* Flags for netconn_write (u8_t) */
+#define NETCONN_NOFLAG      0x00
+#define NETCONN_NOCOPY      0x00 /* Only for source code compatibility */
+#define NETCONN_COPY        0x01
+#define NETCONN_MORE        0x02
+#define NETCONN_DONTBLOCK   0x04
+#define NETCONN_NOAUTORCVD  0x08 /* prevent netconn_recv_data_tcp() from updating the tcp window - must be done manually via netconn_tcp_recvd() */
+#define NETCONN_NOFIN       0x10 /* upper layer already received data, leave FIN in queue until called again */
+
+/* Flags for struct netconn.flags (u8_t) */
+/** This netconn had an error, don't block on recvmbox/acceptmbox any more */
+#define NETCONN_FLAG_MBOXCLOSED               0x01
+/** Should this netconn avoid blocking? */
+#define NETCONN_FLAG_NON_BLOCKING             0x02
+/** Was the last connect action a non-blocking one? */
+#define NETCONN_FLAG_IN_NONBLOCKING_CONNECT   0x04
+#if LWIP_NETCONN_FULLDUPLEX
+  /** The mbox of this netconn is being deallocated, don't use it anymore */
+#define NETCONN_FLAG_MBOXINVALID              0x08
+#endif /* LWIP_NETCONN_FULLDUPLEX */
+/** If a nonblocking write has been rejected before, poll_tcp needs to
+    check if the netconn is writable again */
+#define NETCONN_FLAG_CHECK_WRITESPACE         0x10
+#if LWIP_IPV6
+/** If this flag is set then only IPv6 communication is allowed on the
+    netconn. As per RFC#3493 this feature defaults to OFF allowing
+    dual-stack usage by default.
*/
+#define NETCONN_FLAG_IPV6_V6ONLY              0x20
+#endif /* LWIP_IPV6 */
+#if LWIP_NETBUF_RECVINFO
+/** Received packet info will be recorded for this netconn */
+#define NETCONN_FLAG_PKTINFO                  0x40
+#endif /* LWIP_NETBUF_RECVINFO */
+/** A FIN has been received but not passed to the application yet */
+#define NETCONN_FIN_RX_PENDING                0x80
+
+/* Helpers to process several netconn_types by the same code */
+#define NETCONNTYPE_GROUP(t)         ((t)&0xF0)
+#define NETCONNTYPE_DATAGRAM(t)      ((t)&0xE0)
+#if LWIP_IPV6
+#define NETCONN_TYPE_IPV6            0x08
+#define NETCONNTYPE_ISIPV6(t)        (((t)&NETCONN_TYPE_IPV6) != 0)
+#define NETCONNTYPE_ISUDPLITE(t)     (((t)&0xF3) == NETCONN_UDPLITE)
+#define NETCONNTYPE_ISUDPNOCHKSUM(t) (((t)&0xF3) == NETCONN_UDPNOCHKSUM)
+#else /* LWIP_IPV6 */
+#define NETCONNTYPE_ISIPV6(t)        (0)
+#define NETCONNTYPE_ISUDPLITE(t)     ((t) == NETCONN_UDPLITE)
+#define NETCONNTYPE_ISUDPNOCHKSUM(t) ((t) == NETCONN_UDPNOCHKSUM)
+#endif /* LWIP_IPV6 */
+
+/** @ingroup netconn_common
+ * Protocol family and type of the netconn
+ */
+enum netconn_type {
+  NETCONN_INVALID     = 0,
+  /** TCP IPv4 */
+  NETCONN_TCP         = 0x10,
+#if LWIP_IPV6
+  /** TCP IPv6 */
+  NETCONN_TCP_IPV6    = NETCONN_TCP | NETCONN_TYPE_IPV6 /* 0x18 */,
+#endif /* LWIP_IPV6 */
+  /** UDP IPv4 */
+  NETCONN_UDP         = 0x20,
+  /** UDP IPv4 lite */
+  NETCONN_UDPLITE     = 0x21,
+  /** UDP IPv4 no checksum */
+  NETCONN_UDPNOCHKSUM = 0x22,
+
+#if LWIP_IPV6
+  /** UDP IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */
+  NETCONN_UDP_IPV6         = NETCONN_UDP | NETCONN_TYPE_IPV6 /* 0x28 */,
+  /** UDP IPv6 lite (dual-stack by default, unless you call @ref netconn_set_ipv6only) */
+  NETCONN_UDPLITE_IPV6     = NETCONN_UDPLITE | NETCONN_TYPE_IPV6 /* 0x29 */,
+  /** UDP IPv6 no checksum (dual-stack by default, unless you call @ref netconn_set_ipv6only) */
+  NETCONN_UDPNOCHKSUM_IPV6 = NETCONN_UDPNOCHKSUM | NETCONN_TYPE_IPV6 /* 0x2a */,
+#endif /* LWIP_IPV6 */
+
+  /** Raw connection IPv4 */
+  NETCONN_RAW         = 0x40
+#if LWIP_IPV6
+  /** Raw connection IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */
+  , NETCONN_RAW_IPV6  = NETCONN_RAW | NETCONN_TYPE_IPV6 /* 0x48 */
+#endif /* LWIP_IPV6 */
+};
+
+/** Current state of the netconn. Non-TCP netconns are always
+ * in state NETCONN_NONE! */
+enum netconn_state {
+  NETCONN_NONE,
+  NETCONN_WRITE,
+  NETCONN_LISTEN,
+  NETCONN_CONNECT,
+  NETCONN_CLOSE
+};
+
+/** Used to inform the callback function about changes
+ *
+ * Event explanation:
+ *
+ * In the netconn implementation, there are three ways to block a client:
+ *
+ * - accept mbox (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0); in netconn_accept())
+ * - receive mbox (sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0); in netconn_recv_data())
+ * - send queue is full (sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); in lwip_netconn_do_write())
+ *
+ * The events have to be seen as events signaling the state of these mboxes/semaphores. For non-blocking
+ * connections, you need to know in advance whether a call to a netconn function would block or not,
+ * and these events tell you about that.
+ *
+ * RCVPLUS events say: Safe to perform a potentially blocking call once more.
+ * They are counted in sockets - three RCVPLUS events for accept mbox means you are safe
+ * to call netconn_accept 3 times without being blocked.
+ * Same thing for receive mbox.
+ *
+ * RCVMINUS events say: Your call to a possibly blocking function is "acknowledged".
+ * Socket implementation decrements the counter.
+ * + * For TX, there is no need to count, its merely a flag. SENDPLUS means you may send something. + * SENDPLUS occurs when enough data was delivered to peer so netconn_send() can be called again. + * A SENDMINUS event occurs when the next call to a netconn_send() would be blocking. + */ +enum netconn_evt { + NETCONN_EVT_RCVPLUS, + NETCONN_EVT_RCVMINUS, + NETCONN_EVT_SENDPLUS, + NETCONN_EVT_SENDMINUS, + NETCONN_EVT_ERROR +}; + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +/** Used for netconn_join_leave_group() */ +enum netconn_igmp { + NETCONN_JOIN, + NETCONN_LEAVE +}; +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ + +#if LWIP_DNS +/* Used for netconn_gethostbyname_addrtype(), these should match the DNS_ADDRTYPE defines in dns.h */ +#define NETCONN_DNS_DEFAULT NETCONN_DNS_IPV4_IPV6 +#define NETCONN_DNS_IPV4 0 +#define NETCONN_DNS_IPV6 1 +#define NETCONN_DNS_IPV4_IPV6 2 /* try to resolve IPv4 first, try IPv6 if IPv4 fails only */ +#define NETCONN_DNS_IPV6_IPV4 3 /* try to resolve IPv6 first, try IPv4 if IPv6 fails only */ +#endif /* LWIP_DNS */ + +/* forward-declare some structs to avoid to include their headers */ +struct ip_pcb; +struct tcp_pcb; +struct udp_pcb; +struct raw_pcb; +struct netconn; +struct api_msg; + +/** A callback prototype to inform about events for a netconn */ +typedef void (* netconn_callback)(struct netconn *, enum netconn_evt, u16_t len); + +/** A netconn descriptor */ +struct netconn { + /** type of the netconn (TCP, UDP or RAW) */ + enum netconn_type type; + /** current state of the netconn */ + enum netconn_state state; + /** the lwIP internal protocol control block */ + union { + struct ip_pcb *ip; + struct tcp_pcb *tcp; + struct udp_pcb *udp; + struct raw_pcb *raw; + } pcb; + /** the last asynchronous unreported error this netconn had */ + err_t pending_err; +#if !LWIP_NETCONN_SEM_PER_THREAD + /** sem that is used to synchronously execute functions in the core context */ + sys_sem_t op_completed; +#endif + /** mbox where received packets are stored until they are fetched + by the netconn application thread (can grow quite big) */ + sys_mbox_t recvmbox; +#if LWIP_TCP + /** mbox where new connections are stored until processed + by the application thread */ + sys_mbox_t acceptmbox; +#endif /* LWIP_TCP */ +#if LWIP_NETCONN_FULLDUPLEX + /** number of threads waiting on an mbox. This is required to unblock + all threads when closing while threads are waiting. */ + int mbox_threads_waiting; +#endif + /** only used for socket layer */ +#if LWIP_SOCKET + int socket; +#endif /* LWIP_SOCKET */ +#if LWIP_SO_SNDTIMEO + /** timeout to wait for sending data (which means enqueueing data for sending + in internal buffers) in milliseconds */ + s32_t send_timeout; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVTIMEO + /** timeout in milliseconds to wait for new data to be received + (or connections to arrive for listening netconns) */ + u32_t recv_timeout; +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF + /** maximum amount of bytes queued in recvmbox + not used for TCP: adjust TCP_WND instead! 
*/
+  int recv_bufsize;
+  /** number of bytes currently in recvmbox to be received,
+      tested against recv_bufsize to limit bytes on recvmbox
+      for UDP and RAW, used for FIONREAD */
+  int recv_avail;
+#endif /* LWIP_SO_RCVBUF */
+#if LWIP_SO_LINGER
+  /** values < 0 mean linger is disabled, values > 0 are seconds to linger */
+  s16_t linger;
+#endif /* LWIP_SO_LINGER */
+  /** flags holding more netconn-internal state, see NETCONN_FLAG_* defines */
+  u8_t flags;
+#if LWIP_TCP
+  /** TCP: when data passed to netconn_write doesn't fit into the send buffer,
+      this temporarily stores the message.
+      Also used during connect and close. */
+  struct api_msg *current_msg;
+#endif /* LWIP_TCP */
+  /** A callback function that is informed about events for this netconn */
+  netconn_callback callback;
+};
+
+/** This vector type is passed to @ref netconn_write_vectors_partly to send
+ * multiple buffers at once.
+ * ATTENTION: This type has to directly map struct iovec since one is cast
+ * into the other!
+ */
+struct netvector {
+  /** pointer to the application buffer that contains the data to send */
+  const void *ptr;
+  /** size of the application data to send */
+  size_t len;
+};
+
+/** Register a network connection event */
+#define API_EVENT(c,e,l) if (c->callback) {         \
+                           (*c->callback)(c, e, l); \
+                         }
+
+/* Network connection functions: */
+
+/** @ingroup netconn_common
+ * Create new netconn connection
+ * @param t @ref netconn_type */
+#define netconn_new(t)                  netconn_new_with_proto_and_callback(t, 0, NULL)
+#define netconn_new_with_callback(t, c) netconn_new_with_proto_and_callback(t, 0, c)
+struct netconn *netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto,
+                                                    netconn_callback callback);
+err_t netconn_prepare_delete(struct netconn *conn);
+err_t netconn_delete(struct netconn *conn);
+/** Get the type of a netconn (as enum netconn_type).
*/ +#define netconn_type(conn) (conn->type) + +err_t netconn_getaddr(struct netconn *conn, ip_addr_t *addr, + u16_t *port, u8_t local); +/** @ingroup netconn_common */ +#define netconn_peer(c,i,p) netconn_getaddr(c,i,p,0) +/** @ingroup netconn_common */ +#define netconn_addr(c,i,p) netconn_getaddr(c,i,p,1) + +err_t netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port); +err_t netconn_bind_if(struct netconn *conn, u8_t if_idx); +err_t netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port); +err_t netconn_disconnect (struct netconn *conn); +err_t netconn_listen_with_backlog(struct netconn *conn, u8_t backlog); +/** @ingroup netconn_tcp */ +#define netconn_listen(conn) netconn_listen_with_backlog(conn, TCP_DEFAULT_LISTEN_BACKLOG) +err_t netconn_accept(struct netconn *conn, struct netconn **new_conn); +err_t netconn_recv(struct netconn *conn, struct netbuf **new_buf); +err_t netconn_recv_udp_raw_netbuf(struct netconn *conn, struct netbuf **new_buf); +err_t netconn_recv_udp_raw_netbuf_flags(struct netconn *conn, struct netbuf **new_buf, u8_t apiflags); +err_t netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf); +err_t netconn_recv_tcp_pbuf_flags(struct netconn *conn, struct pbuf **new_buf, u8_t apiflags); +err_t netconn_tcp_recvd(struct netconn *conn, size_t len); +err_t netconn_sendto(struct netconn *conn, struct netbuf *buf, + const ip_addr_t *addr, u16_t port); +err_t netconn_send(struct netconn *conn, struct netbuf *buf); +err_t netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size, + u8_t apiflags, size_t *bytes_written); +err_t netconn_write_vectors_partly(struct netconn *conn, struct netvector *vectors, u16_t vectorcnt, + u8_t apiflags, size_t *bytes_written); +/** @ingroup netconn_tcp */ +#define netconn_write(conn, dataptr, size, apiflags) \ + netconn_write_partly(conn, dataptr, size, apiflags, NULL) +err_t netconn_close(struct netconn *conn); +err_t netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx); + +#if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) +err_t netconn_join_leave_group(struct netconn *conn, const ip_addr_t *multiaddr, + const ip_addr_t *netif_addr, enum netconn_igmp join_or_leave); +err_t netconn_join_leave_group_netif(struct netconn *conn, const ip_addr_t *multiaddr, + u8_t if_idx, enum netconn_igmp join_or_leave); +#endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ +#if LWIP_DNS +#if LWIP_IPV4 && LWIP_IPV6 +err_t netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype); +#define netconn_gethostbyname(name, addr) netconn_gethostbyname_addrtype(name, addr, NETCONN_DNS_DEFAULT) +#else /* LWIP_IPV4 && LWIP_IPV6 */ +err_t netconn_gethostbyname(const char *name, ip_addr_t *addr); +#define netconn_gethostbyname_addrtype(name, addr, dns_addrtype) netconn_gethostbyname(name, addr) +#endif /* LWIP_IPV4 && LWIP_IPV6 */ +#endif /* LWIP_DNS */ + +err_t netconn_err(struct netconn *conn); +#define netconn_recv_bufsize(conn) ((conn)->recv_bufsize) + +#define netconn_set_flags(conn, set_flags) do { (conn)->flags = (u8_t)((conn)->flags | (set_flags)); } while(0) +#define netconn_clear_flags(conn, clr_flags) do { (conn)->flags = (u8_t)((conn)->flags & (u8_t)(~(clr_flags) & 0xff)); } while(0) +#define netconn_is_flag_set(conn, flag) (((conn)->flags & (flag)) != 0) + +/** Set the blocking status of netconn calls (@todo: write/send is missing) */ +#define netconn_set_nonblocking(conn, val) do { if(val) { \ + netconn_set_flags(conn, NETCONN_FLAG_NON_BLOCKING); \ +} else { \ + 
netconn_clear_flags(conn, NETCONN_FLAG_NON_BLOCKING); }} while(0) +/** Get the blocking status of netconn calls (@todo: write/send is missing) */ +#define netconn_is_nonblocking(conn) (((conn)->flags & NETCONN_FLAG_NON_BLOCKING) != 0) + +#if LWIP_IPV6 +/** @ingroup netconn_common + * TCP: Set the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY) + */ +#define netconn_set_ipv6only(conn, val) do { if(val) { \ + netconn_set_flags(conn, NETCONN_FLAG_IPV6_V6ONLY); \ +} else { \ + netconn_clear_flags(conn, NETCONN_FLAG_IPV6_V6ONLY); }} while(0) +/** @ingroup netconn_common + * TCP: Get the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY) + */ +#define netconn_get_ipv6only(conn) (((conn)->flags & NETCONN_FLAG_IPV6_V6ONLY) != 0) +#endif /* LWIP_IPV6 */ + +#if LWIP_SO_SNDTIMEO +/** Set the send timeout in milliseconds */ +#define netconn_set_sendtimeout(conn, timeout) ((conn)->send_timeout = (timeout)) +/** Get the send timeout in milliseconds */ +#define netconn_get_sendtimeout(conn) ((conn)->send_timeout) +#endif /* LWIP_SO_SNDTIMEO */ +#if LWIP_SO_RCVTIMEO +/** Set the receive timeout in milliseconds */ +#define netconn_set_recvtimeout(conn, timeout) ((conn)->recv_timeout = (timeout)) +/** Get the receive timeout in milliseconds */ +#define netconn_get_recvtimeout(conn) ((conn)->recv_timeout) +#endif /* LWIP_SO_RCVTIMEO */ +#if LWIP_SO_RCVBUF +/** Set the receive buffer in bytes */ +#define netconn_set_recvbufsize(conn, recvbufsize) ((conn)->recv_bufsize = (recvbufsize)) +/** Get the receive buffer in bytes */ +#define netconn_get_recvbufsize(conn) ((conn)->recv_bufsize) +#endif /* LWIP_SO_RCVBUF*/ + +#if LWIP_NETCONN_SEM_PER_THREAD +void netconn_thread_init(void); +void netconn_thread_cleanup(void); +#else /* LWIP_NETCONN_SEM_PER_THREAD */ +#define netconn_thread_init() +#define netconn_thread_cleanup() +#endif /* LWIP_NETCONN_SEM_PER_THREAD */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_NETCONN || LWIP_SOCKET */ + +#endif /* LWIP_HDR_API_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h new file mode 100644 index 0000000..a7c3527 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_proxyconnect.h @@ -0,0 +1,79 @@ +/** + * @file + * Application layered TCP connection API that executes a proxy-connect. + * + * This file provides a starting layer that executes a proxy-connect e.g. to + * set up TLS connections through a http proxy. + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H +#define LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct altcp_proxyconnect_config { + ip_addr_t proxy_addr; + u16_t proxy_port; +}; + + +struct altcp_pcb *altcp_proxyconnect_new(struct altcp_proxyconnect_config *config, struct altcp_pcb *inner_pcb); +struct altcp_pcb *altcp_proxyconnect_new_tcp(struct altcp_proxyconnect_config *config, u8_t ip_type); + +struct altcp_pcb *altcp_proxyconnect_alloc(void *arg, u8_t ip_type); + +#if LWIP_ALTCP_TLS +struct altcp_proxyconnect_tls_config { + struct altcp_proxyconnect_config proxy; + struct altcp_tls_config *tls_config; +}; + +struct altcp_pcb *altcp_proxyconnect_tls_alloc(void *arg, u8_t ip_type); +#endif /* LWIP_ALTCP_TLS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_ALTCP */ +#endif /* LWIP_HDR_APPS_ALTCP_PROXYCONNECT_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h new file mode 100644 index 0000000..63f72e3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/altcp_tls_mbedtls_opts.h @@ -0,0 +1,67 @@ +/** + * @file + * Application layered TCP/TLS connection API (to be used from TCPIP thread) + * + * This file contains options for an mbedtls port of the TLS layer. + */ + +/* + * Copyright (c) 2017 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_ALTCP_TLS_OPTS_H +#define LWIP_HDR_ALTCP_TLS_OPTS_H + +#include "lwip/opt.h" + +#if LWIP_ALTCP /* don't build if not configured for use in lwipopts.h */ + +/** LWIP_ALTCP_TLS_MBEDTLS==1: use mbedTLS for TLS support for altcp API + * mbedtls include directory must be reachable via include search path + */ +#ifndef LWIP_ALTCP_TLS_MBEDTLS +#define LWIP_ALTCP_TLS_MBEDTLS 0 +#endif + +/** Configure debug level of this file */ +#ifndef ALTCP_MBEDTLS_DEBUG +#define ALTCP_MBEDTLS_DEBUG LWIP_DBG_OFF +#endif + +/** Set a session timeout in seconds for the basic session cache + * ATTENTION: Using a session cache can lower security by reusing keys! + */ +#ifndef ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS +#define ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS 0 +#endif + +#endif /* LWIP_ALTCP */ + +#endif /* LWIP_HDR_ALTCP_TLS_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h new file mode 100644 index 0000000..02bde1f --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/fs.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
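A lwipopts.h sketch enabling the mbedTLS-backed TLS layer configured by the options above (the mbedTLS sources and include path must be reachable by the build; the 60 s cache value is illustrative):

#define LWIP_ALTCP                                   1
#define LWIP_ALTCP_TLS                               1
#define LWIP_ALTCP_TLS_MBEDTLS                       1
/* trades some security for fewer handshakes, see the warning above */
#define ALTCP_MBEDTLS_SESSION_CACHE_TIMEOUT_SECONDS  60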
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_APPS_FS_H +#define LWIP_HDR_APPS_FS_H + +#include "httpd_opts.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define FS_READ_EOF -1 +#define FS_READ_DELAYED -2 + +#if HTTPD_PRECALCULATED_CHECKSUM +struct fsdata_chksum { + u32_t offset; + u16_t chksum; + u16_t len; +}; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ + +#define FS_FILE_FLAGS_HEADER_INCLUDED 0x01 +#define FS_FILE_FLAGS_HEADER_PERSISTENT 0x02 +#define FS_FILE_FLAGS_HEADER_HTTPVER_1_1 0x04 +#define FS_FILE_FLAGS_SSI 0x08 + +/** Define FS_FILE_EXTENSION_T_DEFINED if you have typedef'ed to your private + * pointer type (defaults to 'void' so the default usage is 'void*') + */ +#ifndef FS_FILE_EXTENSION_T_DEFINED +typedef void fs_file_extension; +#endif + +struct fs_file { + const char *data; + int len; + int index; + /* pextension is free for implementations to hold private (extensional) + arbitrary data, e.g. holding some file state or file system handle */ + fs_file_extension *pextension; +#if HTTPD_PRECALCULATED_CHECKSUM + const struct fsdata_chksum *chksum; + u16_t chksum_count; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ + u8_t flags; +#if LWIP_HTTPD_CUSTOM_FILES + u8_t is_custom_file; +#endif /* LWIP_HTTPD_CUSTOM_FILES */ +#if LWIP_HTTPD_FILE_STATE + void *state; +#endif /* LWIP_HTTPD_FILE_STATE */ +}; + +#if LWIP_HTTPD_FS_ASYNC_READ +typedef void (*fs_wait_cb)(void *arg); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ + +err_t fs_open(struct fs_file *file, const char *name); +void fs_close(struct fs_file *file); +#if LWIP_HTTPD_DYNAMIC_FILE_READ +#if LWIP_HTTPD_FS_ASYNC_READ +int fs_read_async(struct fs_file *file, char *buffer, int count, fs_wait_cb callback_fn, void *callback_arg); +#else /* LWIP_HTTPD_FS_ASYNC_READ */ +int fs_read(struct fs_file *file, char *buffer, int count); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ +#endif /* LWIP_HTTPD_DYNAMIC_FILE_READ */ +#if LWIP_HTTPD_FS_ASYNC_READ +int fs_is_file_ready(struct fs_file *file, fs_wait_cb callback_fn, void *callback_arg); +#endif /* LWIP_HTTPD_FS_ASYNC_READ */ +int fs_bytes_left(struct fs_file *file); + +#if LWIP_HTTPD_FILE_STATE +/** This user-defined function is called when a file is opened. */ +void *fs_state_init(struct fs_file *file, const char *name); +/** This user-defined function is called when a file is closed. */ +void fs_state_free(struct fs_file *file, void *state); +#endif /* #if LWIP_HTTPD_FILE_STATE */ + +struct fsdata_file { + const struct fsdata_file *next; + const unsigned char *name; + const unsigned char *data; + int len; + u8_t flags; +#if HTTPD_PRECALCULATED_CHECKSUM + u16_t chksum_count; + const struct fsdata_chksum *chksum; +#endif /* HTTPD_PRECALCULATED_CHECKSUM */ +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_FS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h new file mode 100644 index 0000000..f4754b3 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/http_client.h @@ -0,0 +1,160 @@ +/** + * @file + * HTTP client + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Simon Goldschmidt + * + */ + +#ifndef LWIP_HDR_APPS_HTTP_CLIENT_H +#define LWIP_HDR_APPS_HTTP_CLIENT_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/altcp.h" +#include "lwip/prot/iana.h" +#include "lwip/pbuf.h" + +#if LWIP_TCP && LWIP_CALLBACK_API + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @ingroup httpc + * HTTPC_HAVE_FILE_IO: define this to 1 to have functions downloading directly + * to disk via fopen/fwrite. + * These functions are example implementations of the interface only.
+ */ +#ifndef LWIP_HTTPC_HAVE_FILE_IO +#define LWIP_HTTPC_HAVE_FILE_IO 0 +#endif + +/** + * @ingroup httpc + * The default TCP port used for HTTP + */ +#define HTTP_DEFAULT_PORT LWIP_IANA_PORT_HTTP + +/** + * @ingroup httpc + * HTTP client result codes + */ +typedef enum ehttpc_result { + /** File successfully received */ + HTTPC_RESULT_OK = 0, + /** Unknown error */ + HTTPC_RESULT_ERR_UNKNOWN = 1, + /** Connection to server failed */ + HTTPC_RESULT_ERR_CONNECT = 2, + /** Failed to resolve server hostname */ + HTTPC_RESULT_ERR_HOSTNAME = 3, + /** Connection unexpectedly closed by remote server */ + HTTPC_RESULT_ERR_CLOSED = 4, + /** Connection timed out (server didn't respond in time) */ + HTTPC_RESULT_ERR_TIMEOUT = 5, + /** Server responded with an error code */ + HTTPC_RESULT_ERR_SVR_RESP = 6, + /** Local memory error */ + HTTPC_RESULT_ERR_MEM = 7, + /** Local abort */ + HTTPC_RESULT_LOCAL_ABORT = 8, + /** Content length mismatch */ + HTTPC_RESULT_ERR_CONTENT_LEN = 9 +} httpc_result_t; + +typedef struct _httpc_state httpc_state_t; + +/** + * @ingroup httpc + * Prototype of an http client callback function + * + * @param arg argument specified when initiating the request + * @param httpc_result result of the http transfer (see enum httpc_result_t) + * @param rx_content_len number of bytes received (without headers) + * @param srv_res this contains the http status code received (if any) + * @param err an error returned by internal lwip functions, can help to specify + * the source of the error but must not necessarily be != ERR_OK + */ +typedef void (*httpc_result_fn)(void *arg, httpc_result_t httpc_result, u32_t rx_content_len, u32_t srv_res, err_t err); + +/** + * @ingroup httpc + * Prototype of http client callback: called when the headers are received + * + * @param connection http client connection + * @param arg argument specified when initiating the request + * @param hdr header pbuf(s) (may contain data also) + * @param hdr_len length of the headers in 'hdr' + * @param content_len content length as received in the headers (-1 if not received) + * @return if != ERR_OK is returned, the connection is aborted + */ +typedef err_t (*httpc_headers_done_fn)(httpc_state_t *connection, void *arg, struct pbuf *hdr, u16_t hdr_len, u32_t content_len); + +typedef struct _httpc_connection { + ip_addr_t proxy_addr; + u16_t proxy_port; + u8_t use_proxy; + /* @todo: add username:pass?
*/ + +#if LWIP_ALTCP + altcp_allocator_t *altcp_allocator; +#endif + + /* this callback is called when the transfer is finished (or aborted) */ + httpc_result_fn result_fn; + /* this callback is called after receiving the http headers + It can abort the connection by returning != ERR_OK */ + httpc_headers_done_fn headers_done_fn; +} httpc_connection_t; + +err_t httpc_get_file(const ip_addr_t* server_addr, u16_t port, const char* uri, const httpc_connection_t *settings, + altcp_recv_fn recv_fn, void* callback_arg, httpc_state_t **connection); +err_t httpc_get_file_dns(const char* server_name, u16_t port, const char* uri, const httpc_connection_t *settings, + altcp_recv_fn recv_fn, void* callback_arg, httpc_state_t **connection); + +#if LWIP_HTTPC_HAVE_FILE_IO +err_t httpc_get_file_to_disk(const ip_addr_t* server_addr, u16_t port, const char* uri, const httpc_connection_t *settings, + void* callback_arg, const char* local_file_name, httpc_state_t **connection); +err_t httpc_get_file_dns_to_disk(const char* server_name, u16_t port, const char* uri, const httpc_connection_t *settings, + void* callback_arg, const char* local_file_name, httpc_state_t **connection); +#endif /* LWIP_HTTPC_HAVE_FILE_IO */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_TCP && LWIP_CALLBACK_API */ + +#endif /* LWIP_HDR_APPS_HTTP_CLIENT_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h new file mode 100644 index 0000000..615261e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd.h @@ -0,0 +1,255 @@ +/** + * @file + * HTTP server + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
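Example usage (sketch) of the client API above, assuming LWIP_TCP && LWIP_CALLBACK_API; the hostname, URI and callback names are illustrative.

#include "lwip/apps/http_client.h"

static err_t rx_cb(void *arg, struct altcp_pcb *conn, struct pbuf *p, err_t err)
{
  if (p == NULL) {
    return ERR_OK; /* remote side closed the connection */
  }
  altcp_recved(conn, p->tot_len); /* acknowledge the received data */
  /* consume p->payload (possibly a pbuf chain) here */
  pbuf_free(p);                   /* this callback owns the pbuf */
  return ERR_OK;
}

static void result_cb(void *arg, httpc_result_t res, u32_t rx_len,
                      u32_t srv_res, err_t err)
{
  /* res == HTTPC_RESULT_OK and srv_res == 200 on success */
}

void example_http_get(void)
{
  static httpc_connection_t settings; /* zero-initialized: no proxy */
  settings.result_fn = result_cb;
  httpc_state_t *req;
  httpc_get_file_dns("example.org", HTTP_DEFAULT_PORT, "/index.html",
                     &settings, rx_cb, NULL, &req);
}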
+ * + * Author: Adam Dunkels + * + * This version of the file has been modified by Texas Instruments to offer + * simple server-side-include (SSI) and Common Gateway Interface (CGI) + * capability. + */ + +#ifndef LWIP_HDR_APPS_HTTPD_H +#define LWIP_HDR_APPS_HTTPD_H + +#include "httpd_opts.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_HTTPD_CGI + +/** + * @ingroup httpd + * Function pointer for a CGI script handler. + * + * This function is called each time the HTTPD server is asked for a file + * whose name was previously registered as a CGI function using a call to + * http_set_cgi_handlers. The iIndex parameter provides the index of the + * CGI within the cgis array passed to http_set_cgi_handlers. Parameters + * pcParam and pcValue provide access to the parameters provided along with + * the URI. iNumParams provides a count of the entries in the pcParam and + * pcValue arrays. Each entry in the pcParam array contains the name of a + * parameter with the corresponding entry in the pcValue array containing the + * value for that parameter. Note that pcParam may contain multiple elements + * with the same name if, for example, a multi-selection list control is used + * in the form generating the data. + * + * The function should return a pointer to a character string which is the + * path and filename of the response that is to be sent to the connected + * browser, for example "/thanks.htm" or "/response/error.ssi". + * + * The maximum number of parameters that will be passed to this function via + * iNumParams is defined by LWIP_HTTPD_MAX_CGI_PARAMETERS. Any parameters in + * the incoming HTTP request above this number will be discarded. + * + * Requests intended for use by this CGI mechanism must be sent using the GET + * method (which encodes all parameters within the URI rather than in a block + * later in the request). Attempts to use the POST method will result in the + * request being ignored. + * + */ +typedef const char *(*tCGIHandler)(int iIndex, int iNumParams, char *pcParam[], + char *pcValue[]); + +/** + * @ingroup httpd + * Structure defining the base filename (URL) of a CGI and the associated + * function which is to be called when that URL is requested. + */ +typedef struct +{ + const char *pcCGIName; + tCGIHandler pfnCGIHandler; +} tCGI; + +void http_set_cgi_handlers(const tCGI *pCGIs, int iNumHandlers); + +#endif /* LWIP_HTTPD_CGI */ + +#if LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI + +#if LWIP_HTTPD_CGI_SSI +/* we have to prototype this struct here to make it available for the handler */ +struct fs_file; + +/** Define this generic CGI handler in your application. + * It is called once for every URI with parameters. + * The parameters can be stored to the object passed as connection_state, which + * is allocated to file->state via fs_state_init() from fs_open() or fs_open_custom(). + * Content creation via SSI or complete dynamic files can retrieve the CGI params from there. + */ +extern void httpd_cgi_handler(struct fs_file *file, const char* uri, int iNumParams, + char **pcParam, char **pcValue +#if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE + , void *connection_state +#endif /* LWIP_HTTPD_FILE_STATE */ + ); +#endif /* LWIP_HTTPD_CGI_SSI */ + +#endif /* LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI */ + +#if LWIP_HTTPD_SSI + +/** + * @ingroup httpd + * Function pointer for the SSI tag handler callback. 
+ * + * This function will be called each time the HTTPD server detects a tag of the + * form <!--#name--> in files with extensions mentioned in the g_pcSSIExtensions + * array (currently .shtml, .shtm, .ssi, .xml, .json) where "name" appears as + * one of the tags supplied to http_set_ssi_handler in the tags array. The + * returned insert string, which will be appended after the string + * "<!--#name-->" in the file sent back to the client, should be written to pointer + * pcInsert. iInsertLen contains the size of the buffer pointed to by + * pcInsert. The iIndex parameter provides the zero-based index of the tag as + * found in the tags array and identifies the tag that is to be processed. + * + * The handler returns the number of characters written to pcInsert excluding + * any terminating NULL or HTTPD_SSI_TAG_UNKNOWN when tag is not recognized. + * + * Note that the behavior of this SSI mechanism is somewhat different from the + * "normal" SSI processing as found in, for example, the Apache web server. In + * this case, the inserted text is appended following the SSI tag rather than + * replacing the tag entirely. This allows for an implementation that does not + * require significant additional buffering of output data yet which will still + * offer usable SSI functionality. One downside to this approach is when + * attempting to use SSI within JavaScript. The SSI tag is structured to + * resemble an HTML comment but this syntax does not constitute a comment + * within JavaScript and, hence, leaving the tag in place will result in + * problems in these cases. In order to avoid these problems, define + * LWIP_HTTPD_SSI_INCLUDE_TAG as zero in your lwip options file, or use JavaScript + * style block comments in the form / * # name * / (without the spaces). + */ +typedef u16_t (*tSSIHandler)( +#if LWIP_HTTPD_SSI_RAW + const char* ssi_tag_name, +#else /* LWIP_HTTPD_SSI_RAW */ + int iIndex, +#endif /* LWIP_HTTPD_SSI_RAW */ + char *pcInsert, int iInsertLen +#if LWIP_HTTPD_SSI_MULTIPART + , u16_t current_tag_part, u16_t *next_tag_part +#endif /* LWIP_HTTPD_SSI_MULTIPART */ +#if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE + , void *connection_state +#endif /* LWIP_HTTPD_FILE_STATE */ + ); + +/** Set the SSI handler function + * (if LWIP_HTTPD_SSI_RAW==1, only the first argument is used) + */ +void http_set_ssi_handler(tSSIHandler pfnSSIHandler, + const char **ppcTags, int iNumTags); + +/** For LWIP_HTTPD_SSI_RAW==1, return this to indicate the tag is unknown. + * In this case, the webserver writes a warning into the page. + * You can also just return 0 to write nothing for unknown tags. + */ +#define HTTPD_SSI_TAG_UNKNOWN 0xFFFF + +#endif /* LWIP_HTTPD_SSI */ + +#if LWIP_HTTPD_SUPPORT_POST + +/* These functions must be implemented by the application */ + +/** + * @ingroup httpd + * Called when a POST request has been received. The application can decide + * whether to accept it or not. + * + * @param connection Unique connection identifier, valid until httpd_post_end + * is called. + * @param uri The HTTP header URI receiving the POST request. + * @param http_request The raw HTTP request (the first packet, normally). + * @param http_request_len Size of 'http_request'. + * @param content_len Content-Length from HTTP header. + * @param response_uri Filename of response file, to be filled when denying the + * request + * @param response_uri_len Size of the 'response_uri' buffer.
+ * @param post_auto_wnd Set this to 0 to let the callback code handle window + * updates by calling 'httpd_post_data_recved' (to throttle rx speed) + * default is 1 (httpd handles window updates automatically) + * @return ERR_OK: Accept the POST request, data may be passed in + * another err_t: Deny the POST request, send back 'bad request'. + */ +err_t httpd_post_begin(void *connection, const char *uri, const char *http_request, + u16_t http_request_len, int content_len, char *response_uri, + u16_t response_uri_len, u8_t *post_auto_wnd); + +/** + * @ingroup httpd + * Called for each pbuf of data that has been received for a POST. + * ATTENTION: The application is responsible for freeing the pbufs passed in! + * + * @param connection Unique connection identifier. + * @param p Received data. + * @return ERR_OK: Data accepted. + * another err_t: Data denied, http_post_get_response_uri will be called. + */ +err_t httpd_post_receive_data(void *connection, struct pbuf *p); + +/** + * @ingroup httpd + * Called when all data is received or when the connection is closed. + * The application must return the filename/URI of a file to send in response + * to this POST request. If the response_uri buffer is untouched, a 404 + * response is returned. + * + * @param connection Unique connection identifier. + * @param response_uri Filename of response file, to be filled when denying the request + * @param response_uri_len Size of the 'response_uri' buffer. + */ +void httpd_post_finished(void *connection, char *response_uri, u16_t response_uri_len); + +#if LWIP_HTTPD_POST_MANUAL_WND +void httpd_post_data_recved(void *connection, u16_t recved_len); +#endif /* LWIP_HTTPD_POST_MANUAL_WND */ + +#endif /* LWIP_HTTPD_SUPPORT_POST */ + +void httpd_init(void); + +#if HTTPD_ENABLE_HTTPS +struct altcp_tls_config; +void httpd_inits(struct altcp_tls_config *conf); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_HTTPD_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h new file mode 100644 index 0000000..733e117 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/httpd_opts.h @@ -0,0 +1,396 @@ +/** + * @file + * HTTP server options list + */ + +/* + * Copyright (c) 2001-2003 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
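Example usage (sketch) of the application-side httpd hooks above, assuming LWIP_HTTPD_CGI==1 and LWIP_HTTPD_SUPPORT_POST==1; all URIs and handler names are illustrative.

#include <stdio.h>
#include <string.h>
#include "lwip/apps/httpd.h"

static const char *led_cgi(int iIndex, int iNumParams,
                           char *pcParam[], char *pcValue[])
{
  /* act on the pcParam/pcValue pairs here, then pick the response page */
  return "/thanks.html";
}

static const tCGI cgi_table[] = {
  { "/leds.cgi", led_cgi },
};

err_t httpd_post_begin(void *connection, const char *uri, const char *http_request,
                       u16_t http_request_len, int content_len, char *response_uri,
                       u16_t response_uri_len, u8_t *post_auto_wnd)
{
  LWIP_UNUSED_ARG(http_request);
  /* accept POSTs only to one URI */
  return (strcmp(uri, "/form.cgi") == 0) ? ERR_OK : ERR_VAL;
}

err_t httpd_post_receive_data(void *connection, struct pbuf *p)
{
  /* parse p->payload here; the application must free the pbuf */
  pbuf_free(p);
  return ERR_OK;
}

void httpd_post_finished(void *connection, char *response_uri, u16_t response_uri_len)
{
  snprintf(response_uri, response_uri_len, "/done.html");
}

void example_httpd_start(void)
{
  httpd_init();
  http_set_cgi_handlers(cgi_table, LWIP_ARRAYSIZE(cgi_table));
}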
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + * This version of the file has been modified by Texas Instruments to offer + * simple server-side-include (SSI) and Common Gateway Interface (CGI) + * capability. + */ + +#ifndef LWIP_HDR_APPS_HTTPD_OPTS_H +#define LWIP_HDR_APPS_HTTPD_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup httpd_opts Options + * @ingroup httpd + * @{ + */ + +/** Set this to 1 to support CGI (old style). + * + * This old style CGI support works by registering an array of URLs and + * associated CGI handler functions (@ref http_set_cgi_handlers). + * This list is scanned just before fs_open is called from request handling. + * The handler can return a new URL that is used internally by the httpd to + * load the returned page (passed to fs_open). + * + * Use this CGI type e.g. to execute specific actions and return a page that + * does not depend on the CGI parameters. + */ +#if !defined LWIP_HTTPD_CGI || defined __DOXYGEN__ +#define LWIP_HTTPD_CGI 0 +#endif + +/** Set this to 1 to support CGI (new style). + * + * This new style CGI support works by calling a global function + * (@ref tCGIHandler) for all URLs that are found. fs_open is called first + * and the URL can not be written by the CGI handler. Instead, this handler gets + * passed the http file state, an object where it can store information derived + * from the CGI URL or parameters. This file state is later passed to SSI, so + * the SSI code can return data depending on CGI input. + * + * Use this CGI handler if you want CGI information passed on to SSI. + */ +#if !defined LWIP_HTTPD_CGI_SSI || defined __DOXYGEN__ +#define LWIP_HTTPD_CGI_SSI 0 +#endif + +/** Set this to 1 to support SSI (Server-Side-Includes) + * + * In contrast to other http servers, this only calls a preregistered callback + * function (@see http_set_ssi_handler) for each tag (in the format of + * <!--#name-->) encountered in SSI-enabled pages. + * SSI-enabled pages must have one of the predefined SSI-enabled file extensions. + * All files with one of these extensions are parsed when sent. + * + * A downside of the current SSI implementation is that persistent connections + * don't work, as the file length is not known in advance (and httpd currently + * relies on the Content-Length header for persistent connections). + * + * To save memory, the maximum tag length is limited (@see LWIP_HTTPD_MAX_TAG_NAME_LEN). + * To save memory, the maximum insertion string length is limited (@see + * LWIP_HTTPD_MAX_TAG_INSERT_LEN). If this is not enough, @ref LWIP_HTTPD_SSI_MULTIPART + * can be used. + */ +#if !defined LWIP_HTTPD_SSI || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI 0 +#endif + +/** Set this to 1 to implement an SSI tag handler callback that gets a const char* + * to the tag (instead of an index into a pre-registered array of known tags) + * If this is 0, the SSI handler callback function is only called for pre-registered tags.
+ */ +#if !defined LWIP_HTTPD_SSI_RAW || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_RAW 0 +#endif + +/** Set this to 0 to prevent parsing the file extension at runtime to decide + * if a file should be scanned for SSI tags or not. + * Default is 1 (file extensions are checked using the g_pcSSIExtensions array) + * Set to 2 to override this runtime test function. + * + * This is enabled by default, but if you only use a newer version of makefsdata + * supporting the "-ssi" option, this info is already present in the generated + * fsdata. + */ +#if !defined LWIP_HTTPD_SSI_BY_FILE_EXTENSION || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_BY_FILE_EXTENSION 1 +#endif + +/** Set this to 1 to support HTTP POST */ +#if !defined LWIP_HTTPD_SUPPORT_POST || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_POST 0 +#endif + +/* The maximum number of parameters that the CGI handler can be sent. */ +#if !defined LWIP_HTTPD_MAX_CGI_PARAMETERS || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_CGI_PARAMETERS 16 +#endif + +/** LWIP_HTTPD_SSI_MULTIPART==1: SSI handler function is called with 2 more + * arguments indicating a counter for insert strings that are too long to be + * inserted at once: the SSI handler function must then set 'next_tag_part' + * which will be passed back to it in the next call. */ +#if !defined LWIP_HTTPD_SSI_MULTIPART || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_MULTIPART 0 +#endif + +/* The maximum length of the string comprising the SSI tag name + * ATTENTION: tags longer than this are ignored, not truncated! + */ +#if !defined LWIP_HTTPD_MAX_TAG_NAME_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_TAG_NAME_LEN 8 +#endif + +/* The maximum length of string that can be returned to replace any given tag + * If this buffer is not long enough, use LWIP_HTTPD_SSI_MULTIPART. + */ +#if !defined LWIP_HTTPD_MAX_TAG_INSERT_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_TAG_INSERT_LEN 192 +#endif + +#if !defined LWIP_HTTPD_POST_MANUAL_WND || defined __DOXYGEN__ +#define LWIP_HTTPD_POST_MANUAL_WND 0 +#endif + +/** This string is passed in the HTTP header as "Server: " */ +#if !defined HTTPD_SERVER_AGENT || defined __DOXYGEN__ +#define HTTPD_SERVER_AGENT "lwIP/" LWIP_VERSION_STRING " (http://savannah.nongnu.org/projects/lwip)" +#endif + +/** Set this to 1 if you want to include code that creates HTTP headers + * at runtime. Default is off: HTTP headers are then created statically + * by the makefsdata tool. Static headers mean smaller code size, but + * the (readonly) fsdata will grow a bit as every file includes the HTTP + * header. */ +#if !defined LWIP_HTTPD_DYNAMIC_HEADERS || defined __DOXYGEN__ +#define LWIP_HTTPD_DYNAMIC_HEADERS 0 +#endif + +#if !defined HTTPD_DEBUG || defined __DOXYGEN__ +#define HTTPD_DEBUG LWIP_DBG_OFF +#endif + +/** Set this to 1 to use a memp pool for allocating + * struct http_state instead of the heap. + * If enabled, you'll need to define MEMP_NUM_PARALLEL_HTTPD_CONNS + * (and MEMP_NUM_PARALLEL_HTTPD_SSI_CONNS for SSI) to set the size of + * the pool(s). + */ +#if !defined HTTPD_USE_MEM_POOL || defined __DOXYGEN__ +#define HTTPD_USE_MEM_POOL 0 +#endif + +/** The server port for HTTPD to use */ +#if !defined HTTPD_SERVER_PORT || defined __DOXYGEN__ +#define HTTPD_SERVER_PORT LWIP_IANA_PORT_HTTP +#endif + +/** The https server port for HTTPD to use */ +#if !defined HTTPD_SERVER_PORT_HTTPS || defined __DOXYGEN__ +#define HTTPD_SERVER_PORT_HTTPS LWIP_IANA_PORT_HTTPS +#endif + +/** Enable https support?
*/ +#if !defined HTTPD_ENABLE_HTTPS || defined __DOXYGEN__ +#define HTTPD_ENABLE_HTTPS 0 +#endif + +/** Maximum retries before the connection is aborted/closed. + * - number of times pcb->poll is called -> default is 4*500ms = 2s; + * - reset when pcb->sent is called + */ +#if !defined HTTPD_MAX_RETRIES || defined __DOXYGEN__ +#define HTTPD_MAX_RETRIES 4 +#endif + +/** The poll delay is X*500ms */ +#if !defined HTTPD_POLL_INTERVAL || defined __DOXYGEN__ +#define HTTPD_POLL_INTERVAL 4 +#endif + +/** Priority for tcp pcbs created by HTTPD (very low by default). + * Lower priorities get killed first when running out of memory. + */ +#if !defined HTTPD_TCP_PRIO || defined __DOXYGEN__ +#define HTTPD_TCP_PRIO TCP_PRIO_MIN +#endif + +/** Set this to 1 to enable timing each file sent */ +#if !defined LWIP_HTTPD_TIMING || defined __DOXYGEN__ +#define LWIP_HTTPD_TIMING 0 +#endif +/** Set this to 1 to enable timing each file sent */ +#if !defined HTTPD_DEBUG_TIMING || defined __DOXYGEN__ +#define HTTPD_DEBUG_TIMING LWIP_DBG_OFF +#endif + +/** Set this to one to show error pages when parsing a request fails instead + of simply closing the connection. */ +#if !defined LWIP_HTTPD_SUPPORT_EXTSTATUS || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_EXTSTATUS 0 +#endif + +/** Set this to 0 to drop support for HTTP/0.9 clients (to save some bytes) */ +#if !defined LWIP_HTTPD_SUPPORT_V09 || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_V09 1 +#endif + +/** Set this to 1 to enable HTTP/1.1 persistent connections. + * ATTENTION: If the generated file system includes HTTP headers, these must + * include the "Connection: keep-alive" header (pass argument "-11" to makefsdata). + */ +#if !defined LWIP_HTTPD_SUPPORT_11_KEEPALIVE || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_11_KEEPALIVE 0 +#endif + +/** Set this to 1 to support HTTP requests coming in in multiple packets/pbufs */ +#if !defined LWIP_HTTPD_SUPPORT_REQUESTLIST || defined __DOXYGEN__ +#define LWIP_HTTPD_SUPPORT_REQUESTLIST 1 +#endif + +#if LWIP_HTTPD_SUPPORT_REQUESTLIST +/** Number of rx pbufs to enqueue to parse an incoming request (up to the first + newline) */ +#if !defined LWIP_HTTPD_REQ_QUEUELEN || defined __DOXYGEN__ +#define LWIP_HTTPD_REQ_QUEUELEN 5 +#endif + +/** Number of (TCP payload-) bytes (in pbufs) to enqueue to parse an incoming + request (up to the first double-newline) */ +#if !defined LWIP_HTTPD_REQ_BUFSIZE || defined __DOXYGEN__ +#define LWIP_HTTPD_REQ_BUFSIZE LWIP_HTTPD_MAX_REQ_LENGTH +#endif + +/** Defines the maximum length of a HTTP request line (up to the first CRLF, + copied from pbuf into a global buffer when pbuf- or packet-queues + are received - otherwise the input pbuf is used directly) */ +#if !defined LWIP_HTTPD_MAX_REQ_LENGTH || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_REQ_LENGTH LWIP_MIN(1023, (LWIP_HTTPD_REQ_QUEUELEN * PBUF_POOL_BUFSIZE)) +#endif +#endif /* LWIP_HTTPD_SUPPORT_REQUESTLIST */ + +/** This is the size of a static buffer used when URIs end with '/'. + * In this buffer, the directory requested is concatenated with all the + * configured default file names. + * Set to 0 to disable checking default filenames on non-root directories. + */ +#if !defined LWIP_HTTPD_MAX_REQUEST_URI_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_MAX_REQUEST_URI_LEN 63 +#endif + +/** Maximum length of the filename to send as response to a POST request, + * filled in by the application when a POST is finished.
+ */ +#if !defined LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN || defined __DOXYGEN__ +#define LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN 63 +#endif + +/** Set this to 0 to not send the SSI tag (default is on, so the tag will + * be sent in the HTML page) */ +#if !defined LWIP_HTTPD_SSI_INCLUDE_TAG || defined __DOXYGEN__ +#define LWIP_HTTPD_SSI_INCLUDE_TAG 1 +#endif + +/** Set this to 1 to call tcp_abort when tcp_close fails with memory error. + * This can be used to prevent consuming all memory in situations where the + * HTTP server has low priority compared to other communication. */ +#if !defined LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR || defined __DOXYGEN__ +#define LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR 0 +#endif + +/** Set this to 1 to kill the oldest connection when running out of + * memory for 'struct http_state' or 'struct http_ssi_state'. + * ATTENTION: This puts all connections on a linked list, so may be kind of slow. + */ +#if !defined LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED || defined __DOXYGEN__ +#define LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED 0 +#endif + +/** Set this to 1 to send URIs without extension without headers + * (who uses this at all??) */ +#if !defined LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI || defined __DOXYGEN__ +#define LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI 0 +#endif + +/** Default: Tags are sent from struct http_state and are therefore volatile */ +#if !defined HTTP_IS_TAG_VOLATILE || defined __DOXYGEN__ +#define HTTP_IS_TAG_VOLATILE(ptr) TCP_WRITE_FLAG_COPY +#endif + +/* By default, the httpd is limited to send 2*pcb->mss to keep resource usage low + when http is not an important protocol in the device. */ +#if !defined HTTPD_LIMIT_SENDING_TO_2MSS || defined __DOXYGEN__ +#define HTTPD_LIMIT_SENDING_TO_2MSS 1 +#endif + +/* Define this to a function that returns the maximum amount of data to enqueue. + The function has this signature: u16_t fn(struct altcp_pcb* pcb); + The best place to define this is the hooks file (@see LWIP_HOOK_FILENAME) */ +#if !defined HTTPD_MAX_WRITE_LEN || defined __DOXYGEN__ +#if HTTPD_LIMIT_SENDING_TO_2MSS +#define HTTPD_MAX_WRITE_LEN(pcb) ((u16_t)(2 * altcp_mss(pcb))) +#endif +#endif + +/*------------------- FS OPTIONS -------------------*/ + +/** Set this to 1 and provide the functions: + * - "int fs_open_custom(struct fs_file *file, const char *name)" + * Called first for every opened file to allow opening files + * that are not included in fsdata(_custom).c + * - "void fs_close_custom(struct fs_file *file)" + * Called to free resources allocated by fs_open_custom(). + */ +#if !defined LWIP_HTTPD_CUSTOM_FILES || defined __DOXYGEN__ +#define LWIP_HTTPD_CUSTOM_FILES 0 +#endif + +/** Set this to 1 to support fs_read() to dynamically read file data. + * Without this (default=off), only one-block files are supported, + * and the contents must be ready after fs_open(). + */ +#if !defined LWIP_HTTPD_DYNAMIC_FILE_READ || defined __DOXYGEN__ +#define LWIP_HTTPD_DYNAMIC_FILE_READ 0 +#endif + +/** Set this to 1 to include an application state argument per file + * that is opened. This allows keeping a state per connection/file. + */ +#if !defined LWIP_HTTPD_FILE_STATE || defined __DOXYGEN__ +#define LWIP_HTTPD_FILE_STATE 0 +#endif + +/** HTTPD_PRECALCULATED_CHECKSUM==1: include precompiled checksums for + * predefined (MSS-sized) chunks of the files to prevent having to calculate + * the checksums at runtime.
*/ +#if !defined HTTPD_PRECALCULATED_CHECKSUM || defined __DOXYGEN__ +#define HTTPD_PRECALCULATED_CHECKSUM 0 +#endif + +/** LWIP_HTTPD_FS_ASYNC_READ==1: support asynchronous read operations + * (fs_read_async returns FS_READ_DELAYED and calls a callback when finished). + */ +#if !defined LWIP_HTTPD_FS_ASYNC_READ || defined __DOXYGEN__ +#define LWIP_HTTPD_FS_ASYNC_READ 0 +#endif + +/** Filename (including path) to use as FS data file */ +#if !defined HTTPD_FSDATA_FILE || defined __DOXYGEN__ +/* HTTPD_USE_CUSTOM_FSDATA: Compatibility with deprecated lwIP option */ +#if defined(HTTPD_USE_CUSTOM_FSDATA) && (HTTPD_USE_CUSTOM_FSDATA != 0) +#define HTTPD_FSDATA_FILE "fsdata_custom.c" +#else +#define HTTPD_FSDATA_FILE "fsdata.c" +#endif +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_HTTPD_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h new file mode 100644 index 0000000..ff4d6f7 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/lwiperf.h @@ -0,0 +1,100 @@ +/** + * @file + * lwIP iPerf server implementation + */ + +/* + * Copyright (c) 2014 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
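A lwipopts.h sketch pulling several of the httpd options above together for a small SSI/CGI-enabled server; the 256-byte insert length is an illustrative value:

#define LWIP_HTTPD_CGI                 1
#define LWIP_HTTPD_SSI                 1
#define LWIP_HTTPD_SUPPORT_POST        1
#define LWIP_HTTPD_MAX_TAG_INSERT_LEN  256  /* room for longer SSI insert strings */
#define LWIP_HTTPD_DYNAMIC_HEADERS     1    /* build HTTP headers at runtime */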
+ * + * Author: Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_LWIPERF_H +#define LWIP_HDR_APPS_LWIPERF_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define LWIPERF_TCP_PORT_DEFAULT 5001 + +/** lwIPerf test results */ +enum lwiperf_report_type +{ + /** The server side test is done */ + LWIPERF_TCP_DONE_SERVER, + /** The client side test is done */ + LWIPERF_TCP_DONE_CLIENT, + /** Local error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL, + /** Data check error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL_DATAERROR, + /** Transmit error lead to test abort */ + LWIPERF_TCP_ABORTED_LOCAL_TXERROR, + /** Remote side aborted the test */ + LWIPERF_TCP_ABORTED_REMOTE +}; + +/** Control */ +enum lwiperf_client_type +{ + /** Unidirectional tx only test */ + LWIPERF_CLIENT, + /** Do a bidirectional test simultaneously */ + LWIPERF_DUAL, + /** Do a bidirectional test individually */ + LWIPERF_TRADEOFF +}; + +/** Prototype of a report function that is called when a session is finished. + This report function can show the test results. + @param report_type contains the test result */ +typedef void (*lwiperf_report_fn)(void *arg, enum lwiperf_report_type report_type, + const ip_addr_t* local_addr, u16_t local_port, const ip_addr_t* remote_addr, u16_t remote_port, + u32_t bytes_transferred, u32_t ms_duration, u32_t bandwidth_kbitpsec); + +void* lwiperf_start_tcp_server(const ip_addr_t* local_addr, u16_t local_port, + lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_server_default(lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_client(const ip_addr_t* remote_addr, u16_t remote_port, + enum lwiperf_client_type type, + lwiperf_report_fn report_fn, void* report_arg); +void* lwiperf_start_tcp_client_default(const ip_addr_t* remote_addr, + lwiperf_report_fn report_fn, void* report_arg); + +void lwiperf_abort(void* lwiperf_session); + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_LWIPERF_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h new file mode 100644 index 0000000..3c3d054 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns.h @@ -0,0 +1,105 @@ +/** + * @file + * MDNS responder + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
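Example usage (sketch): start the iPerf TCP server on the default port (5001) and inspect the result in the report callback; this should run in the tcpip_thread context (e.g. via tcpip_callback), and the callback name is illustrative.

#include "lwip/apps/lwiperf.h"

static void iperf_report(void *arg, enum lwiperf_report_type report_type,
                         const ip_addr_t *local_addr, u16_t local_port,
                         const ip_addr_t *remote_addr, u16_t remote_port,
                         u32_t bytes_transferred, u32_t ms_duration,
                         u32_t bandwidth_kbitpsec)
{
  /* e.g. log bandwidth_kbitpsec when report_type == LWIPERF_TCP_DONE_SERVER */
}

void example_start_iperf(void)
{
  void *session = lwiperf_start_tcp_server_default(iperf_report, NULL);
  /* keep 'session' if the test should be stopped later via lwiperf_abort() */
  (void)session;
}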
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ + +#ifndef LWIP_HDR_APPS_MDNS_H +#define LWIP_HDR_APPS_MDNS_H + +#include "lwip/apps/mdns_opts.h" +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_MDNS_RESPONDER + +enum mdns_sd_proto { + DNSSD_PROTO_UDP = 0, + DNSSD_PROTO_TCP = 1 +}; + +#define MDNS_PROBING_CONFLICT 0 +#define MDNS_PROBING_SUCCESSFUL 1 + +#define MDNS_LABEL_MAXLEN 63 + +struct mdns_host; +struct mdns_service; + +/** Callback function to add text to a reply, called when generating the reply */ +typedef void (*service_get_txt_fn_t)(struct mdns_service *service, void *txt_userdata); + +/** Callback function to let application know the result of probing network for name + * uniqueness, called with result MDNS_PROBING_SUCCESSFUL if no other node claimed + * use for the name for the netif or a service and it is safe to use, or MDNS_PROBING_CONFLICT + * if another node is already using it and mdns is disabled on this interface */ +typedef void (*mdns_name_result_cb_t)(struct netif* netif, u8_t result); + +void mdns_resp_init(void); + +void mdns_resp_register_name_result_cb(mdns_name_result_cb_t cb); + +err_t mdns_resp_add_netif(struct netif *netif, const char *hostname, u32_t dns_ttl); +err_t mdns_resp_remove_netif(struct netif *netif); +err_t mdns_resp_rename_netif(struct netif *netif, const char *hostname); + +s8_t mdns_resp_add_service(struct netif *netif, const char *name, const char *service, enum mdns_sd_proto proto, u16_t port, u32_t dns_ttl, service_get_txt_fn_t txt_fn, void *txt_userdata); +err_t mdns_resp_del_service(struct netif *netif, s8_t slot); +err_t mdns_resp_rename_service(struct netif *netif, s8_t slot, const char *name); + +err_t mdns_resp_add_service_txtitem(struct mdns_service *service, const char *txt, u8_t txt_len); + +void mdns_resp_restart(struct netif *netif); +void mdns_resp_announce(struct netif *netif); + +/** + * @ingroup mdns + * Announce IP settings have changed on netif. + * Call this in your callback registered by netif_set_status_callback(). + * No need to call this function when LWIP_NETIF_EXT_STATUS_CALLBACK==1, + * this is handled automatically for you. + * @param netif The network interface where settings have changed. + */ +#define mdns_resp_netif_settings_changed(netif) mdns_resp_announce(netif) + +#endif /* LWIP_MDNS_RESPONDER */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MDNS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h new file mode 100644 index 0000000..74d3f41 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_opts.h @@ -0,0 +1,81 @@ +/** + * @file + * MDNS responder + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved.
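Example usage (sketch): announce this node and one HTTP service through the mDNS responder API above, assuming LWIP_MDNS_RESPONDER==1 (with UDP, and IGMP for IPv4) and a netif that is up; the hostname and TXT record are illustrative.

#include "lwip/apps/mdns.h"

static void srv_txt(struct mdns_service *service, void *txt_userdata)
{
  /* called whenever a reply is generated; add TXT records here */
  mdns_resp_add_service_txtitem(service, "path=/", 6);
}

void example_start_mdns(struct netif *netif)
{
  mdns_resp_init();
  mdns_resp_add_netif(netif, "my-board", 3600);
  mdns_resp_add_service(netif, "my-board", "_http", DNSSD_PROTO_TCP,
                        80, 3600, srv_txt, NULL);
}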
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ + +#ifndef LWIP_HDR_APPS_MDNS_OPTS_H +#define LWIP_HDR_APPS_MDNS_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup mdns_opts Options + * @ingroup mdns + * @{ + */ + +/** + * LWIP_MDNS_RESPONDER==1: Turn on multicast DNS module. UDP must be available for MDNS + * transport. IGMP is needed for IPv4 multicast. + */ +#ifndef LWIP_MDNS_RESPONDER +#define LWIP_MDNS_RESPONDER 0 +#endif /* LWIP_MDNS_RESPONDER */ + +/** The maximum number of services per netif */ +#ifndef MDNS_MAX_SERVICES +#define MDNS_MAX_SERVICES 1 +#endif + +/** MDNS_RESP_USENETIF_EXTCALLBACK==1: register an ext_callback on the netif + * to automatically restart probing/announcing on status or address change. + */ +#ifndef MDNS_RESP_USENETIF_EXTCALLBACK +#define MDNS_RESP_USENETIF_EXTCALLBACK LWIP_NETIF_EXT_STATUS_CALLBACK +#endif + +/** + * MDNS_DEBUG: Enable debugging for multicast DNS. + */ +#ifndef MDNS_DEBUG +#define MDNS_DEBUG LWIP_DBG_OFF +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_MDNS_OPTS_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h new file mode 100644 index 0000000..2394157 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mdns_priv.h @@ -0,0 +1,74 @@ +/** + * @file + * MDNS responder private definitions + */ + + /* + * Copyright (c) 2015 Verisure Innovation AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Ekman + * + */ +#ifndef LWIP_HDR_MDNS_PRIV_H +#define LWIP_HDR_MDNS_PRIV_H + +#include "lwip/apps/mdns_opts.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_MDNS_RESPONDER + +/* Domain struct and methods - visible for unit tests */ + +#define MDNS_DOMAIN_MAXLEN 256 +#define MDNS_READNAME_ERROR 0xFFFF + +struct mdns_domain { + /* Encoded domain name */ + u8_t name[MDNS_DOMAIN_MAXLEN]; + /* Total length of domain name, including zero */ + u16_t length; + /* Set if compression of this domain is not allowed */ + u8_t skip_compression; +}; + +err_t mdns_domain_add_label(struct mdns_domain *domain, const char *label, u8_t len); +u16_t mdns_readname(struct pbuf *p, u16_t offset, struct mdns_domain *domain); +int mdns_domain_eq(struct mdns_domain *a, struct mdns_domain *b); +u16_t mdns_compress_domain(struct pbuf *pbuf, u16_t *offset, struct mdns_domain *domain); + +#endif /* LWIP_MDNS_RESPONDER */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_MDNS_PRIV_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h new file mode 100644 index 0000000..e796ae1 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt.h @@ -0,0 +1,205 @@ +/** + * @file + * MQTT client + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_CLIENT_H +#define LWIP_HDR_APPS_MQTT_CLIENT_H + +#include "lwip/apps/mqtt_opts.h" +#include "lwip/err.h" +#include "lwip/ip_addr.h" +#include "lwip/prot/iana.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct mqtt_client_s mqtt_client_t; + +#if LWIP_ALTCP && LWIP_ALTCP_TLS +struct altcp_tls_config; +#endif + +/** @ingroup mqtt + * Default MQTT port (non-TLS) */ +#define MQTT_PORT LWIP_IANA_PORT_MQTT +/** @ingroup mqtt + * Default MQTT TLS port */ +#define MQTT_TLS_PORT LWIP_IANA_PORT_SECURE_MQTT + +/*---------------------------------------------------------------------------------------------- */ +/* Connection with server */ + +/** + * @ingroup mqtt + * Client information and connection parameters */ +struct mqtt_connect_client_info_t { + /** Client identifier, must be set by caller */ + const char *client_id; + /** User name, set to NULL if not used */ + const char* client_user; + /** Password, set to NULL if not used */ + const char* client_pass; + /** keep alive time in seconds, 0 to disable keep alive functionality*/ + u16_t keep_alive; + /** will topic, set to NULL if will is not to be used, + will_msg, will_qos and will retain are then ignored */ + const char* will_topic; + /** will_msg, see will_topic */ + const char* will_msg; + /** will_qos, see will_topic */ + u8_t will_qos; + /** will_retain, see will_topic */ + u8_t will_retain; +#if LWIP_ALTCP && LWIP_ALTCP_TLS + /** TLS configuration for secure connections */ + struct altcp_tls_config *tls_config; +#endif +}; + +/** + * @ingroup mqtt + * Connection status codes */ +typedef enum +{ + /** Accepted */ + MQTT_CONNECT_ACCEPTED = 0, + /** Refused protocol version */ + MQTT_CONNECT_REFUSED_PROTOCOL_VERSION = 1, + /** Refused identifier */ + MQTT_CONNECT_REFUSED_IDENTIFIER = 2, + /** Refused server */ + MQTT_CONNECT_REFUSED_SERVER = 3, + /** Refused user credentials */ + MQTT_CONNECT_REFUSED_USERNAME_PASS = 4, + /** Refused not authorized */ + MQTT_CONNECT_REFUSED_NOT_AUTHORIZED_ = 5, + /** Disconnected */ + MQTT_CONNECT_DISCONNECTED = 256, + /** Timeout */ + MQTT_CONNECT_TIMEOUT = 257 +} mqtt_connection_status_t; + +/** + * @ingroup mqtt + * Function prototype for mqtt connection status callback. 
Called when + * client has connected to the server after initiating a mqtt connection attempt by + * calling mqtt_client_connect() or when connection is closed by server or an error + * + * @param client MQTT client itself + * @param arg Additional argument to pass to the callback function + * @param status Connect result code or disconnection notification @see mqtt_connection_status_t + * + */ +typedef void (*mqtt_connection_cb_t)(mqtt_client_t *client, void *arg, mqtt_connection_status_t status); + + +/** + * @ingroup mqtt + * Data callback flags */ +enum { + /** Flag set when last fragment of data arrives in data callback */ + MQTT_DATA_FLAG_LAST = 1 +}; + +/** + * @ingroup mqtt + * Function prototype for MQTT incoming publish data callback function. Called when data + * arrives to a subscribed topic @see mqtt_subscribe + * + * @param arg Additional argument to pass to the callback function + * @param data User data, pointed object, data may not be referenced after callback return, + NULL is passed when all publish data are delivered + * @param len Length of publish data fragment + * @param flags MQTT_DATA_FLAG_LAST set when this call contains the last part of data from publish message + * + */ +typedef void (*mqtt_incoming_data_cb_t)(void *arg, const u8_t *data, u16_t len, u8_t flags); + + +/** + * @ingroup mqtt + * Function prototype for MQTT incoming publish function. Called when an incoming publish + * arrives to a subscribed topic @see mqtt_subscribe + * + * @param arg Additional argument to pass to the callback function + * @param topic Zero terminated Topic text string, topic may not be referenced after callback return + * @param tot_len Total length of publish data, if set to 0 (no publish payload) data callback will not be invoked + */ +typedef void (*mqtt_incoming_publish_cb_t)(void *arg, const char *topic, u32_t tot_len); + + +/** + * @ingroup mqtt + * Function prototype for mqtt request callback. 
Called when a subscribe, unsubscribe + * or publish request has completed + * @param arg Pointer to user data supplied when invoking request + * @param err ERR_OK on success + * ERR_TIMEOUT if no response was received within timeout, + * ERR_ABRT if (un)subscribe was denied + */ +typedef void (*mqtt_request_cb_t)(void *arg, err_t err); + + +err_t mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ipaddr, u16_t port, mqtt_connection_cb_t cb, void *arg, + const struct mqtt_connect_client_info_t *client_info); + +void mqtt_disconnect(mqtt_client_t *client); + +mqtt_client_t *mqtt_client_new(void); +void mqtt_client_free(mqtt_client_t* client); + +u8_t mqtt_client_is_connected(mqtt_client_t *client); + +void mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t, + mqtt_incoming_data_cb_t data_cb, void *arg); + +err_t mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub); + +/** @ingroup mqtt + *Subscribe to topic */ +#define mqtt_subscribe(client, topic, qos, cb, arg) mqtt_sub_unsub(client, topic, qos, cb, arg, 1) +/** @ingroup mqtt + * Unsubscribe to topic */ +#define mqtt_unsubscribe(client, topic, cb, arg) mqtt_sub_unsub(client, topic, 0, cb, arg, 0) + +err_t mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain, + mqtt_request_cb_t cb, void *arg); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_CLIENT_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h new file mode 100644 index 0000000..e056130 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_opts.h @@ -0,0 +1,103 @@ +/** + * @file + * MQTT client options + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
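Read together, the declarations above give the whole client life cycle: allocate, connect, install the two-stage incoming-publish callbacks, then subscribe and publish. A usage sketch; the broker address, client id, topics and QoS values are invented for illustration, and only calls declared in mqtt.h are used:

#include <string.h>
#include "lwip/apps/mqtt.h"

/* Completion callback shared by the subscribe and publish requests below. */
static void request_cb(void *arg, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);
  /* err is ERR_OK, ERR_TIMEOUT or ERR_ABRT, as documented above */
}

/* First stage of an incoming publish: topic and total payload length. */
static void incoming_publish_cb(void *arg, const char *topic, u32_t tot_len)
{
  LWIP_UNUSED_ARG(arg); LWIP_UNUSED_ARG(topic); LWIP_UNUSED_ARG(tot_len);
}

/* Second stage: payload fragments; MQTT_DATA_FLAG_LAST marks the final one. */
static void incoming_data_cb(void *arg, const u8_t *data, u16_t len, u8_t flags)
{
  LWIP_UNUSED_ARG(arg); LWIP_UNUSED_ARG(data);
  LWIP_UNUSED_ARG(len); LWIP_UNUSED_ARG(flags);
}

static void connection_cb(mqtt_client_t *client, void *arg, mqtt_connection_status_t status)
{
  if (status == MQTT_CONNECT_ACCEPTED) {
    mqtt_set_inpub_callback(client, incoming_publish_cb, incoming_data_cb, arg);
    mqtt_subscribe(client, "sensors/#", 1, request_cb, arg);
    mqtt_publish(client, "status", "up", 2, 1, 0, request_cb, arg);
  }
  /* On MQTT_CONNECT_DISCONNECTED or MQTT_CONNECT_TIMEOUT a real
   * application would schedule a reconnect here. */
}

void example_mqtt_start(const ip_addr_t *broker_ip)
{
  static struct mqtt_connect_client_info_t ci;
  mqtt_client_t *client = mqtt_client_new();
  if (client == NULL) {
    return;
  }
  memset(&ci, 0, sizeof(ci));
  ci.client_id = "lwip-node"; /* the only mandatory field */
  mqtt_client_connect(client, broker_ip, MQTT_PORT, connection_cb, NULL, &ci);
}

Note that mqtt_client_connect() only starts the handshake: acceptance or refusal arrives in connection_cb, which is why the subscription is (re)established there rather than immediately after the connect call.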
+ * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_OPTS_H +#define LWIP_HDR_APPS_MQTT_OPTS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup mqtt_opts Options + * @ingroup mqtt + * @{ + */ + +/** + * Output ring-buffer size, must be able to fit largest outgoing publish message topic+payloads + */ +#ifndef MQTT_OUTPUT_RINGBUF_SIZE +#define MQTT_OUTPUT_RINGBUF_SIZE 256 +#endif + +/** + * Number of bytes in receive buffer, must be at least the size of the longest incoming topic + 8 + * If one wants to avoid fragmented incoming publish, set length to max incoming topic length + max payload length + 8 + */ +#ifndef MQTT_VAR_HEADER_BUFFER_LEN +#define MQTT_VAR_HEADER_BUFFER_LEN 128 +#endif + +/** + * Maximum number of pending subscribe, unsubscribe and publish requests to server . + */ +#ifndef MQTT_REQ_MAX_IN_FLIGHT +#define MQTT_REQ_MAX_IN_FLIGHT 4 +#endif + +/** + * Seconds between each cyclic timer call. + */ +#ifndef MQTT_CYCLIC_TIMER_INTERVAL +#define MQTT_CYCLIC_TIMER_INTERVAL 5 +#endif + +/** + * Publish, subscribe and unsubscribe request timeout in seconds. + */ +#ifndef MQTT_REQ_TIMEOUT +#define MQTT_REQ_TIMEOUT 30 +#endif + +/** + * Seconds for MQTT connect response timeout after sending connect request + */ +#ifndef MQTT_CONNECT_TIMOUT +#define MQTT_CONNECT_TIMOUT 100 +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h new file mode 100644 index 0000000..04d2336 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/mqtt_priv.h @@ -0,0 +1,104 @@ +/** + * @file + * MQTT client (private interface) + */ + +/* + * Copyright (c) 2016 Erik Andersson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
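Every option in mqtt_opts.h is wrapped in #ifndef, so projects size the buffers from lwipopts.h instead of editing this file. A hypothetical fragment, with limits invented for an application whose longest incoming topic is 64 bytes and largest incoming payload is 256 bytes:

/* lwipopts.h (illustrative values) */
#define MQTT_OUTPUT_RINGBUF_SIZE   1024            /* >= largest outgoing topic + payload */
#define MQTT_VAR_HEADER_BUFFER_LEN (64 + 256 + 8)  /* max topic + max payload + 8: avoids fragmented input */
#define MQTT_REQ_MAX_IN_FLIGHT     8               /* more concurrent (un)subscribe/publish requests */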
+ * + * Author: Erik Andersson + * + */ +#ifndef LWIP_HDR_APPS_MQTT_PRIV_H +#define LWIP_HDR_APPS_MQTT_PRIV_H + +#include "lwip/apps/mqtt.h" +#include "lwip/altcp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** Pending request item, binds application callback to pending server requests */ +struct mqtt_request_t +{ + /** Next item in list, NULL means this is the last in chain, + next pointing at itself means request is unallocated */ + struct mqtt_request_t *next; + /** Callback to upper layer */ + mqtt_request_cb_t cb; + void *arg; + /** MQTT packet identifier */ + u16_t pkt_id; + /** Expire time relative to element before this */ + u16_t timeout_diff; +}; + +/** Ring buffer */ +struct mqtt_ringbuf_t { + u16_t put; + u16_t get; + u8_t buf[MQTT_OUTPUT_RINGBUF_SIZE]; +}; + +/** MQTT client */ +struct mqtt_client_s +{ + /** Timers and timeouts */ + u16_t cyclic_tick; + u16_t keep_alive; + u16_t server_watchdog; + /** Packet identifier generator*/ + u16_t pkt_id_seq; + /** Packet identifier of pending incoming publish */ + u16_t inpub_pkt_id; + /** Connection state */ + u8_t conn_state; + struct altcp_pcb *conn; + /** Connection callback */ + void *connect_arg; + mqtt_connection_cb_t connect_cb; + /** Pending requests to server */ + struct mqtt_request_t *pend_req_queue; + struct mqtt_request_t req_list[MQTT_REQ_MAX_IN_FLIGHT]; + void *inpub_arg; + /** Incoming data callback */ + mqtt_incoming_data_cb_t data_cb; + mqtt_incoming_publish_cb_t pub_cb; + /** Input */ + u32_t msg_idx; + u8_t rx_buffer[MQTT_VAR_HEADER_BUFFER_LEN]; + /** Output ring-buffer */ + struct mqtt_ringbuf_t output; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_MQTT_PRIV_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h new file mode 100644 index 0000000..c5aedb6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns.h @@ -0,0 +1,51 @@ +/** + * @file + * NETBIOS name service responder + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
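The comment on struct mqtt_request_t above encodes allocation state in the next pointer: a slot whose next field points at itself is free. lwIP's mqtt.c manages the req_list[] pool internally, so the following is an illustrative sketch of that convention only, not part of the API:

#include "lwip/apps/mqtt_priv.h"

/* Find a free request slot in the client's static pool (sketch). */
static struct mqtt_request_t *request_slot_alloc(struct mqtt_client_s *client)
{
  int i;
  for (i = 0; i < MQTT_REQ_MAX_IN_FLIGHT; i++) {
    struct mqtt_request_t *r = &client->req_list[i];
    if (r->next == r) { /* self-link marks an unallocated slot */
      r->next = NULL;
      return r;
    }
  }
  return NULL; /* all MQTT_REQ_MAX_IN_FLIGHT requests are pending */
}

The scheme needs no separate "in use" flag and keeps the pool a plain array, which is why MQTT_REQ_MAX_IN_FLIGHT directly bounds the client's memory footprint.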
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_APPS_NETBIOS_H +#define LWIP_HDR_APPS_NETBIOS_H + +#include "lwip/apps/netbiosns_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void netbiosns_init(void); +#ifndef NETBIOS_LWIP_NAME +void netbiosns_set_name(const char* hostname); +#endif +void netbiosns_stop(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_NETBIOS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h new file mode 100644 index 0000000..0bab59e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/netbiosns_opts.h @@ -0,0 +1,66 @@ +/** + * @file + * NETBIOS name service responder options + */ + +/* + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + */ +#ifndef LWIP_HDR_APPS_NETBIOS_OPTS_H +#define LWIP_HDR_APPS_NETBIOS_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup netbiosns_opts Options + * @ingroup netbiosns + * @{ + */ + +/** NetBIOS name of lwip device + * This must be uppercase until NETBIOS_STRCMP() is defined to a string + * comparision function that is case insensitive. + * If you want to use the netif's hostname, use this (with LWIP_NETIF_HOSTNAME): + * (ip_current_netif() != NULL ? ip_current_netif()->hostname != NULL ? 
ip_current_netif()->hostname : "" : "") + * + * If this is not defined, netbiosns_set_name() can be called at runtime to change the name. + */ +#ifdef __DOXYGEN__ +#define NETBIOS_LWIP_NAME "NETBIOSLWIPDEV" +#endif + +/** Respond to NetBIOS name queries + * Default is disabled + */ +#if !defined LWIP_NETBIOS_RESPOND_NAME_QUERY || defined __DOXYGEN__ +#define LWIP_NETBIOS_RESPOND_NAME_QUERY 0 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_NETBIOS_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h new file mode 100644 index 0000000..e6076a1 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp.h @@ -0,0 +1,128 @@ +#ifndef LWIP_HDR_APPS_SMTP_H +#define LWIP_HDR_APPS_SMTP_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "lwip/apps/smtp_opts.h" +#include "lwip/err.h" +#include "lwip/prot/iana.h" + +/** The default TCP port used for SMTP */ +#define SMTP_DEFAULT_PORT LWIP_IANA_PORT_SMTP +/** The default TCP port used for SMTPS */ +#define SMTPS_DEFAULT_PORT LWIP_IANA_PORT_SMTPS + +/** Email successfully sent */ +#define SMTP_RESULT_OK 0 +/** Unknown error */ +#define SMTP_RESULT_ERR_UNKNOWN 1 +/** Connection to server failed */ +#define SMTP_RESULT_ERR_CONNECT 2 +/** Failed to resolve server hostname */ +#define SMTP_RESULT_ERR_HOSTNAME 3 +/** Connection unexpectedly closed by remote server */ +#define SMTP_RESULT_ERR_CLOSED 4 +/** Connection timed out (server didn't respond in time) */ +#define SMTP_RESULT_ERR_TIMEOUT 5 +/** Server responded with an unknown response code */ +#define SMTP_RESULT_ERR_SVR_RESP 6 +/** Out of resources locally */ +#define SMTP_RESULT_ERR_MEM 7 + +/** Prototype of an smtp callback function + * + * @param arg argument specified when initiating the email + * @param smtp_result result of the mail transfer (see defines SMTP_RESULT_*) + * @param srv_err if aborted by the server, this contains the error code received + * @param err an error returned by internal lwip functions, can help to specify + * the source of the error but must not necessarily be != ERR_OK + */ +typedef void (*smtp_result_fn)(void *arg, u8_t smtp_result, u16_t srv_err, err_t err); + +/** This structure is used as argument for smtp_send_mail_int(), + * which in turn can be used with tcpip_callback() to send mail + * from interrupt context, e.g. like this: + * struct smtp_send_request *req; (to be filled) + * tcpip_try_callback(smtp_send_mail_int, (void*)req); + * + * For member description, see parameter description of smtp_send_mail(). + * When using with tcpip_callback, this structure has to stay allocated + * (e.g. using mem_malloc/mem_free) until its 'callback_fn' is called. + */ +struct smtp_send_request { + const char *from; + const char* to; + const char* subject; + const char* body; + smtp_result_fn callback_fn; + void* callback_arg; + /** If this is != 0, data is *not* copied into an extra buffer + * but used from the pointers supplied in this struct. + * This means less memory usage, but data must stay untouched until + * the callback function is called. 
*/ + u8_t static_data; +}; + + +#if SMTP_BODYDH + +#ifndef SMTP_BODYDH_BUFFER_SIZE +#define SMTP_BODYDH_BUFFER_SIZE 256 +#endif /* SMTP_BODYDH_BUFFER_SIZE */ + +struct smtp_bodydh { + u16_t state; + u16_t length; /* Length of content in buffer */ + char buffer[SMTP_BODYDH_BUFFER_SIZE]; /* buffer for generated content */ +#ifdef SMTP_BODYDH_USER_SIZE + u8_t user[SMTP_BODYDH_USER_SIZE]; +#endif /* SMTP_BODYDH_USER_SIZE */ +}; + +enum bdh_retvals_e { + BDH_DONE = 0, + BDH_WORKING +}; + +/** Prototype of an smtp body callback function + * It receives a struct smtp_bodydh, and a buffer to write data, + * must return BDH_WORKING to be called again and BDH_DONE when + * it has finished processing. This one tries to fill one TCP buffer with + * data, your function will be repeatedly called until that happens; so if you + * know you'll be taking too long to serve your request, pause once in a while + * by writing length=0 to avoid hogging system resources + * + * @param arg argument specified when initiating the email + * @param smtp_bodydh state handling + buffer structure + */ +typedef int (*smtp_bodycback_fn)(void *arg, struct smtp_bodydh *bodydh); + +err_t smtp_send_mail_bodycback(const char *from, const char* to, const char* subject, + smtp_bodycback_fn bodycback_fn, smtp_result_fn callback_fn, void* callback_arg); + +#endif /* SMTP_BODYDH */ + + +err_t smtp_set_server_addr(const char* server); +void smtp_set_server_port(u16_t port); +#if LWIP_ALTCP && LWIP_ALTCP_TLS +struct altcp_tls_config; +void smtp_set_tls_config(struct altcp_tls_config *tls_config); +#endif +err_t smtp_set_auth(const char* username, const char* pass); +err_t smtp_send_mail(const char *from, const char* to, const char* subject, const char* body, + smtp_result_fn callback_fn, void* callback_arg); +err_t smtp_send_mail_static(const char *from, const char* to, const char* subject, const char* body, + smtp_result_fn callback_fn, void* callback_arg); +void smtp_send_mail_int(void *arg); +#ifdef LWIP_DEBUG +const char* smtp_result_str(u8_t smtp_result); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SMTP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h new file mode 100644 index 0000000..29c0c08 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/smtp_opts.h @@ -0,0 +1,81 @@ +#ifndef LWIP_HDR_APPS_SMTP_OPTS_H +#define LWIP_HDR_APPS_SMTP_OPTS_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup smtp_opts Options + * @ingroup smtp + * + * @{ + */ + +/** Set this to 1 to enable data handler callback on BODY */ +#ifndef SMTP_BODYDH +#define SMTP_BODYDH 0 +#endif + +/** SMTP_DEBUG: Enable debugging for SNTP. */ +#ifndef SMTP_DEBUG +#define SMTP_DEBUG LWIP_DBG_OFF +#endif + +/** Maximum length reserved for server name including terminating 0 byte */ +#ifndef SMTP_MAX_SERVERNAME_LEN +#define SMTP_MAX_SERVERNAME_LEN 256 +#endif + +/** Maximum length reserved for username */ +#ifndef SMTP_MAX_USERNAME_LEN +#define SMTP_MAX_USERNAME_LEN 32 +#endif + +/** Maximum length reserved for password */ +#ifndef SMTP_MAX_PASS_LEN +#define SMTP_MAX_PASS_LEN 32 +#endif + +/** Set this to 0 if you know the authentication data will not change + * during the smtp session, which saves some heap space. 
*/ +#ifndef SMTP_COPY_AUTHDATA +#define SMTP_COPY_AUTHDATA 1 +#endif + +/** Set this to 0 to save some code space if you know for sure that all data + * passed to this module conforms to the requirements in the SMTP RFC. + * WARNING: use this with care! + */ +#ifndef SMTP_CHECK_DATA +#define SMTP_CHECK_DATA 1 +#endif + +/** Set this to 1 to enable AUTH PLAIN support */ +#ifndef SMTP_SUPPORT_AUTH_PLAIN +#define SMTP_SUPPORT_AUTH_PLAIN 1 +#endif + +/** Set this to 1 to enable AUTH LOGIN support */ +#ifndef SMTP_SUPPORT_AUTH_LOGIN +#define SMTP_SUPPORT_AUTH_LOGIN 1 +#endif + +/* Memory allocation/deallocation can be overridden... */ +#ifndef SMTP_STATE_MALLOC +#define SMTP_STATE_MALLOC(size) mem_malloc(size) +#define SMTP_STATE_FREE(ptr) mem_free(ptr) +#endif + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* SMTP_OPTS_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h new file mode 100644 index 0000000..15b154e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp.h @@ -0,0 +1,135 @@ +/** + * @file + * SNMP server main API - start and basic configuration + */ + +/* + * Copyright (c) 2001, 2002 Leon Woestenberg + * Copyright (c) 2001, 2002 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
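With smtp.h and these options in place, a minimal send looks roughly as follows; the server name, credentials and mail addresses are placeholders:

#include "lwip/apps/smtp.h"

static void mail_result(void *arg, u8_t smtp_result, u16_t srv_err, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(srv_err);
  LWIP_UNUSED_ARG(err);
  if (smtp_result == SMTP_RESULT_OK) {
    /* mail accepted by the server */
  }
  /* otherwise smtp_result is one of the other SMTP_RESULT_* codes above */
}

void example_send_mail(void)
{
  smtp_set_server_addr("mail.example.org"); /* hostname or IP string */
  smtp_set_server_port(SMTP_DEFAULT_PORT);
  smtp_set_auth("user", "secret");
  smtp_send_mail("node@example.org", "ops@example.org",
                 "alert", "threshold exceeded",
                 mail_result, NULL);
}

smtp_send_mail() copies its arguments; smtp_send_mail_static() avoids the copy under the constraint that the strings stay untouched until mail_result fires. From interrupt context, the struct smtp_send_request route described in the header (handing smtp_send_mail_int to tcpip_try_callback) performs the same job, with the request kept allocated until its callback_fn runs.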
+ * + * Author: Leon Woestenberg + * Martin Hentschel + * + */ +#ifndef LWIP_HDR_APPS_SNMP_H +#define LWIP_HDR_APPS_SNMP_H + +#include "lwip/apps/snmp_opts.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/err.h" +#include "lwip/apps/snmp_core.h" + +/** SNMP variable binding descriptor (publically needed for traps) */ +struct snmp_varbind +{ + /** pointer to next varbind, NULL for last in list */ + struct snmp_varbind *next; + /** pointer to previous varbind, NULL for first in list */ + struct snmp_varbind *prev; + + /** object identifier */ + struct snmp_obj_id oid; + + /** value ASN1 type */ + u8_t type; + /** object value length */ + u16_t value_len; + /** object value */ + void *value; +}; + +/** + * @ingroup snmp_core + * Agent setup, start listening to port 161. + */ +void snmp_init(void); +void snmp_set_mibs(const struct snmp_mib **mibs, u8_t num_mibs); + +void snmp_set_device_enterprise_oid(const struct snmp_obj_id* device_enterprise_oid); +const struct snmp_obj_id* snmp_get_device_enterprise_oid(void); + +void snmp_trap_dst_enable(u8_t dst_idx, u8_t enable); +void snmp_trap_dst_ip_set(u8_t dst_idx, const ip_addr_t *dst); + +/** Generic trap: cold start */ +#define SNMP_GENTRAP_COLDSTART 0 +/** Generic trap: warm start */ +#define SNMP_GENTRAP_WARMSTART 1 +/** Generic trap: link down */ +#define SNMP_GENTRAP_LINKDOWN 2 +/** Generic trap: link up */ +#define SNMP_GENTRAP_LINKUP 3 +/** Generic trap: authentication failure */ +#define SNMP_GENTRAP_AUTH_FAILURE 4 +/** Generic trap: EGP neighbor lost */ +#define SNMP_GENTRAP_EGP_NEIGHBOR_LOSS 5 +/** Generic trap: enterprise specific */ +#define SNMP_GENTRAP_ENTERPRISE_SPECIFIC 6 + +err_t snmp_send_trap_generic(s32_t generic_trap); +err_t snmp_send_trap_specific(s32_t specific_trap, struct snmp_varbind *varbinds); +err_t snmp_send_trap(const struct snmp_obj_id* oid, s32_t generic_trap, s32_t specific_trap, struct snmp_varbind *varbinds); + +#define SNMP_AUTH_TRAPS_DISABLED 0 +#define SNMP_AUTH_TRAPS_ENABLED 1 +void snmp_set_auth_traps_enabled(u8_t enable); +u8_t snmp_get_auth_traps_enabled(void); + +u8_t snmp_v1_enabled(void); +u8_t snmp_v2c_enabled(void); +u8_t snmp_v3_enabled(void); +void snmp_v1_enable(u8_t enable); +void snmp_v2c_enable(u8_t enable); +void snmp_v3_enable(u8_t enable); + +const char * snmp_get_community(void); +const char * snmp_get_community_write(void); +const char * snmp_get_community_trap(void); +void snmp_set_community(const char * const community); +void snmp_set_community_write(const char * const community); +void snmp_set_community_trap(const char * const community); + +void snmp_coldstart_trap(void); +void snmp_authfail_trap(void); + +typedef void (*snmp_write_callback_fct)(const u32_t* oid, u8_t oid_len, void* callback_arg); +void snmp_set_write_callback(snmp_write_callback_fct write_callback, void* callback_arg); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h new file mode 100644 index 0000000..4748df6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_core.h @@ -0,0 +1,377 @@ +/** + * @file + * SNMP core API for implementing MIBs + */ + +/* + * Copyright 
(c) 2006 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Christiaan Simons + * Martin Hentschel + */ + +#ifndef LWIP_HDR_APPS_SNMP_CORE_H +#define LWIP_HDR_APPS_SNMP_CORE_H + +#include "lwip/apps/snmp_opts.h" + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* basic ASN1 defines */ +#define SNMP_ASN1_CLASS_UNIVERSAL 0x00 +#define SNMP_ASN1_CLASS_APPLICATION 0x40 +#define SNMP_ASN1_CLASS_CONTEXT 0x80 +#define SNMP_ASN1_CLASS_PRIVATE 0xC0 + +#define SNMP_ASN1_CONTENTTYPE_PRIMITIVE 0x00 +#define SNMP_ASN1_CONTENTTYPE_CONSTRUCTED 0x20 + +/* universal tags (from ASN.1 spec.) 
*/ +#define SNMP_ASN1_UNIVERSAL_END_OF_CONTENT 0 +#define SNMP_ASN1_UNIVERSAL_INTEGER 2 +#define SNMP_ASN1_UNIVERSAL_OCTET_STRING 4 +#define SNMP_ASN1_UNIVERSAL_NULL 5 +#define SNMP_ASN1_UNIVERSAL_OBJECT_ID 6 +#define SNMP_ASN1_UNIVERSAL_SEQUENCE_OF 16 + +/* application specific (SNMP) tags (from SNMPv2-SMI) */ +#define SNMP_ASN1_APPLICATION_IPADDR 0 /* [APPLICATION 0] IMPLICIT OCTET STRING (SIZE (4)) */ +#define SNMP_ASN1_APPLICATION_COUNTER 1 /* [APPLICATION 1] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_GAUGE 2 /* [APPLICATION 2] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_TIMETICKS 3 /* [APPLICATION 3] IMPLICIT INTEGER (0..4294967295) => u32_t */ +#define SNMP_ASN1_APPLICATION_OPAQUE 4 /* [APPLICATION 4] IMPLICIT OCTET STRING */ +#define SNMP_ASN1_APPLICATION_COUNTER64 6 /* [APPLICATION 6] IMPLICIT INTEGER (0..18446744073709551615) */ + +/* context specific (SNMP) tags (from RFC 1905) */ +#define SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_INSTANCE 1 + +/* full ASN1 type defines */ +#define SNMP_ASN1_TYPE_END_OF_CONTENT (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_END_OF_CONTENT) +#define SNMP_ASN1_TYPE_INTEGER (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_INTEGER) +#define SNMP_ASN1_TYPE_OCTET_STRING (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_OCTET_STRING) +#define SNMP_ASN1_TYPE_NULL (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_NULL) +#define SNMP_ASN1_TYPE_OBJECT_ID (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_OBJECT_ID) +#define SNMP_ASN1_TYPE_SEQUENCE (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_CONSTRUCTED | SNMP_ASN1_UNIVERSAL_SEQUENCE_OF) +#define SNMP_ASN1_TYPE_IPADDR (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_IPADDR) +#define SNMP_ASN1_TYPE_IPADDRESS SNMP_ASN1_TYPE_IPADDR +#define SNMP_ASN1_TYPE_COUNTER (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_COUNTER) +#define SNMP_ASN1_TYPE_COUNTER32 SNMP_ASN1_TYPE_COUNTER +#define SNMP_ASN1_TYPE_GAUGE (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_GAUGE) +#define SNMP_ASN1_TYPE_GAUGE32 SNMP_ASN1_TYPE_GAUGE +#define SNMP_ASN1_TYPE_UNSIGNED32 SNMP_ASN1_TYPE_GAUGE +#define SNMP_ASN1_TYPE_TIMETICKS (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_TIMETICKS) +#define SNMP_ASN1_TYPE_OPAQUE (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_OPAQUE) +#if LWIP_HAVE_INT64 +#define SNMP_ASN1_TYPE_COUNTER64 (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_COUNTER64) +#endif + +#define SNMP_VARBIND_EXCEPTION_OFFSET 0xF0 +#define SNMP_VARBIND_EXCEPTION_MASK 0x0F + +/** error codes predefined by SNMP prot. */ +typedef enum { + SNMP_ERR_NOERROR = 0, +/* +outdated v1 error codes. do not use anmore! 
+#define SNMP_ERR_NOSUCHNAME 2 use SNMP_ERR_NOSUCHINSTANCE instead +#define SNMP_ERR_BADVALUE 3 use SNMP_ERR_WRONGTYPE,SNMP_ERR_WRONGLENGTH,SNMP_ERR_WRONGENCODING or SNMP_ERR_WRONGVALUE instead +#define SNMP_ERR_READONLY 4 use SNMP_ERR_NOTWRITABLE instead +*/ + SNMP_ERR_GENERROR = 5, + SNMP_ERR_NOACCESS = 6, + SNMP_ERR_WRONGTYPE = 7, + SNMP_ERR_WRONGLENGTH = 8, + SNMP_ERR_WRONGENCODING = 9, + SNMP_ERR_WRONGVALUE = 10, + SNMP_ERR_NOCREATION = 11, + SNMP_ERR_INCONSISTENTVALUE = 12, + SNMP_ERR_RESOURCEUNAVAILABLE = 13, + SNMP_ERR_COMMITFAILED = 14, + SNMP_ERR_UNDOFAILED = 15, + SNMP_ERR_NOTWRITABLE = 17, + SNMP_ERR_INCONSISTENTNAME = 18, + + SNMP_ERR_NOSUCHINSTANCE = SNMP_VARBIND_EXCEPTION_OFFSET + SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_INSTANCE +} snmp_err_t; + +/** internal object identifier representation */ +struct snmp_obj_id +{ + u8_t len; + u32_t id[SNMP_MAX_OBJ_ID_LEN]; +}; + +struct snmp_obj_id_const_ref +{ + u8_t len; + const u32_t* id; +}; + +extern const struct snmp_obj_id_const_ref snmp_zero_dot_zero; /* administrative identifier from SNMPv2-SMI */ + +/** SNMP variant value, used as reference in struct snmp_node_instance and table implementation */ +union snmp_variant_value +{ + void* ptr; + const void* const_ptr; + u32_t u32; + s32_t s32; +#if LWIP_HAVE_INT64 + u64_t u64; +#endif +}; + + +/** +SNMP MIB node types + tree node is the only node the stack can process in order to walk the tree, + all other nodes are assumed to be leaf nodes. + This cannot be an enum because users may want to define their own node types. +*/ +#define SNMP_NODE_TREE 0x00 +/* predefined leaf node types */ +#define SNMP_NODE_SCALAR 0x01 +#define SNMP_NODE_SCALAR_ARRAY 0x02 +#define SNMP_NODE_TABLE 0x03 +#define SNMP_NODE_THREADSYNC 0x04 + +/** node "base class" layout, the mandatory fields for a node */ +struct snmp_node +{ + /** one out of SNMP_NODE_TREE or any leaf node type (like SNMP_NODE_SCALAR) */ + u8_t node_type; + /** the number assigned to this node which used as part of the full OID */ + u32_t oid; +}; + +/** SNMP node instance access types */ +typedef enum { + SNMP_NODE_INSTANCE_ACCESS_READ = 1, + SNMP_NODE_INSTANCE_ACCESS_WRITE = 2, + SNMP_NODE_INSTANCE_READ_ONLY = SNMP_NODE_INSTANCE_ACCESS_READ, + SNMP_NODE_INSTANCE_READ_WRITE = (SNMP_NODE_INSTANCE_ACCESS_READ | SNMP_NODE_INSTANCE_ACCESS_WRITE), + SNMP_NODE_INSTANCE_WRITE_ONLY = SNMP_NODE_INSTANCE_ACCESS_WRITE, + SNMP_NODE_INSTANCE_NOT_ACCESSIBLE = 0 +} snmp_access_t; + +struct snmp_node_instance; + +typedef s16_t (*node_instance_get_value_method)(struct snmp_node_instance*, void*); +typedef snmp_err_t (*node_instance_set_test_method)(struct snmp_node_instance*, u16_t, void*); +typedef snmp_err_t (*node_instance_set_value_method)(struct snmp_node_instance*, u16_t, void*); +typedef void (*node_instance_release_method)(struct snmp_node_instance*); + +#define SNMP_GET_VALUE_RAW_DATA 0x4000 /* do not use 0x8000 because return value of node_instance_get_value_method is signed16 and 0x8000 would be the signed bit */ + +/** SNMP node instance */ +struct snmp_node_instance +{ + /** prefilled with the node, get_instance() is called on; may be changed by user to any value to pass an arbitrary node between calls to get_instance() and get_value/test_value/set_value */ + const struct snmp_node* node; + /** prefilled with the instance id requested; for get_instance() this is the exact oid requested; for get_next_instance() this is the relative starting point, stack expects relative oid of next node here */ + struct snmp_obj_id instance_oid; + + /** ASN type 
for this object (see snmp_asn1.h for definitions) */ + u8_t asn1_type; + /** one out of instance access types defined above (SNMP_NODE_INSTANCE_READ_ONLY,...) */ + snmp_access_t access; + + /** returns object value for the given object identifier. Return values <0 to indicate an error */ + node_instance_get_value_method get_value; + /** tests length and/or range BEFORE setting */ + node_instance_set_test_method set_test; + /** sets object value, only called when set_test() was successful */ + node_instance_set_value_method set_value; + /** called in any case when the instance is not required anymore by stack (useful for freeing memory allocated in get_instance/get_next_instance methods) */ + node_instance_release_method release_instance; + + /** reference to pass arbitrary value between calls to get_instance() and get_value/test_value/set_value */ + union snmp_variant_value reference; + /** see reference (if reference is a pointer, the length of underlying data may be stored here or anything else) */ + u32_t reference_len; +}; + + +/** SNMP tree node */ +struct snmp_tree_node +{ + /** inherited "base class" members */ + struct snmp_node node; + u16_t subnode_count; + const struct snmp_node* const *subnodes; +}; + +#define SNMP_CREATE_TREE_NODE(oid, subnodes) \ + {{ SNMP_NODE_TREE, (oid) }, \ + (u16_t)LWIP_ARRAYSIZE(subnodes), (subnodes) } + +#define SNMP_CREATE_EMPTY_TREE_NODE(oid) \ + {{ SNMP_NODE_TREE, (oid) }, \ + 0, NULL } + +/** SNMP leaf node */ +struct snmp_leaf_node +{ + /** inherited "base class" members */ + struct snmp_node node; + snmp_err_t (*get_instance)(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + snmp_err_t (*get_next_instance)(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +}; + +/** represents a single mib with its base oid and root node */ +struct snmp_mib +{ + const u32_t *base_oid; + u8_t base_oid_len; + const struct snmp_node *root_node; +}; + +#define SNMP_MIB_CREATE(oid_list, root_node) { (oid_list), (u8_t)LWIP_ARRAYSIZE(oid_list), root_node } + +/** OID range structure */ +struct snmp_oid_range +{ + u32_t min; + u32_t max; +}; + +/** checks if incoming OID length and values are in allowed ranges */ +u8_t snmp_oid_in_range(const u32_t *oid_in, u8_t oid_len, const struct snmp_oid_range *oid_ranges, u8_t oid_ranges_len); + +typedef enum { + SNMP_NEXT_OID_STATUS_SUCCESS, + SNMP_NEXT_OID_STATUS_NO_MATCH, + SNMP_NEXT_OID_STATUS_BUF_TO_SMALL +} snmp_next_oid_status_t; + +/** state for next_oid_init / next_oid_check functions */ +struct snmp_next_oid_state +{ + const u32_t* start_oid; + u8_t start_oid_len; + + u32_t* next_oid; + u8_t next_oid_len; + u8_t next_oid_max_len; + + snmp_next_oid_status_t status; + void* reference; +}; + +void snmp_next_oid_init(struct snmp_next_oid_state *state, + const u32_t *start_oid, u8_t start_oid_len, + u32_t *next_oid_buf, u8_t next_oid_max_len); +u8_t snmp_next_oid_precheck(struct snmp_next_oid_state *state, const u32_t *oid, u8_t oid_len); +u8_t snmp_next_oid_check(struct snmp_next_oid_state *state, const u32_t *oid, u8_t oid_len, void* reference); + +void snmp_oid_assign(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +void snmp_oid_combine(struct snmp_obj_id* target, const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); +void snmp_oid_prefix(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +void snmp_oid_append(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); +u8_t snmp_oid_equal(const u32_t *oid1, u8_t oid1_len, 
const u32_t *oid2, u8_t oid2_len); +s8_t snmp_oid_compare(const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); + +#if LWIP_IPV4 +u8_t snmp_oid_to_ip4(const u32_t *oid, ip4_addr_t *ip); +void snmp_ip4_to_oid(const ip4_addr_t *ip, u32_t *oid); +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +u8_t snmp_oid_to_ip6(const u32_t *oid, ip6_addr_t *ip); +void snmp_ip6_to_oid(const ip6_addr_t *ip, u32_t *oid); +#endif /* LWIP_IPV6 */ +#if LWIP_IPV4 || LWIP_IPV6 +u8_t snmp_ip_to_oid(const ip_addr_t *ip, u32_t *oid); +u8_t snmp_ip_port_to_oid(const ip_addr_t *ip, u16_t port, u32_t *oid); + +u8_t snmp_oid_to_ip(const u32_t *oid, u8_t oid_len, ip_addr_t *ip); +u8_t snmp_oid_to_ip_port(const u32_t *oid, u8_t oid_len, ip_addr_t *ip, u16_t *port); +#endif /* LWIP_IPV4 || LWIP_IPV6 */ + +struct netif; +u8_t netif_to_num(const struct netif *netif); + +snmp_err_t snmp_set_test_ok(struct snmp_node_instance* instance, u16_t value_len, void* value); /* generic function which can be used if test is always successful */ + +err_t snmp_decode_bits(const u8_t *buf, u32_t buf_len, u32_t *bit_value); +err_t snmp_decode_truthvalue(const s32_t *asn1_value, u8_t *bool_value); +u8_t snmp_encode_bits(u8_t *buf, u32_t buf_len, u32_t bit_value, u8_t bit_count); +u8_t snmp_encode_truthvalue(s32_t *asn1_value, u32_t bool_value); + +struct snmp_statistics +{ + u32_t inpkts; + u32_t outpkts; + u32_t inbadversions; + u32_t inbadcommunitynames; + u32_t inbadcommunityuses; + u32_t inasnparseerrs; + u32_t intoobigs; + u32_t innosuchnames; + u32_t inbadvalues; + u32_t inreadonlys; + u32_t ingenerrs; + u32_t intotalreqvars; + u32_t intotalsetvars; + u32_t ingetrequests; + u32_t ingetnexts; + u32_t insetrequests; + u32_t ingetresponses; + u32_t intraps; + u32_t outtoobigs; + u32_t outnosuchnames; + u32_t outbadvalues; + u32_t outgenerrs; + u32_t outgetrequests; + u32_t outgetnexts; + u32_t outsetrequests; + u32_t outgetresponses; + u32_t outtraps; +#if LWIP_SNMP_V3 + u32_t unsupportedseclevels; + u32_t notintimewindows; + u32_t unknownusernames; + u32_t unknownengineids; + u32_t wrongdigests; + u32_t decryptionerrors; +#endif +}; + +extern struct snmp_statistics snmp_stats; + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SNMP */ + +#endif /* LWIP_HDR_APPS_SNMP_CORE_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h new file mode 100644 index 0000000..392ee25 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_mib2.h @@ -0,0 +1,78 @@ +/** + * @file + * SNMP MIB2 API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
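snmp_core.h completes what is needed to bring the agent up using the API from snmp.h earlier in this patch. A sketch, assuming the stock MIB2 exported by snmp_mib2.h (the next file) and a single trap destination; the manager address is a parameter:

#include "lwip/def.h"
#include "lwip/apps/snmp.h"
#include "lwip/apps/snmp_mib2.h"

static const struct snmp_mib *agent_mibs[] = { &mib2 };

void example_snmp_start(const ip_addr_t *manager_ip)
{
  snmp_set_mibs(agent_mibs, (u8_t)LWIP_ARRAYSIZE(agent_mibs));
  snmp_trap_dst_ip_set(0, manager_ip); /* trap destination slot 0 */
  snmp_trap_dst_enable(0, 1);
  snmp_init();                         /* agent starts listening on port 161 */
  snmp_coldstart_trap();               /* announce the (re)start */
}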
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Dirk Ziegelmeier
+ *
+ */
+#ifndef LWIP_HDR_APPS_SNMP_MIB2_H
+#define LWIP_HDR_APPS_SNMP_MIB2_H
+
+#include "lwip/apps/snmp_opts.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */
+#if SNMP_LWIP_MIB2
+
+#include "lwip/apps/snmp_core.h"
+
+extern const struct snmp_mib mib2;
+
+#if SNMP_USE_NETCONN
+#include "lwip/apps/snmp_threadsync.h"
+void snmp_mib2_lwip_synchronizer(snmp_threadsync_called_fn fn, void* arg);
+extern struct snmp_threadsync_instance snmp_mib2_lwip_locks;
+#endif
+
+#ifndef SNMP_SYSSERVICES
+#define SNMP_SYSSERVICES ((1 << 6) | (1 << 3) | ((IP_FORWARD) << 2))
+#endif
+
+void snmp_mib2_set_sysdescr(const u8_t* str, const u16_t* len); /* read-only by definition */
+void snmp_mib2_set_syscontact(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize);
+void snmp_mib2_set_syscontact_readonly(const u8_t *ocstr, const u16_t *ocstrlen);
+void snmp_mib2_set_sysname(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize);
+void snmp_mib2_set_sysname_readonly(const u8_t *ocstr, const u16_t *ocstrlen);
+void snmp_mib2_set_syslocation(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize);
+void snmp_mib2_set_syslocation_readonly(const u8_t *ocstr, const u16_t *ocstrlen);
+
+#endif /* SNMP_LWIP_MIB2 */
+#endif /* LWIP_SNMP */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LWIP_HDR_APPS_SNMP_MIB2_H */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h
new file mode 100644
index 0000000..1c63504
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_opts.h
@@ -0,0 +1,297 @@
+/**
+ * @file
+ * SNMP server options list
+ */
+
+/*
+ * Copyright (c) 2015 Dirk Ziegelmeier
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
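The paired setters just added decide whether a MIB2 field is writable: the _readonly variants take const pointers, while the plain variants take a writable buffer, a pointer to its current length and the buffer capacity so that an SNMP SET can update the value in place. A sketch for sysName; the buffer size is arbitrary, and the buffers are static because the agent keeps referencing them:

#include <string.h>
#include "lwip/apps/snmp_mib2.h"

static u8_t  sysname_buf[32];
static u16_t sysname_len;

void example_mib2_setup(void)
{
  const char *name = "lwip-node"; /* illustrative default */
  sysname_len = (u16_t)strlen(name);
  memcpy(sysname_buf, name, sysname_len);
  /* Writable: a SET on sysName.0 now updates sysname_buf/sysname_len. */
  snmp_mib2_set_sysname(sysname_buf, &sysname_len, (u16_t)sizeof(sysname_buf));
}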
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Dirk Ziegelmeier + * + */ +#ifndef LWIP_HDR_SNMP_OPTS_H +#define LWIP_HDR_SNMP_OPTS_H + +#include "lwip/opt.h" + +/** + * @defgroup snmp_opts Options + * @ingroup snmp + * @{ + */ + +/** + * LWIP_SNMP==1: This enables the lwIP SNMP agent. UDP must be available + * for SNMP transport. + * If you want to use your own SNMP agent, leave this disabled. + * To integrate MIB2 of an external agent, you need to enable + * LWIP_MIB2_CALLBACKS and MIB2_STATS. This will give you the callbacks + * and statistics counters you need to get MIB2 working. + */ +#if !defined LWIP_SNMP || defined __DOXYGEN__ +#define LWIP_SNMP 0 +#endif + +/** + * SNMP_USE_NETCONN: Use netconn API instead of raw API. + * Makes SNMP agent run in a worker thread, so blocking operations + * can be done in MIB calls. + */ +#if !defined SNMP_USE_NETCONN || defined __DOXYGEN__ +#define SNMP_USE_NETCONN 0 +#endif + +/** + * SNMP_USE_RAW: Use raw API. + * SNMP agent does not run in a worker thread, so blocking operations + * should not be done in MIB calls. + */ +#if !defined SNMP_USE_RAW || defined __DOXYGEN__ +#define SNMP_USE_RAW 1 +#endif + +#if SNMP_USE_NETCONN && SNMP_USE_RAW +#error SNMP stack can use only one of the APIs {raw, netconn} +#endif + +#if LWIP_SNMP && !SNMP_USE_NETCONN && !SNMP_USE_RAW +#error SNMP stack needs a receive API and UDP {raw, netconn} +#endif + +#if SNMP_USE_NETCONN +/** + * SNMP_STACK_SIZE: Stack size of SNMP netconn worker thread + */ +#if !defined SNMP_STACK_SIZE || defined __DOXYGEN__ +#define SNMP_STACK_SIZE DEFAULT_THREAD_STACKSIZE +#endif + +/** + * SNMP_THREAD_PRIO: SNMP netconn worker thread priority + */ +#if !defined SNMP_THREAD_PRIO || defined __DOXYGEN__ +#define SNMP_THREAD_PRIO DEFAULT_THREAD_PRIO +#endif +#endif /* SNMP_USE_NETCONN */ + +/** + * SNMP_TRAP_DESTINATIONS: Number of trap destinations. At least one trap + * destination is required + */ +#if !defined SNMP_TRAP_DESTINATIONS || defined __DOXYGEN__ +#define SNMP_TRAP_DESTINATIONS 1 +#endif + +/** + * Only allow SNMP write actions that are 'safe' (e.g. disabling netifs is not + * a safe action and disabled when SNMP_SAFE_REQUESTS = 1). + * Unsafe requests are disabled by default! + */ +#if !defined SNMP_SAFE_REQUESTS || defined __DOXYGEN__ +#define SNMP_SAFE_REQUESTS 1 +#endif + +/** + * The maximum length of strings used. + */ +#if !defined SNMP_MAX_OCTET_STRING_LEN || defined __DOXYGEN__ +#define SNMP_MAX_OCTET_STRING_LEN 127 +#endif + +/** + * The maximum number of Sub ID's inside an object identifier. + * Indirectly this also limits the maximum depth of SNMP tree. 
+ */ +#if !defined SNMP_MAX_OBJ_ID_LEN || defined __DOXYGEN__ +#define SNMP_MAX_OBJ_ID_LEN 50 +#endif + +#if !defined SNMP_MAX_VALUE_SIZE || defined __DOXYGEN__ +/** + * The minimum size of a value. + */ +#define SNMP_MIN_VALUE_SIZE (2 * sizeof(u32_t*)) /* size required to store the basic types (8 bytes for counter64) */ +/** + * The maximum size of a value. + */ +#define SNMP_MAX_VALUE_SIZE LWIP_MAX(LWIP_MAX((SNMP_MAX_OCTET_STRING_LEN), sizeof(u32_t)*(SNMP_MAX_OBJ_ID_LEN)), SNMP_MIN_VALUE_SIZE) +#endif + +/** + * The snmp read-access community. Used for write-access and traps, too + * unless SNMP_COMMUNITY_WRITE or SNMP_COMMUNITY_TRAP are enabled, respectively. + */ +#if !defined SNMP_COMMUNITY || defined __DOXYGEN__ +#define SNMP_COMMUNITY "public" +#endif + +/** + * The snmp write-access community. + * Set this community to "" in order to disallow any write access. + */ +#if !defined SNMP_COMMUNITY_WRITE || defined __DOXYGEN__ +#define SNMP_COMMUNITY_WRITE "private" +#endif + +/** + * The snmp community used for sending traps. + */ +#if !defined SNMP_COMMUNITY_TRAP || defined __DOXYGEN__ +#define SNMP_COMMUNITY_TRAP "public" +#endif + +/** + * The maximum length of community string. + * If community names shall be adjusted at runtime via snmp_set_community() calls, + * enter here the possible maximum length (+1 for terminating null character). + */ +#if !defined SNMP_MAX_COMMUNITY_STR_LEN || defined __DOXYGEN__ +#define SNMP_MAX_COMMUNITY_STR_LEN LWIP_MAX(LWIP_MAX(sizeof(SNMP_COMMUNITY), sizeof(SNMP_COMMUNITY_WRITE)), sizeof(SNMP_COMMUNITY_TRAP)) +#endif + +/** + * The OID identifiying the device. This may be the enterprise OID itself or any OID located below it in tree. + */ +#if !defined SNMP_DEVICE_ENTERPRISE_OID || defined __DOXYGEN__ +#define SNMP_LWIP_ENTERPRISE_OID 26381 +/** + * IANA assigned enterprise ID for lwIP is 26381 + * @see http://www.iana.org/assignments/enterprise-numbers + * + * @note this enterprise ID is assigned to the lwIP project, + * all object identifiers living under this ID are assigned + * by the lwIP maintainers! + * @note don't change this define, use snmp_set_device_enterprise_oid() + * + * If you need to create your own private MIB you'll need + * to apply for your own enterprise ID with IANA: + * http://www.iana.org/numbers.html + */ +#define SNMP_DEVICE_ENTERPRISE_OID {1, 3, 6, 1, 4, 1, SNMP_LWIP_ENTERPRISE_OID} +/** + * Length of SNMP_DEVICE_ENTERPRISE_OID + */ +#define SNMP_DEVICE_ENTERPRISE_OID_LEN 7 +#endif + +/** + * SNMP_DEBUG: Enable debugging for SNMP messages. + */ +#if !defined SNMP_DEBUG || defined __DOXYGEN__ +#define SNMP_DEBUG LWIP_DBG_OFF +#endif + +/** + * SNMP_MIB_DEBUG: Enable debugging for SNMP MIBs. + */ +#if !defined SNMP_MIB_DEBUG || defined __DOXYGEN__ +#define SNMP_MIB_DEBUG LWIP_DBG_OFF +#endif + +/** + * Indicates if the MIB2 implementation of LWIP SNMP stack is used. + */ +#if !defined SNMP_LWIP_MIB2 || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2 LWIP_SNMP +#endif + +/** + * Value return for sysDesc field of MIB2. + */ +#if !defined SNMP_LWIP_MIB2_SYSDESC || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSDESC "lwIP" +#endif + +/** + * Value return for sysName field of MIB2. + * To make sysName field settable, call snmp_mib2_set_sysname() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSNAME || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSNAME "FQDN-unk" +#endif + +/** + * Value return for sysContact field of MIB2. 
+ * To make sysContact field settable, call snmp_mib2_set_syscontact() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSCONTACT || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSCONTACT "" +#endif + +/** + * Value return for sysLocation field of MIB2. + * To make sysLocation field settable, call snmp_mib2_set_syslocation() to provide the necessary buffers. + */ +#if !defined SNMP_LWIP_MIB2_SYSLOCATION || defined __DOXYGEN__ +#define SNMP_LWIP_MIB2_SYSLOCATION "" +#endif + +/** + * This value is used to limit the repetitions processed in GetBulk requests (value == 0 means no limitation). + * This may be useful to limit the load for a single request. + * According to SNMP RFC 1905 it is allowed to not return all requested variables from a GetBulk request if system load would be too high. + * so the effect is that the client will do more requests to gather all data. + * For the stack this could be useful in case that SNMP processing is done in TCP/IP thread. In this situation a request with many + * repetitions could block the thread for a longer time. Setting limit here will keep the stack more responsive. + */ +#if !defined SNMP_LWIP_GETBULK_MAX_REPETITIONS || defined __DOXYGEN__ +#define SNMP_LWIP_GETBULK_MAX_REPETITIONS 0 +#endif + +/** + * @} + */ + +/* + ------------------------------------ + ---------- SNMPv3 options ---------- + ------------------------------------ +*/ + +/** + * LWIP_SNMP_V3==1: This enables EXPERIMENTAL SNMPv3 support. LWIP_SNMP must + * also be enabled. + * THIS IS UNDER DEVELOPMENT AND SHOULD NOT BE ENABLED IN PRODUCTS. + */ +#ifndef LWIP_SNMP_V3 +#define LWIP_SNMP_V3 0 +#endif + +#ifndef LWIP_SNMP_V3_MBEDTLS +#define LWIP_SNMP_V3_MBEDTLS LWIP_SNMP_V3 +#endif + +#ifndef LWIP_SNMP_V3_CRYPTO +#define LWIP_SNMP_V3_CRYPTO LWIP_SNMP_V3_MBEDTLS +#endif + +#ifndef LWIP_SNMP_CONFIGURE_VERSIONS +#define LWIP_SNMP_CONFIGURE_VERSIONS 0 +#endif + +#endif /* LWIP_HDR_SNMP_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h new file mode 100644 index 0000000..99bcdf9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_scalar.h @@ -0,0 +1,113 @@ +/** + * @file + * SNMP server MIB API to implement scalar nodes + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
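As with the MQTT options, everything in snmp_opts.h is #ifndef-guarded and intended to be overridden from lwipopts.h. A hypothetical configuration that enables the agent in netconn mode and disallows writes:

/* lwipopts.h (illustrative values) */
#define LWIP_SNMP            1
#define SNMP_USE_NETCONN     1   /* MIB code may block in the worker thread */
#define SNMP_USE_RAW         0   /* the two transport APIs are mutually exclusive */
#define SNMP_COMMUNITY       "monitor"
#define SNMP_COMMUNITY_WRITE ""  /* empty write community disallows write access */
#define SNMP_COMMUNITY_TRAP  "monitor"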
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Martin Hentschel + * + */ + +#ifndef LWIP_HDR_APPS_SNMP_SCALAR_H +#define LWIP_HDR_APPS_SNMP_SCALAR_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/apps/snmp_core.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ + +/** basic scalar node */ +struct snmp_scalar_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u8_t asn1_type; + snmp_access_t access; + node_instance_get_value_method get_value; + node_instance_set_test_method set_test; + node_instance_set_value_method set_value; +}; + + +snmp_err_t snmp_scalar_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_scalar_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_SCALAR_CREATE_NODE(oid, access, asn1_type, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_SCALAR, (oid) }, \ + snmp_scalar_get_instance, \ + snmp_scalar_get_next_instance }, \ + (asn1_type), (access), (get_value_method), (set_test_method), (set_value_method) } + +#define SNMP_SCALAR_CREATE_NODE_READONLY(oid, asn1_type, get_value_method) SNMP_SCALAR_CREATE_NODE(oid, SNMP_NODE_INSTANCE_READ_ONLY, asn1_type, get_value_method, NULL, NULL) + +/** scalar array node - a tree node which contains scalars only as children */ +struct snmp_scalar_array_node_def +{ + u32_t oid; + u8_t asn1_type; + snmp_access_t access; +}; + +typedef s16_t (*snmp_scalar_array_get_value_method)(const struct snmp_scalar_array_node_def*, void*); +typedef snmp_err_t (*snmp_scalar_array_set_test_method)(const struct snmp_scalar_array_node_def*, u16_t, void*); +typedef snmp_err_t (*snmp_scalar_array_set_value_method)(const struct snmp_scalar_array_node_def*, u16_t, void*); + +/** basic scalar array node */ +struct snmp_scalar_array_node +{ + /** inherited "base class" members */ + struct snmp_leaf_node node; + u16_t array_node_count; + const struct snmp_scalar_array_node_def* array_nodes; + snmp_scalar_array_get_value_method get_value; + snmp_scalar_array_set_test_method set_test; + snmp_scalar_array_set_value_method set_value; +}; + +snmp_err_t snmp_scalar_array_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_scalar_array_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_SCALAR_CREATE_ARRAY_NODE(oid, array_nodes, get_value_method, set_test_method, set_value_method) \ + {{{ SNMP_NODE_SCALAR_ARRAY, (oid) }, \ + snmp_scalar_array_get_instance, \ + snmp_scalar_array_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(array_nodes), (array_nodes), (get_value_method), (set_test_method), (set_value_method) } + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_SCALAR_H */ diff --git 
a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h new file mode 100644 index 0000000..9f4b41c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_framework.h @@ -0,0 +1,32 @@ +/* +Generated by LwipMibCompiler +*/ + +#ifndef LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H +#define LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H + +#include "lwip/apps/snmp_opts.h" +#if LWIP_SNMP + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_obj_id usmNoAuthProtocol; +extern const struct snmp_obj_id usmHMACMD5AuthProtocol; +extern const struct snmp_obj_id usmHMACSHAAuthProtocol; + +extern const struct snmp_obj_id usmNoPrivProtocol; +extern const struct snmp_obj_id usmDESPrivProtocol; +extern const struct snmp_obj_id usmAESPrivProtocol; + +extern const struct snmp_mib snmpframeworkmib; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* LWIP_SNMP */ +#endif /* LWIP_HDR_APPS_SNMP_FRAMEWORK_MIB_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h new file mode 100644 index 0000000..ae09baa --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_snmpv2_usm.h @@ -0,0 +1,24 @@ +/* +Generated by LwipMibCompiler +*/ + +#ifndef LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H +#define LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H + +#include "lwip/apps/snmp_opts.h" +#if LWIP_SNMP + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "lwip/apps/snmp_core.h" + +extern const struct snmp_mib snmpusmmib; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* LWIP_SNMP */ +#endif /* LWIP_HDR_APPS_SNMP_USER_BASED_SM_MIB_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h new file mode 100644 index 0000000..6930d0b --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_table.h @@ -0,0 +1,134 @@ +/** + * @file + * SNMP server MIB API to implement table nodes + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
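[Usage note] For the snmp_scalar.h API above: a read-only scalar needs only an OID, an ASN.1 type and a get handler. A hedged sketch; the handler name, OID value and uptime semantics are invented for illustration:

/* Illustrative only: a read-only scalar exposing a hypothetical uptime value. */
static s16_t
get_uptime_value(struct snmp_node_instance* instance, void* value)
{
  u32_t* uint_ptr = (u32_t*)value;
  LWIP_UNUSED_ARG(instance);
  *uint_ptr = sys_now() / 10;   /* milliseconds -> TimeTicks (hundredths of a second) */
  return sizeof(*uint_ptr);     /* number of bytes written */
}

/* OID .3 under the enclosing tree node; read-only TimeTicks scalar. */
static const struct snmp_scalar_node uptime_scalar =
  SNMP_SCALAR_CREATE_NODE_READONLY(3, SNMP_ASN1_TYPE_TIMETICKS, get_uptime_value);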
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Martin Hentschel
+ *
+ */
+
+#ifndef LWIP_HDR_APPS_SNMP_TABLE_H
+#define LWIP_HDR_APPS_SNMP_TABLE_H
+
+#include "lwip/apps/snmp_opts.h"
+#include "lwip/apps/snmp_core.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */
+
+/** default (customizable) read/write table */
+struct snmp_table_col_def
+{
+  u32_t index;
+  u8_t asn1_type;
+  snmp_access_t access;
+};
+
+/** table node */
+struct snmp_table_node
+{
+  /** inherited "base class" members */
+  struct snmp_leaf_node node;
+  u16_t column_count;
+  const struct snmp_table_col_def* columns;
+  snmp_err_t (*get_cell_instance)(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, struct snmp_node_instance* cell_instance);
+  snmp_err_t (*get_next_cell_instance)(const u32_t* column, struct snmp_obj_id* row_oid, struct snmp_node_instance* cell_instance);
+  /** returns object value for the given object identifier */
+  node_instance_get_value_method get_value;
+  /** tests length and/or range BEFORE setting */
+  node_instance_set_test_method set_test;
+  /** sets object value, only called when set_test() was successful */
+  node_instance_set_value_method set_value;
+};
+
+snmp_err_t snmp_table_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance);
+snmp_err_t snmp_table_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance);
+
+#define SNMP_TABLE_CREATE(oid, columns, get_cell_instance_method, get_next_cell_instance_method, get_value_method, set_test_method, set_value_method) \
+  {{{ SNMP_NODE_TABLE, (oid) }, \
+  snmp_table_get_instance, \
+  snmp_table_get_next_instance }, \
+  (u16_t)LWIP_ARRAYSIZE(columns), (columns), \
+  (get_cell_instance_method), (get_next_cell_instance_method), \
+  (get_value_method), (set_test_method), (set_value_method)}
+
+#define SNMP_TABLE_GET_COLUMN_FROM_OID(oid) ((oid)[1]) /* the first array value is the (fixed) row entry (always 1) and the 2nd value is the column, followed by the instance */
+
+
+/** simple read-only table */
+typedef enum {
+  SNMP_VARIANT_VALUE_TYPE_U32,
+  SNMP_VARIANT_VALUE_TYPE_S32,
+  SNMP_VARIANT_VALUE_TYPE_PTR,
+  SNMP_VARIANT_VALUE_TYPE_CONST_PTR
+} snmp_table_column_data_type_t;
+
+struct snmp_table_simple_col_def
+{
+  u32_t index;
+  u8_t asn1_type;
+  snmp_table_column_data_type_t data_type; /* depending on which union member is used to store the value */
+};
+
+/** simple read-only table node */
+struct snmp_table_simple_node
+{
+  /* inherited "base class" members */
+  struct snmp_leaf_node node;
+  u16_t column_count;
+  const struct snmp_table_simple_col_def* columns;
+  snmp_err_t (*get_cell_value)(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union
snmp_variant_value* value, u32_t* value_len); + snmp_err_t (*get_next_cell_instance_and_value)(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len); +}; + +snmp_err_t snmp_table_simple_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); +snmp_err_t snmp_table_simple_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); + +#define SNMP_TABLE_CREATE_SIMPLE(oid, columns, get_cell_value_method, get_next_cell_instance_and_value_method) \ + {{{ SNMP_NODE_TABLE, (oid) }, \ + snmp_table_simple_get_instance, \ + snmp_table_simple_get_next_instance }, \ + (u16_t)LWIP_ARRAYSIZE(columns), (columns), (get_cell_value_method), (get_next_cell_instance_and_value_method) } + +s16_t snmp_table_extract_value_from_s32ref(struct snmp_node_instance* instance, void* value); +s16_t snmp_table_extract_value_from_u32ref(struct snmp_node_instance* instance, void* value); +s16_t snmp_table_extract_value_from_refconstptr(struct snmp_node_instance* instance, void* value); + +#endif /* LWIP_SNMP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_TABLE_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h new file mode 100644 index 0000000..5eb3538 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmp_threadsync.h @@ -0,0 +1,114 @@ +/** + * @file + * SNMP server MIB API to implement thread synchronization + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
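[Usage note] For the snmp_table.h "simple" variant above, the two callbacks fully describe a read-only table. A sketch of a single-row, single-column table; all names and the constant value 42 are illustrative only:

/* Illustrative only: one column (index 1) of INTEGER values, one row (index 1). */
static const struct snmp_table_simple_col_def example_columns[] = {
  { 1, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_S32 }
};

static snmp_err_t
example_get_cell_value(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len,
                       union snmp_variant_value* value, u32_t* value_len)
{
  LWIP_UNUSED_ARG(column);
  LWIP_UNUSED_ARG(value_len);
  if ((row_oid_len != 1) || (row_oid[0] != 1)) {
    return SNMP_ERR_NOSUCHINSTANCE; /* only row index 1 exists */
  }
  value->s32 = 42; /* placeholder datum */
  return SNMP_ERR_NOERROR;
}

static snmp_err_t
example_get_next_cell(const u32_t* column, struct snmp_obj_id* row_oid,
                      union snmp_variant_value* value, u32_t* value_len)
{
  LWIP_UNUSED_ARG(column);
  LWIP_UNUSED_ARG(value_len);
  /* single-row table: only advance to row 1 when starting before it */
  if ((row_oid->len == 0) || (row_oid->id[0] < 1)) {
    row_oid->len = 1;
    row_oid->id[0] = 1;
    value->s32 = 42;
    return SNMP_ERR_NOERROR;
  }
  return SNMP_ERR_NOSUCHINSTANCE;
}

static const struct snmp_table_simple_node example_table =
  SNMP_TABLE_CREATE_SIMPLE(2, example_columns, example_get_cell_value, example_get_next_cell);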
+ *
+ * Author: Dirk Ziegelmeier
+ *
+ */
+
+#ifndef LWIP_HDR_APPS_SNMP_THREADSYNC_H
+#define LWIP_HDR_APPS_SNMP_THREADSYNC_H
+
+#include "lwip/apps/snmp_opts.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/apps/snmp_core.h"
+#include "lwip/sys.h"
+
+typedef void (*snmp_threadsync_called_fn)(void* arg);
+typedef void (*snmp_threadsync_synchronizer_fn)(snmp_threadsync_called_fn fn, void* arg);
+
+
+/** Thread sync runtime data. For internal usage only. */
+struct threadsync_data
+{
+  union {
+    snmp_err_t err;
+    s16_t s16;
+  } retval;
+  union {
+    const u32_t *root_oid;
+    void *value;
+  } arg1;
+  union {
+    u8_t root_oid_len;
+    u16_t len;
+  } arg2;
+  const struct snmp_threadsync_node *threadsync_node;
+  struct snmp_node_instance proxy_instance;
+};
+
+/** Thread sync instance. Needed EXACTLY once for every thread to be synced into. */
+struct snmp_threadsync_instance
+{
+  sys_sem_t sem;
+  sys_mutex_t sem_usage_mutex;
+  snmp_threadsync_synchronizer_fn sync_fn;
+  struct threadsync_data data;
+};
+
+/** SNMP thread sync proxy leaf node */
+struct snmp_threadsync_node
+{
+  /* inherited "base class" members */
+  struct snmp_leaf_node node;
+
+  const struct snmp_leaf_node *target;
+  struct snmp_threadsync_instance *instance;
+};
+
+snmp_err_t snmp_threadsync_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance);
+snmp_err_t snmp_threadsync_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance);
+
+/** Create thread sync proxy node */
+#define SNMP_CREATE_THREAD_SYNC_NODE(oid, target_leaf_node, threadsync_instance) \
+  {{{ SNMP_NODE_THREADSYNC, (oid) }, \
+  snmp_threadsync_get_instance, \
+  snmp_threadsync_get_next_instance }, \
+  (target_leaf_node), \
+  (threadsync_instance) }
+
+/** Create thread sync instance data */
+void snmp_threadsync_init(struct snmp_threadsync_instance *instance, snmp_threadsync_synchronizer_fn sync_fn);
+
+#endif /* LWIP_SNMP */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LWIP_HDR_APPS_SNMP_THREADSYNC_H */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h
new file mode 100644
index 0000000..bd7d314
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/snmpv3.h
@@ -0,0 +1,114 @@
+/**
+ * @file
+ * Additional SNMPv3 functionality RFC3414 and RFC3826.
+ */
+
+/*
+ * Copyright (c) 2016 Elias Oenal.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
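[Usage note] The thread-sync API just shown is driven by a synchronizer that defers work into the stack's own thread; lwIP's MIB2 code uses tcpip_callback() the same way. A sketch, assuming a sequential (NO_SYS==0) build and a scalar node named uptime_scalar defined elsewhere (hypothetical name):

#include "lwip/tcpip.h"
#include "lwip/apps/snmp_threadsync.h"

/* Illustrative only: run the wrapped node's handlers in tcpip_thread context. */
static void
example_synchronizer(snmp_threadsync_called_fn fn, void* arg)
{
  /* both fn and tcpip_callback_fn are void (*)(void *), so this is well-typed */
  tcpip_callback(fn, arg); /* error handling omitted for brevity */
}

static struct snmp_threadsync_instance example_sync;

extern const struct snmp_scalar_node uptime_scalar; /* hypothetical, defined elsewhere */
static const struct snmp_threadsync_node uptime_synced =
  SNMP_CREATE_THREAD_SYNC_NODE(3, &uptime_scalar.node, &example_sync);

void example_threadsync_setup(void)
{
  snmp_threadsync_init(&example_sync, example_synchronizer);
}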
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * Author: Elias Oenal + */ + +#ifndef LWIP_HDR_APPS_SNMP_V3_H +#define LWIP_HDR_APPS_SNMP_V3_H + +#include "lwip/apps/snmp_opts.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_SNMP && LWIP_SNMP_V3 + +typedef enum +{ + SNMP_V3_AUTH_ALGO_INVAL = 0, + SNMP_V3_AUTH_ALGO_MD5 = 1, + SNMP_V3_AUTH_ALGO_SHA = 2 +} snmpv3_auth_algo_t; + +typedef enum +{ + SNMP_V3_PRIV_ALGO_INVAL = 0, + SNMP_V3_PRIV_ALGO_DES = 1, + SNMP_V3_PRIV_ALGO_AES = 2 +} snmpv3_priv_algo_t; + +typedef enum +{ + SNMP_V3_USER_STORAGETYPE_OTHER = 1, + SNMP_V3_USER_STORAGETYPE_VOLATILE = 2, + SNMP_V3_USER_STORAGETYPE_NONVOLATILE = 3, + SNMP_V3_USER_STORAGETYPE_PERMANENT = 4, + SNMP_V3_USER_STORAGETYPE_READONLY = 5 +} snmpv3_user_storagetype_t; + +/* + * The following callback functions must be implemented by the application. + * There is a dummy implementation in snmpv3_dummy.c. + */ + +void snmpv3_get_engine_id(const char **id, u8_t *len); +err_t snmpv3_set_engine_id(const char* id, u8_t len); + +u32_t snmpv3_get_engine_boots(void); +void snmpv3_set_engine_boots(u32_t boots); + +u32_t snmpv3_get_engine_time(void); +void snmpv3_reset_engine_time(void); + +err_t snmpv3_get_user(const char* username, snmpv3_auth_algo_t *auth_algo, u8_t *auth_key, snmpv3_priv_algo_t *priv_algo, u8_t *priv_key); +u8_t snmpv3_get_amount_of_users(void); +err_t snmpv3_get_user_storagetype(const char *username, snmpv3_user_storagetype_t *storagetype); +err_t snmpv3_get_username(char *username, u8_t index); + +/* The following functions are provided by the SNMPv3 agent */ + +void snmpv3_engine_id_changed(void); +s32_t snmpv3_get_engine_time_internal(void); + +void snmpv3_password_to_key_md5( + const u8_t *password, /* IN */ + size_t passwordlen, /* IN */ + const u8_t *engineID, /* IN - pointer to snmpEngineID */ + u8_t engineLength, /* IN - length of snmpEngineID */ + u8_t *key); /* OUT - pointer to caller 16-octet buffer */ + +void snmpv3_password_to_key_sha( + const u8_t *password, /* IN */ + size_t passwordlen, /* IN */ + const u8_t *engineID, /* IN - pointer to snmpEngineID */ + u8_t engineLength, /* IN - length of snmpEngineID */ + u8_t *key); /* OUT - pointer to caller 20-octet buffer */ + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNMP_V3_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h new file mode 100644 index 0000000..00135b4 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp.h @@ -0,0 +1,80 @@ +/** + * @file + * SNTP client API + */ + +/* + * Copyright (c) 2007-2009 Frédéric Bernon, 
Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Frédéric Bernon, Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_SNTP_H +#define LWIP_HDR_APPS_SNTP_H + +#include "lwip/apps/sntp_opts.h" +#include "lwip/ip_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* SNTP operating modes: default is to poll using unicast. + The mode has to be set before calling sntp_init(). */ +#define SNTP_OPMODE_POLL 0 +#define SNTP_OPMODE_LISTENONLY 1 +void sntp_setoperatingmode(u8_t operating_mode); +u8_t sntp_getoperatingmode(void); + +void sntp_init(void); +void sntp_stop(void); +u8_t sntp_enabled(void); + +void sntp_setserver(u8_t idx, const ip_addr_t *addr); +const ip_addr_t* sntp_getserver(u8_t idx); + +#if SNTP_MONITOR_SERVER_REACHABILITY +u8_t sntp_getreachability(u8_t idx); +#endif /* SNTP_MONITOR_SERVER_REACHABILITY */ + +#if SNTP_SERVER_DNS +void sntp_setservername(u8_t idx, const char *server); +const char *sntp_getservername(u8_t idx); +#endif /* SNTP_SERVER_DNS */ + +#if SNTP_GET_SERVERS_FROM_DHCP +void sntp_servermode_dhcp(int set_servers_from_dhcp); +#else /* SNTP_GET_SERVERS_FROM_DHCP */ +#define sntp_servermode_dhcp(x) +#endif /* SNTP_GET_SERVERS_FROM_DHCP */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_SNTP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h new file mode 100644 index 0000000..c25a386 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/sntp_opts.h @@ -0,0 +1,209 @@ +/** + * @file + * SNTP client options list + */ + +/* + * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Frédéric Bernon, Simon Goldschmidt + * + */ +#ifndef LWIP_HDR_APPS_SNTP_OPTS_H +#define LWIP_HDR_APPS_SNTP_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup sntp_opts Options + * @ingroup sntp + * @{ + */ + +/** SNTP macro to change system time in seconds + * Define SNTP_SET_SYSTEM_TIME_US(sec, us) to set the time in microseconds + * instead of this one if you need the additional precision. Alternatively, + * define SNTP_SET_SYSTEM_TIME_NTP(sec, frac) in order to work with native + * NTP timestamps instead. + */ +#if !defined SNTP_SET_SYSTEM_TIME || defined __DOXYGEN__ +#define SNTP_SET_SYSTEM_TIME(sec) LWIP_UNUSED_ARG(sec) +#endif + +/** The maximum number of SNTP servers that can be set */ +#if !defined SNTP_MAX_SERVERS || defined __DOXYGEN__ +#define SNTP_MAX_SERVERS LWIP_DHCP_MAX_NTP_SERVERS +#endif + +/** Set this to 1 to implement the callback function called by dhcp when + * NTP servers are received. */ +#if !defined SNTP_GET_SERVERS_FROM_DHCP || defined __DOXYGEN__ +#define SNTP_GET_SERVERS_FROM_DHCP LWIP_DHCP_GET_NTP_SRV +#endif + +/** Set this to 1 to support DNS names (or IP address strings) to set sntp servers + * One server address/name can be defined as default if SNTP_SERVER_DNS == 1: + * \#define SNTP_SERVER_ADDRESS "pool.ntp.org" + */ +#if !defined SNTP_SERVER_DNS || defined __DOXYGEN__ +#define SNTP_SERVER_DNS 0 +#endif + +/** + * SNTP_DEBUG: Enable debugging for SNTP. + */ +#if !defined SNTP_DEBUG || defined __DOXYGEN__ +#define SNTP_DEBUG LWIP_DBG_OFF +#endif + +/** SNTP server port */ +#if !defined SNTP_PORT || defined __DOXYGEN__ +#define SNTP_PORT LWIP_IANA_PORT_SNTP +#endif + +/** Sanity check: + * Define this to + * - 0 to turn off sanity checks (default; smaller code) + * - >= 1 to check address and port of the response packet to ensure the + * response comes from the server we sent the request to. + * - >= 2 to check returned Originate Timestamp against Transmit Timestamp + * sent to the server (to ensure response to older request). + * - >= 3 @todo: discard reply if any of the VN, Stratum, or Transmit Timestamp + * fields is 0 or the Mode field is not 4 (unicast) or 5 (broadcast). 
+ * - >= 4 @todo: check that the Root Delay and Root Dispersion fields are each
+ * greater than or equal to 0 and less than infinity, where infinity is
+ * currently a cozy number like one second. This check avoids using a
+ * server whose synchronization source has expired for a very long time.
+ */
+#if !defined SNTP_CHECK_RESPONSE || defined __DOXYGEN__
+#define SNTP_CHECK_RESPONSE 0
+#endif
+
+/** Enable round-trip delay compensation.
+ * Compensate for the round-trip delay by calculating the clock offset from
+ * the originate, receive, transmit and destination timestamps, as per RFC.
+ *
+ * The calculation requires compiler support for 64-bit integers. Also, either
+ * SNTP_SET_SYSTEM_TIME_US or SNTP_SET_SYSTEM_TIME_NTP has to be implemented
+ * for setting the system clock with sub-second precision. Likewise, either
+ * SNTP_GET_SYSTEM_TIME or SNTP_GET_SYSTEM_TIME_NTP needs to be implemented
+ * with sub-second precision.
+ *
+ * Although not strictly required, it makes sense to combine this option with
+ * SNTP_CHECK_RESPONSE >= 2 for sanity-checking of the received timestamps.
+ * Also, in order for the round-trip calculation to work, the difference
+ * between the local clock and the NTP server clock must not be larger than
+ * about 34 years. If that limit is exceeded, the implementation will fall back
+ * to setting the clock without compensation. In order to ensure that the local
+ * clock is always within the permitted range for compensation, even at first
+ * try, it may be necessary to store at least the current year in non-volatile
+ * memory.
+ */
+#if !defined SNTP_COMP_ROUNDTRIP || defined __DOXYGEN__
+#define SNTP_COMP_ROUNDTRIP 0
+#endif
+
+/** According to the RFC, this shall be a random delay
+ * between 1 and 5 minutes (in milliseconds) to prevent load peaks.
+ * This can be defined to a random generation function,
+ * which must return the delay in milliseconds as u32_t.
+ * Turned off by default.
+ */
+#if !defined SNTP_STARTUP_DELAY || defined __DOXYGEN__
+#ifdef LWIP_RAND
+#define SNTP_STARTUP_DELAY 1
+#else
+#define SNTP_STARTUP_DELAY 0
+#endif
+#endif
+
+/** If you want the startup delay to be a function, define this
+ * to a function (including the brackets) and define SNTP_STARTUP_DELAY to 1.
+ */
+#if !defined SNTP_STARTUP_DELAY_FUNC || defined __DOXYGEN__
+#define SNTP_STARTUP_DELAY_FUNC (LWIP_RAND() % 5000)
+#endif
+
+/** SNTP receive timeout - in milliseconds
+ * Also used as retry timeout - this shouldn't be too low.
+ * Default is 15 seconds. Must not be below 15 seconds by specification (i.e. 15000).
+ */
+#if !defined SNTP_RECV_TIMEOUT || defined __DOXYGEN__
+#define SNTP_RECV_TIMEOUT 15000
+#endif
+
+/** SNTP update delay - in milliseconds
+ * Default is 1 hour. Must not be below 60 seconds by specification (i.e. 60000).
+ */
+#if !defined SNTP_UPDATE_DELAY || defined __DOXYGEN__
+#define SNTP_UPDATE_DELAY 3600000
+#endif
+
+/** SNTP macro to get system time, used with SNTP_CHECK_RESPONSE >= 2
+ * to send in request and compare in response. Also used for round-trip
+ * delay compensation if SNTP_COMP_ROUNDTRIP != 0.
+ * Alternatively, define SNTP_GET_SYSTEM_TIME_NTP(sec, frac) in order to
+ * work with native NTP timestamps instead.
+ */
+#if !defined SNTP_GET_SYSTEM_TIME || defined __DOXYGEN__
+#define SNTP_GET_SYSTEM_TIME(sec, us) do { (sec) = 0; (us) = 0; } while(0)
+#endif
+
+/** Default retry timeout (in milliseconds) if the response
+ * received is invalid.
+ * This is doubled with each retry until SNTP_RETRY_TIMEOUT_MAX is reached.
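[Usage note] Taken together with the sntp.h API earlier in this patch, a typical client overrides a few of these options and then starts the client once a netif is up. A sketch; rtc_set_time() is a hypothetical board-specific function, and the server name is just an example:

/* lwipopts.h -- illustrative SNTP settings */
#define SNTP_SERVER_DNS       1                       /* allow DNS names for servers */
#define SNTP_CHECK_RESPONSE   2                       /* verify source addr and timestamps */
#define SNTP_SET_SYSTEM_TIME(sec)  rtc_set_time(sec)  /* rtc_set_time() is hypothetical */

/* runtime setup, after the network interface has an address */
sntp_setoperatingmode(SNTP_OPMODE_POLL);
sntp_setservername(0, "pool.ntp.org");
sntp_init();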
+ */ +#if !defined SNTP_RETRY_TIMEOUT || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT SNTP_RECV_TIMEOUT +#endif + +/** Maximum retry timeout (in milliseconds). */ +#if !defined SNTP_RETRY_TIMEOUT_MAX || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT_MAX (SNTP_RETRY_TIMEOUT * 10) +#endif + +/** Increase retry timeout with every retry sent + * Default is on to conform to RFC. + */ +#if !defined SNTP_RETRY_TIMEOUT_EXP || defined __DOXYGEN__ +#define SNTP_RETRY_TIMEOUT_EXP 1 +#endif + +/** Keep a reachability shift register per server + * Default is on to conform to RFC. + */ +#if !defined SNTP_MONITOR_SERVER_REACHABILITY || defined __DOXYGEN__ +#define SNTP_MONITOR_SERVER_REACHABILITY 1 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_SNTP_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h new file mode 100644 index 0000000..d187b82 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_opts.h @@ -0,0 +1,106 @@ +/** + * + * @file tftp_opts.h + * + * @author Logan Gunthorpe + * + * @brief Trivial File Transfer Protocol (RFC 1350) implementation options + * + * Copyright (c) Deltatee Enterprises Ltd. 2013 + * All rights reserved. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without + * modification,are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Logan Gunthorpe + * + */ + +#ifndef LWIP_HDR_APPS_TFTP_OPTS_H +#define LWIP_HDR_APPS_TFTP_OPTS_H + +#include "lwip/opt.h" +#include "lwip/prot/iana.h" + +/** + * @defgroup tftp_opts Options + * @ingroup tftp + * @{ + */ + +/** + * Enable TFTP debug messages + */ +#if !defined TFTP_DEBUG || defined __DOXYGEN__ +#define TFTP_DEBUG LWIP_DBG_OFF +#endif + +/** + * TFTP server port + */ +#if !defined TFTP_PORT || defined __DOXYGEN__ +#define TFTP_PORT LWIP_IANA_PORT_TFTP +#endif + +/** + * TFTP timeout + */ +#if !defined TFTP_TIMEOUT_MSECS || defined __DOXYGEN__ +#define TFTP_TIMEOUT_MSECS 10000 +#endif + +/** + * Max. 
number of retries when a file is read from server + */ +#if !defined TFTP_MAX_RETRIES || defined __DOXYGEN__ +#define TFTP_MAX_RETRIES 5 +#endif + +/** + * TFTP timer cyclic interval + */ +#if !defined TFTP_TIMER_MSECS || defined __DOXYGEN__ +#define TFTP_TIMER_MSECS (TFTP_TIMEOUT_MSECS / 10) +#endif + +/** + * Max. length of TFTP filename + */ +#if !defined TFTP_MAX_FILENAME_LEN || defined __DOXYGEN__ +#define TFTP_MAX_FILENAME_LEN 20 +#endif + +/** + * Max. length of TFTP mode + */ +#if !defined TFTP_MAX_MODE_LEN || defined __DOXYGEN__ +#define TFTP_MAX_MODE_LEN 7 +#endif + +/** + * @} + */ + +#endif /* LWIP_HDR_APPS_TFTP_OPTS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h new file mode 100644 index 0000000..3ce5e40 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/apps/tftp_server.h @@ -0,0 +1,95 @@ +/** + * + * @file tftp_server.h + * + * @author Logan Gunthorpe + * + * @brief Trivial File Transfer Protocol (RFC 1350) + * + * Copyright (c) Deltatee Enterprises Ltd. 2013 + * All rights reserved. + * + */ + +/* + * Redistribution and use in source and binary forms, with or without + * modification,are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Author: Logan Gunthorpe + * + */ + +#ifndef LWIP_HDR_APPS_TFTP_SERVER_H +#define LWIP_HDR_APPS_TFTP_SERVER_H + +#include "lwip/apps/tftp_opts.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @ingroup tftp + * TFTP context containing callback functions for TFTP transfers + */ +struct tftp_context { + /** + * Open file for read/write. 
+ * @param fname Filename + * @param mode Mode string from TFTP RFC 1350 (netascii, octet, mail) + * @param write Flag indicating read (0) or write (!= 0) access + * @returns File handle supplied to other functions + */ + void* (*open)(const char* fname, const char* mode, u8_t write); + /** + * Close file handle + * @param handle File handle returned by open() + */ + void (*close)(void* handle); + /** + * Read from file + * @param handle File handle returned by open() + * @param buf Target buffer to copy read data to + * @param bytes Number of bytes to copy to buf + * @returns >= 0: Success; < 0: Error + */ + int (*read)(void* handle, void* buf, int bytes); + /** + * Write to file + * @param handle File handle returned by open() + * @param pbuf PBUF adjusted such that payload pointer points + * to the beginning of write data. In other words, + * TFTP headers are stripped off. + * @returns >= 0: Success; < 0: Error + */ + int (*write)(void* handle, struct pbuf* p); +}; + +err_t tftp_init(const struct tftp_context* ctx); +void tftp_cleanup(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_APPS_TFTP_SERVER_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h new file mode 100644 index 0000000..d281da7 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/arch.h @@ -0,0 +1,393 @@ +/** + * @file + * Support for different processor and compiler architectures + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
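[Usage note] A tftp_context implementation can be as small as four wrappers around stdio; a hedged sketch with octet mode assumed and error handling kept minimal:

#include <stdio.h>
#include "lwip/apps/tftp_server.h"

/* Illustrative only: back TFTP transfers with stdio files. */
static void* example_tftp_open(const char* fname, const char* mode, u8_t write)
{
  LWIP_UNUSED_ARG(mode); /* "octet" assumed */
  return fopen(fname, write ? "wb" : "rb");
}
static void example_tftp_close(void* handle)
{
  fclose((FILE*)handle);
}
static int example_tftp_read(void* handle, void* buf, int bytes)
{
  return (int)fread(buf, 1, (size_t)bytes, (FILE*)handle);
}
static int example_tftp_write(void* handle, struct pbuf* p)
{
  /* p->payload already points past the TFTP header; walk the pbuf chain */
  for (; p != NULL; p = p->next) {
    if (fwrite(p->payload, 1, p->len, (FILE*)handle) != p->len) {
      return -1;
    }
  }
  return 0;
}
static const struct tftp_context example_tftp_ctx = {
  example_tftp_open, example_tftp_close, example_tftp_read, example_tftp_write
};
/* start the server: err_t err = tftp_init(&example_tftp_ctx); */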
+ *
+ * Author: Adam Dunkels
+ *
+ */
+#ifndef LWIP_HDR_ARCH_H
+#define LWIP_HDR_ARCH_H
+
+#ifndef LITTLE_ENDIAN
+#define LITTLE_ENDIAN 1234
+#endif
+
+#ifndef BIG_ENDIAN
+#define BIG_ENDIAN 4321
+#endif
+
+#include "arch/cc.h"
+
+/**
+ * @defgroup compiler_abstraction Compiler/platform abstraction
+ * @ingroup sys_layer
+ * All defines related to this section must not be placed in lwipopts.h,
+ * but in arch/cc.h!
+ * If the compiler does not provide memset() this file must include a
+ * definition of it, or include a file which defines it.
+ * These options cannot be \#defined in lwipopts.h since they are not options
+ * of lwIP itself, but options of the lwIP port to your system.
+ * @{
+ */
+
+/** Define the byte order of the system.
+ * Needed for conversion of network data to host byte order.
+ * Allowed values: LITTLE_ENDIAN and BIG_ENDIAN
+ */
+#ifndef BYTE_ORDER
+#define BYTE_ORDER LITTLE_ENDIAN
+#endif
+
+/** Define random number generator function of your system */
+#ifdef __DOXYGEN__
+#define LWIP_RAND() ((u32_t)rand())
+#endif
+
+/** Platform specific diagnostic output.\n
+ * Note the default implementation pulls in printf, which may
+ * in turn pull in a lot of standard library code. In resource-constrained
+ * systems, this should be defined to something less resource-consuming.
+ */
+#ifndef LWIP_PLATFORM_DIAG
+#define LWIP_PLATFORM_DIAG(x) do {printf x;} while(0)
+#include <stdio.h>
+#include <stdlib.h>
+#endif
+
+/** Platform specific assertion handling.\n
+ * Note the default implementation pulls in printf, fflush and abort, which may
+ * in turn pull in a lot of standard library code. In resource-constrained
+ * systems, this should be defined to something less resource-consuming.
+ */
+#ifndef LWIP_PLATFORM_ASSERT
+#define LWIP_PLATFORM_ASSERT(x) do {printf("Assertion \"%s\" failed at line %d in %s\n", \
+  x, __LINE__, __FILE__); fflush(NULL); abort();} while(0)
+#include <stdio.h>
+#include <stdlib.h>
+#endif
+
+/** Define this to 1 in arch/cc.h of your port if you do not want to
+ * include stddef.h header to get size_t. You need to typedef size_t
+ * by yourself in this case.
+ */
+#ifndef LWIP_NO_STDDEF_H
+#define LWIP_NO_STDDEF_H 0
+#endif
+
+#if !LWIP_NO_STDDEF_H
+#include <stddef.h> /* for size_t */
+#endif
+
+/** Define this to 1 in arch/cc.h of your port if your compiler does not provide
+ * the stdint.h header. You need to typedef the generic types listed in
+ * lwip/arch.h yourself in this case (u8_t, u16_t...).
+ */
+#ifndef LWIP_NO_STDINT_H
+#define LWIP_NO_STDINT_H 0
+#endif
+
+/* Define generic types used in lwIP */
+#if !LWIP_NO_STDINT_H
+#include <stdint.h>
+/* stdint.h is C99 which should also provide support for 64-bit integers */
+#if !defined(LWIP_HAVE_INT64) && defined(UINT64_MAX)
+#define LWIP_HAVE_INT64 1
+#endif
+typedef uint8_t u8_t;
+typedef int8_t s8_t;
+typedef uint16_t u16_t;
+typedef int16_t s16_t;
+typedef uint32_t u32_t;
+typedef int32_t s32_t;
+#if LWIP_HAVE_INT64
+typedef uint64_t u64_t;
+typedef int64_t s64_t;
+#endif
+typedef uintptr_t mem_ptr_t;
+#endif
+
+/** Define this to 1 in arch/cc.h of your port if your compiler does not provide
+ * the inttypes.h header. You need to define the format strings listed in
+ * lwip/arch.h yourself in this case (X8_F, U16_F...).
+ */
+#ifndef LWIP_NO_INTTYPES_H
+#define LWIP_NO_INTTYPES_H 0
+#endif
+
+/* Define (sn)printf formatters for these lwIP types */
+#if !LWIP_NO_INTTYPES_H
+#include <inttypes.h>
+#ifndef X8_F
+#define X8_F "02" PRIx8
+#endif
+#ifndef U16_F
+#define U16_F PRIu16
+#endif
+#ifndef S16_F
+#define S16_F PRId16
+#endif
+#ifndef X16_F
+#define X16_F PRIx16
+#endif
+#ifndef U32_F
+#define U32_F PRIu32
+#endif
+#ifndef S32_F
+#define S32_F PRId32
+#endif
+#ifndef X32_F
+#define X32_F PRIx32
+#endif
+#ifndef SZT_F
+#define SZT_F PRIuPTR
+#endif
+#endif
+
+/** Define this to 1 in arch/cc.h of your port if your compiler does not provide
+ * the limits.h header. You need to define the type limits yourself in this case
+ * (e.g. INT_MAX, SSIZE_MAX).
+ */
+#ifndef LWIP_NO_LIMITS_H
+#define LWIP_NO_LIMITS_H 0
+#endif
+
+/* Include limits.h? */
+#if !LWIP_NO_LIMITS_H
+#include <limits.h>
+#endif
+
+/* Do we need to define ssize_t? This is a compatibility hack:
+ * Unfortunately, this type seems to be unavailable on some systems (even if
+ * sys/types or unistd.h are available).
+ * Being like that, we define it to 'int' if SSIZE_MAX is not defined.
+ */
+#ifdef SSIZE_MAX
+/* If SSIZE_MAX is defined, unistd.h should provide the type as well */
+#ifndef LWIP_NO_UNISTD_H
+#define LWIP_NO_UNISTD_H 0
+#endif
+#if !LWIP_NO_UNISTD_H
+#include <unistd.h>
+#endif
+#else /* SSIZE_MAX */
+typedef int ssize_t;
+#define SSIZE_MAX INT_MAX
+#endif /* SSIZE_MAX */
+
+/* some maximum values needed in lwip code */
+#define LWIP_UINT32_MAX 0xffffffff
+
+/** Define this to 1 in arch/cc.h of your port if your compiler does not provide
+ * the ctype.h header. If ctype.h is available, a few character functions
+ * are mapped to the appropriate functions (lwip_islower, lwip_isdigit...), if
+ * not, a private implementation is provided.
+ */
+#ifndef LWIP_NO_CTYPE_H
+#define LWIP_NO_CTYPE_H 0
+#endif
+
+#if LWIP_NO_CTYPE_H
+#define lwip_in_range(c, lo, up) ((u8_t)(c) >= (lo) && (u8_t)(c) <= (up))
+#define lwip_isdigit(c) lwip_in_range((c), '0', '9')
+#define lwip_isxdigit(c) (lwip_isdigit(c) || lwip_in_range((c), 'a', 'f') || lwip_in_range((c), 'A', 'F'))
+#define lwip_islower(c) lwip_in_range((c), 'a', 'z')
+#define lwip_isspace(c) ((c) == ' ' || (c) == '\f' || (c) == '\n' || (c) == '\r' || (c) == '\t' || (c) == '\v')
+#define lwip_isupper(c) lwip_in_range((c), 'A', 'Z')
+#define lwip_tolower(c) (lwip_isupper(c) ? (c) - 'A' + 'a' : c)
+#define lwip_toupper(c) (lwip_islower(c) ? (c) - 'a' + 'A' : c)
+#else
+#include <ctype.h>
+#define lwip_isdigit(c) isdigit((unsigned char)(c))
+#define lwip_isxdigit(c) isxdigit((unsigned char)(c))
+#define lwip_islower(c) islower((unsigned char)(c))
+#define lwip_isspace(c) isspace((unsigned char)(c))
+#define lwip_isupper(c) isupper((unsigned char)(c))
+#define lwip_tolower(c) tolower((unsigned char)(c))
+#define lwip_toupper(c) toupper((unsigned char)(c))
+#endif
+
+/** C++ const_cast<target_type>(val) equivalent to remove constness from a value (GCC -Wcast-qual) */
+#ifndef LWIP_CONST_CAST
+#define LWIP_CONST_CAST(target_type, val) ((target_type)((ptrdiff_t)val))
+#endif
+
+/** Get rid of alignment cast warnings (GCC -Wcast-align) */
+#ifndef LWIP_ALIGNMENT_CAST
+#define LWIP_ALIGNMENT_CAST(target_type, val) LWIP_CONST_CAST(target_type, val)
+#endif
+
+/** Get rid of warnings related to pointer-to-numeric and vice-versa casts,
+ * e.g. "conversion from 'u8_t' to 'void *' of greater size"
+ */
+#ifndef LWIP_PTR_NUMERIC_CAST
+#define LWIP_PTR_NUMERIC_CAST(target_type, val) LWIP_CONST_CAST(target_type, val)
+#endif
+
+/** Avoid warnings/errors related to implicitly casting away packed attributes by doing an explicit cast */
+#ifndef LWIP_PACKED_CAST
+#define LWIP_PACKED_CAST(target_type, val) LWIP_CONST_CAST(target_type, val)
+#endif
+
+/** Allocates a memory buffer of specified size that is of sufficient size to align
+ * its start address using LWIP_MEM_ALIGN.
+ * You can declare your own version here e.g. to enforce alignment without adding
+ * trailing padding bytes (see LWIP_MEM_ALIGN_BUFFER) or your own section placement
+ * requirements.\n
+ * e.g. if you use gcc and need 32 bit alignment:\n
+ * \#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u8_t variable_name[size] \_\_attribute\_\_((aligned(4)))\n
+ * or more portable:\n
+ * \#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u32_t variable_name[(size + sizeof(u32_t) - 1) / sizeof(u32_t)]
+ */
+#ifndef LWIP_DECLARE_MEMORY_ALIGNED
+#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u8_t variable_name[LWIP_MEM_ALIGN_BUFFER(size)]
+#endif
+
+/** Calculate memory size for an aligned buffer - returns the next highest
+ * multiple of MEM_ALIGNMENT (e.g. LWIP_MEM_ALIGN_SIZE(3) and
+ * LWIP_MEM_ALIGN_SIZE(4) will both yield 4 for MEM_ALIGNMENT == 4).
+ */
+#ifndef LWIP_MEM_ALIGN_SIZE
+#define LWIP_MEM_ALIGN_SIZE(size) (((size) + MEM_ALIGNMENT - 1U) & ~(MEM_ALIGNMENT-1U))
+#endif
+
+/** Calculate safe memory size for an aligned buffer when using an unaligned
+ * type as storage. This includes a safety-margin on (MEM_ALIGNMENT - 1) at the
+ * start (e.g. if buffer is u8_t[] and actual data will be u32_t*)
+ */
+#ifndef LWIP_MEM_ALIGN_BUFFER
+#define LWIP_MEM_ALIGN_BUFFER(size) (((size) + MEM_ALIGNMENT - 1U))
+#endif
+
+/** Align a memory pointer to the alignment defined by MEM_ALIGNMENT
+ * so that ADDR % MEM_ALIGNMENT == 0
+ */
+#ifndef LWIP_MEM_ALIGN
+#define LWIP_MEM_ALIGN(addr) ((void *)(((mem_ptr_t)(addr) + MEM_ALIGNMENT - 1) & ~(mem_ptr_t)(MEM_ALIGNMENT-1)))
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Packed structs support.
+ * Placed BEFORE declaration of a packed struct.\n
+ * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n
+ * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here.
+ */
+#ifndef PACK_STRUCT_BEGIN
+#define PACK_STRUCT_BEGIN
+#endif /* PACK_STRUCT_BEGIN */
+
+/** Packed structs support.
+ * Placed AFTER declaration of a packed struct.\n
+ * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n
+ * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here.
+ */
+#ifndef PACK_STRUCT_END
+#define PACK_STRUCT_END
+#endif /* PACK_STRUCT_END */
+
+/** Packed structs support.
+ * Placed between end of declaration of a packed struct and trailing semicolon.\n
+ * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n
+ * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here.
+ */
+#ifndef PACK_STRUCT_STRUCT
+#if defined(__GNUC__) || defined(__clang__)
+#define PACK_STRUCT_STRUCT __attribute__((packed))
+#else
+#define PACK_STRUCT_STRUCT
+#endif
+#endif /* PACK_STRUCT_STRUCT */
+
+/** Packed structs support.
+ * Wraps u32_t and u16_t members.\n
+ * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n
+ * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here.
+ */
+#ifndef PACK_STRUCT_FIELD
+#define PACK_STRUCT_FIELD(x) x
+#endif /* PACK_STRUCT_FIELD */
+
+/** Packed structs support.
+ * Wraps u8_t members, where some compilers warn that packing is not necessary.\n
+ * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n
+ * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here.
+ */
+#ifndef PACK_STRUCT_FLD_8
+#define PACK_STRUCT_FLD_8(x) PACK_STRUCT_FIELD(x)
+#endif /* PACK_STRUCT_FLD_8 */
+
+/** Packed structs support.
+ * Wraps members that are packed structs themselves, where some compilers warn that packing is not necessary.\n
+ * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n
+ * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here.
+ */
+#ifndef PACK_STRUCT_FLD_S
+#define PACK_STRUCT_FLD_S(x) PACK_STRUCT_FIELD(x)
+#endif /* PACK_STRUCT_FLD_S */
+
+/** PACK_STRUCT_USE_INCLUDES==1: Packed structs support using \#include files before and after struct to be packed.\n
+ * The file included BEFORE the struct is "arch/bpstruct.h".\n
+ * The file included AFTER the struct is "arch/epstruct.h".\n
+ * This can be used to implement struct packing on MS Visual C compilers, see
+ * the Win32 port in the lwIP contrib repository for reference.
+ * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n
+ * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here.
+ */
+#ifdef __DOXYGEN__
+#define PACK_STRUCT_USE_INCLUDES
+#endif
+
+/** Eliminates compiler warning about unused arguments (GCC -Wextra -Wunused). */
+#ifndef LWIP_UNUSED_ARG
+#define LWIP_UNUSED_ARG(x) (void)x
+#endif /* LWIP_UNUSED_ARG */
+
+/** LWIP_PROVIDE_ERRNO==1: Let lwIP provide ERRNO values and the 'errno' variable.
+ * If this is disabled, cc.h must either define 'errno', include <errno.h>,
+ * define LWIP_ERRNO_STDINCLUDE to get <errno.h> included or
+ * define LWIP_ERRNO_INCLUDE to <errno.h> or equivalent.
+ */
+#if defined __DOXYGEN__
+#define LWIP_PROVIDE_ERRNO
+#endif
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LWIP_HDR_ARCH_H */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h
new file mode 100644
index 0000000..5c6d13e
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/autoip.h
@@ -0,0 +1,99 @@
+/**
+ * @file
+ *
+ * AutoIP Automatic LinkLocal IP Configuration
+ */
+
+/*
+ *
+ * Copyright (c) 2007 Dominik Spies
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * Author: Dominik Spies
+ *
+ * This is an AutoIP implementation for the lwIP TCP/IP stack. It aims to conform
+ * with RFC 3927.
+ *
+ */
+
+#ifndef LWIP_HDR_AUTOIP_H
+#define LWIP_HDR_AUTOIP_H
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/netif.h"
+/* #include "lwip/udp.h" */
+#include "lwip/etharp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** AutoIP Timing */
+#define AUTOIP_TMR_INTERVAL 100
+#define AUTOIP_TICKS_PER_SECOND (1000 / AUTOIP_TMR_INTERVAL)
+
+/** AutoIP state information per netif */
+struct autoip
+{
+  /** the currently selected, probed, announced or used LL IP-Address */
+  ip4_addr_t llipaddr;
+  /** current AutoIP state machine state */
+  u8_t state;
+  /** sent number of probes or announces, dependent on state */
+  u8_t sent_num;
+  /** ticks to wait, tick is AUTOIP_TMR_INTERVAL long */
+  u16_t ttw;
+  /** ticks until a conflict can be solved by defending */
+  u8_t lastconflict;
+  /** total number of probed/used Link Local IP-Addresses */
+  u8_t tried_llipaddr;
+};
+
+
+void autoip_set_struct(struct netif *netif, struct autoip *autoip);
+/** Remove a struct autoip previously set to the netif using autoip_set_struct() */
+#define autoip_remove_struct(netif) do { (netif)->autoip = NULL; } while (0)
+err_t autoip_start(struct netif *netif);
+err_t autoip_stop(struct netif *netif);
+void autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr);
+void autoip_tmr(void);
+void autoip_network_changed(struct netif *netif);
+u8_t autoip_supplied_address(const struct netif *netif);
+
+/* for lwIP internal use by ip4.c */
+u8_t autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr);
+
+#define netif_autoip_data(netif) ((struct autoip*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP))
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LWIP_IPV4 && LWIP_AUTOIP */
+
+#endif /* LWIP_HDR_AUTOIP_H */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h
new file mode 100644
index 0000000..346ac47
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/debug.h
@@ -0,0 +1,161 @@
+/**
+ * @file
+ * Debug messages infrastructure
+ */
+
+/*
+ * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
+ * All rights reserved.
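[Usage note] For the autoip.h API above: with LWIP_AUTOIP==1, a netif can fall back to a 169.254/16 link-local address as sketched below. In a sequential (NO_SYS==0) build the stack services the AUTOIP_TMR_INTERVAL timer itself; in a NO_SYS build, call autoip_tmr() every AUTOIP_TMR_INTERVAL ms yourself. Function and variable names here are illustrative:

#include "lwip/autoip.h"

static struct autoip example_autoip_state; /* optional static storage */

void example_start_autoip(struct netif *netif)
{
  /* use caller-provided storage instead of heap allocation (optional) */
  autoip_set_struct(netif, &example_autoip_state);
  /* begin probing/announcing a 169.254.x.x address per RFC 3927 */
  if (autoip_start(netif) != ERR_OK) {
    /* handle startup failure, e.g. netif not added yet */
  }
}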
+ * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_DEBUG_H +#define LWIP_HDR_DEBUG_H + +#include "lwip/arch.h" +#include "lwip/opt.h" + +/** + * @defgroup debugging_levels LWIP_DBG_MIN_LEVEL and LWIP_DBG_TYPES_ON values + * @ingroup lwip_opts_debugmsg + * @{ + */ + +/** @name Debug level (LWIP_DBG_MIN_LEVEL) + * @{ + */ +/** Debug level: ALL messages*/ +#define LWIP_DBG_LEVEL_ALL 0x00 +/** Debug level: Warnings. bad checksums, dropped packets, ... */ +#define LWIP_DBG_LEVEL_WARNING 0x01 +/** Debug level: Serious. memory allocation failures, ... */ +#define LWIP_DBG_LEVEL_SERIOUS 0x02 +/** Debug level: Severe */ +#define LWIP_DBG_LEVEL_SEVERE 0x03 +/** + * @} + */ + +#define LWIP_DBG_MASK_LEVEL 0x03 +/* compatibility define only */ +#define LWIP_DBG_LEVEL_OFF LWIP_DBG_LEVEL_ALL + +/** @name Enable/disable debug messages completely (LWIP_DBG_TYPES_ON) + * @{ + */ +/** flag for LWIP_DEBUGF to enable that debug message */ +#define LWIP_DBG_ON 0x80U +/** flag for LWIP_DEBUGF to disable that debug message */ +#define LWIP_DBG_OFF 0x00U +/** + * @} + */ + +/** @name Debug message types (LWIP_DBG_TYPES_ON) + * @{ + */ +/** flag for LWIP_DEBUGF indicating a tracing message (to follow program flow) */ +#define LWIP_DBG_TRACE 0x40U +/** flag for LWIP_DEBUGF indicating a state debug message (to follow module states) */ +#define LWIP_DBG_STATE 0x20U +/** flag for LWIP_DEBUGF indicating newly added code, not thoroughly tested yet */ +#define LWIP_DBG_FRESH 0x10U +/** flag for LWIP_DEBUGF to halt after printing this debug message */ +#define LWIP_DBG_HALT 0x08U +/** + * @} + */ + +/** + * @} + */ + +/** + * @defgroup lwip_assertions Assertion handling + * @ingroup lwip_opts_debug + * @{ + */ +/** + * LWIP_NOASSERT: Disable LWIP_ASSERT checks: + * To disable assertions define LWIP_NOASSERT in arch/cc.h. 
+ */ +#ifdef __DOXYGEN__ +#define LWIP_NOASSERT +#undef LWIP_NOASSERT +#endif +/** + * @} + */ + +#ifndef LWIP_NOASSERT +#define LWIP_ASSERT(message, assertion) do { if (!(assertion)) { \ + LWIP_PLATFORM_ASSERT(message); }} while(0) +#else /* LWIP_NOASSERT */ +#define LWIP_ASSERT(message, assertion) +#endif /* LWIP_NOASSERT */ + +#ifndef LWIP_ERROR +#ifndef LWIP_NOASSERT +#define LWIP_PLATFORM_ERROR(message) LWIP_PLATFORM_ASSERT(message) +#elif defined LWIP_DEBUG +#define LWIP_PLATFORM_ERROR(message) LWIP_PLATFORM_DIAG((message)) +#else +#define LWIP_PLATFORM_ERROR(message) +#endif + +/* if "expression" isn't true, then print "message" and execute "handler" expression */ +#define LWIP_ERROR(message, expression, handler) do { if (!(expression)) { \ + LWIP_PLATFORM_ERROR(message); handler;}} while(0) +#endif /* LWIP_ERROR */ + +/** Enable debug message printing, but only if debug message type is enabled + * AND is of correct type AND is at least LWIP_DBG_LEVEL. + */ +#ifdef __DOXYGEN__ +#define LWIP_DEBUG +#undef LWIP_DEBUG +#endif + +#ifdef LWIP_DEBUG +#define LWIP_DEBUGF(debug, message) do { \ + if ( \ + ((debug) & LWIP_DBG_ON) && \ + ((debug) & LWIP_DBG_TYPES_ON) && \ + ((s16_t)((debug) & LWIP_DBG_MASK_LEVEL) >= LWIP_DBG_MIN_LEVEL)) { \ + LWIP_PLATFORM_DIAG(message); \ + if ((debug) & LWIP_DBG_HALT) { \ + while(1); \ + } \ + } \ + } while(0) + +#else /* LWIP_DEBUG */ +#define LWIP_DEBUGF(debug, message) +#endif /* LWIP_DEBUG */ + +#endif /* LWIP_HDR_DEBUG_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/def.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/def.h new file mode 100644 index 0000000..10b5ac2 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/def.h @@ -0,0 +1,152 @@ +/** + * @file + * various utility macros + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
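A sketch of how the assertion and debug macros above are typically driven (ETHARP_DEBUG is a real per-module switch from opt.h; the message text and the pbuf p are illustrative only). Note the double parentheses: LWIP_DEBUGF takes the whole printf-style argument list as a single macro argument.

/* lwipopts.h (sketch): enable debug output globally and per module */
#define LWIP_DEBUG
#define ETHARP_DEBUG (LWIP_DBG_ON | LWIP_DBG_TRACE)

/* in code, with a struct pbuf *p in scope: */
LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE,
            ("etharp: queueing packet %p\n", (void *)p));
LWIP_ASSERT("p != NULL", p != NULL);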
+ * + * Author: Adam Dunkels + * + */ + +/** + * @defgroup perf Performance measurement + * @ingroup sys_layer + * All defines related to this section must not be placed in lwipopts.h, + * but in arch/perf.h! + * Measurement calls made throughout lwip, these can be defined to nothing. + * - PERF_START: start measuring something. + * - PERF_STOP(x): stop measuring something, and record the result. + */ + +#ifndef LWIP_HDR_DEF_H +#define LWIP_HDR_DEF_H + +/* arch.h might define NULL already */ +#include "lwip/arch.h" +#include "lwip/opt.h" +#if LWIP_PERF +#include "arch/perf.h" +#else /* LWIP_PERF */ +#define PERF_START /* null definition */ +#define PERF_STOP(x) /* null definition */ +#endif /* LWIP_PERF */ + +#ifdef __cplusplus +extern "C" { +#endif + +#define LWIP_MAX(x , y) (((x) > (y)) ? (x) : (y)) +#define LWIP_MIN(x , y) (((x) < (y)) ? (x) : (y)) + +/* Get the number of entries in an array ('x' must NOT be a pointer!) */ +#define LWIP_ARRAYSIZE(x) (sizeof(x)/sizeof((x)[0])) + +/** Create u32_t value from bytes */ +#define LWIP_MAKEU32(a,b,c,d) (((u32_t)((a) & 0xff) << 24) | \ + ((u32_t)((b) & 0xff) << 16) | \ + ((u32_t)((c) & 0xff) << 8) | \ + (u32_t)((d) & 0xff)) + +#ifndef NULL +#ifdef __cplusplus +#define NULL 0 +#else +#define NULL ((void *)0) +#endif +#endif + +#if BYTE_ORDER == BIG_ENDIAN +#define lwip_htons(x) ((u16_t)(x)) +#define lwip_ntohs(x) ((u16_t)(x)) +#define lwip_htonl(x) ((u32_t)(x)) +#define lwip_ntohl(x) ((u32_t)(x)) +#define PP_HTONS(x) ((u16_t)(x)) +#define PP_NTOHS(x) ((u16_t)(x)) +#define PP_HTONL(x) ((u32_t)(x)) +#define PP_NTOHL(x) ((u32_t)(x)) +#else /* BYTE_ORDER != BIG_ENDIAN */ +#ifndef lwip_htons +u16_t lwip_htons(u16_t x); +#endif +#define lwip_ntohs(x) lwip_htons(x) + +#ifndef lwip_htonl +u32_t lwip_htonl(u32_t x); +#endif +#define lwip_ntohl(x) lwip_htonl(x) + +/* These macros should be calculated by the preprocessor and are used + with compile-time constants only (so that there is no little-endian + overhead at runtime). */ +#define PP_HTONS(x) ((u16_t)((((x) & (u16_t)0x00ffU) << 8) | (((x) & (u16_t)0xff00U) >> 8))) +#define PP_NTOHS(x) PP_HTONS(x) +#define PP_HTONL(x) ((((x) & (u32_t)0x000000ffUL) << 24) | \ + (((x) & (u32_t)0x0000ff00UL) << 8) | \ + (((x) & (u32_t)0x00ff0000UL) >> 8) | \ + (((x) & (u32_t)0xff000000UL) >> 24)) +#define PP_NTOHL(x) PP_HTONL(x) +#endif /* BYTE_ORDER == BIG_ENDIAN */ + +/* Provide usual function names as macros for users, but this can be turned off */ +#ifndef LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS +#define htons(x) lwip_htons(x) +#define ntohs(x) lwip_ntohs(x) +#define htonl(x) lwip_htonl(x) +#define ntohl(x) lwip_ntohl(x) +#endif + +/* Functions that are not available as standard implementations. + * In cc.h, you can #define these to implementations available on + * your platform to save some code bytes if you use these functions + * in your application, too. 
+ */ + +#ifndef lwip_itoa +/* This can be #defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform */ +void lwip_itoa(char* result, size_t bufsize, int number); +#endif +#ifndef lwip_strnicmp +/* This can be #defined to strnicmp() or strncasecmp() depending on your platform */ +int lwip_strnicmp(const char* str1, const char* str2, size_t len); +#endif +#ifndef lwip_stricmp +/* This can be #defined to stricmp() or strcasecmp() depending on your platform */ +int lwip_stricmp(const char* str1, const char* str2); +#endif +#ifndef lwip_strnstr +/* This can be #defined to strnstr() depending on your platform */ +char* lwip_strnstr(const char* buffer, const char* token, size_t n); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_DEF_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h new file mode 100644 index 0000000..a965efd --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp.h @@ -0,0 +1,139 @@ +/** + * @file + * DHCP client API + */ + +/* + * Copyright (c) 2001-2004 Leon Woestenberg + * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
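The PP_HTONS/PP_HTONL macros above exist so that constants are byte-swapped by the preprocessor, while lwip_htons()/lwip_htonl() do the work at runtime on little-endian targets. A small sketch (the port number is arbitrary):

#include "lwip/def.h"

#define MY_SERVICE_PORT PP_HTONS(7777) /* compile-time constant, usable in initializers */

u16_t to_network_order(u16_t host_port)
{
  return lwip_htons(host_port); /* runtime swap; a no-op when BYTE_ORDER == BIG_ENDIAN */
}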
+ *
+ * Author: Leon Woestenberg
+ *
+ */
+#ifndef LWIP_HDR_DHCP_H
+#define LWIP_HDR_DHCP_H
+
+#include "lwip/opt.h"
+
+#if LWIP_DHCP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/netif.h"
+#include "lwip/udp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** period (in seconds) of the application calling dhcp_coarse_tmr() */
+#define DHCP_COARSE_TIMER_SECS 60
+/** period (in milliseconds) of the application calling dhcp_coarse_tmr() */
+#define DHCP_COARSE_TIMER_MSECS (DHCP_COARSE_TIMER_SECS * 1000UL)
+/** period (in milliseconds) of the application calling dhcp_fine_tmr() */
+#define DHCP_FINE_TIMER_MSECS 500
+
+#define DHCP_BOOT_FILE_LEN 128U
+
+/* AutoIP cooperation flags (struct dhcp.autoip_coop_state) */
+typedef enum {
+  DHCP_AUTOIP_COOP_STATE_OFF = 0,
+  DHCP_AUTOIP_COOP_STATE_ON  = 1
+} dhcp_autoip_coop_state_enum_t;
+
+struct dhcp
+{
+  /** transaction identifier of last sent request */
+  u32_t xid;
+  /** track PCB allocation state */
+  u8_t pcb_allocated;
+  /** current DHCP state machine state */
+  u8_t state;
+  /** retries of current request */
+  u8_t tries;
+#if LWIP_DHCP_AUTOIP_COOP
+  u8_t autoip_coop_state;
+#endif
+  u8_t subnet_mask_given;
+
+  u16_t request_timeout; /* #ticks with period DHCP_FINE_TIMER_SECS for request timeout */
+  u16_t t1_timeout;      /* #ticks with period DHCP_COARSE_TIMER_SECS for renewal time */
+  u16_t t2_timeout;      /* #ticks with period DHCP_COARSE_TIMER_SECS for rebind time */
+  u16_t t1_renew_time;   /* #ticks with period DHCP_COARSE_TIMER_SECS until next renew try */
+  u16_t t2_rebind_time;  /* #ticks with period DHCP_COARSE_TIMER_SECS until next rebind try */
+  u16_t lease_used;      /* #ticks with period DHCP_COARSE_TIMER_SECS since last received DHCP ack */
+  u16_t t0_timeout;      /* #ticks with period DHCP_COARSE_TIMER_SECS for lease time */
+  ip_addr_t server_ip_addr; /* dhcp server address that offered this lease (ip_addr_t because passed to UDP) */
+  ip4_addr_t offered_ip_addr;
+  ip4_addr_t offered_sn_mask;
+  ip4_addr_t offered_gw_addr;
+
+  u32_t offered_t0_lease;  /* lease period (in seconds) */
+  u32_t offered_t1_renew;  /* recommended renew time (usually 50% of lease period) */
+  u32_t offered_t2_rebind; /* recommended rebind time (usually 87.5% of lease period) */
+#if LWIP_DHCP_BOOTP_FILE
+  ip4_addr_t offered_si_addr;
+  char boot_file_name[DHCP_BOOT_FILE_LEN];
+#endif /* LWIP_DHCP_BOOTP_FILE */
+};
+
+
+void dhcp_set_struct(struct netif *netif, struct dhcp *dhcp);
+/** Remove a struct dhcp previously set to the netif using dhcp_set_struct() */
+#define dhcp_remove_struct(netif) netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL)
+void dhcp_cleanup(struct netif *netif);
+err_t dhcp_start(struct netif *netif);
+err_t dhcp_renew(struct netif *netif);
+err_t dhcp_release(struct netif *netif);
+void dhcp_stop(struct netif *netif);
+void dhcp_release_and_stop(struct netif *netif);
+void dhcp_inform(struct netif *netif);
+void dhcp_network_changed(struct netif *netif);
+#if DHCP_DOES_ARP_CHECK
+void dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr);
+#endif
+u8_t dhcp_supplied_address(const struct netif *netif);
+/* to be called every minute */
+void dhcp_coarse_tmr(void);
+/* to be called every half second */
+void dhcp_fine_tmr(void);
+
+#if LWIP_DHCP_GET_NTP_SRV
+/** This function must exist, in order to add offered NTP servers to
+ * the NTP (or SNTP) engine.
+ * See LWIP_DHCP_MAX_NTP_SERVERS */ +extern void dhcp_set_ntp_servers(u8_t num_ntp_servers, const ip4_addr_t* ntp_server_addrs); +#endif /* LWIP_DHCP_GET_NTP_SRV */ + +#define netif_dhcp_data(netif) ((struct dhcp*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DHCP */ + +#endif /*LWIP_HDR_DHCP_H*/ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h new file mode 100644 index 0000000..61ca4c4 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dhcp6.h @@ -0,0 +1,104 @@ +/** + * @file + * + * DHCPv6 client: IPv6 address autoconfiguration as per + * RFC 3315 (stateful DHCPv6) and + * RFC 3736 (stateless DHCPv6). + */ + +/* + * Copyright (c) 2018 Simon Goldschmidt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
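The dhcp.h comments above fix the timer contract: dhcp_fine_tmr() every DHCP_FINE_TIMER_MSECS (500 ms) and dhcp_coarse_tmr() every DHCP_COARSE_TIMER_MSECS (60 s). lwIP's stock timeout table normally drives both; the sketch below is only for a bare-metal port that polls by hand, using lwIP's millisecond clock sys_now():

#include "lwip/dhcp.h"
#include "lwip/sys.h"

void dhcp_timers_poll(void)
{
  static u32_t last_fine_ms, last_coarse_ms;
  u32_t now = sys_now();

  if ((u32_t)(now - last_fine_ms) >= DHCP_FINE_TIMER_MSECS) {
    last_fine_ms = now;
    dhcp_fine_tmr();   /* request retransmits and timeouts */
  }
  if ((u32_t)(now - last_coarse_ms) >= DHCP_COARSE_TIMER_MSECS) {
    last_coarse_ms = now;
    dhcp_coarse_tmr(); /* lease bookkeeping: T1 renew, T2 rebind */
  }
}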
+ *
+ * Author: Simon Goldschmidt
+ */
+
+#ifndef LWIP_HDR_IP6_DHCP6_H
+#define LWIP_HDR_IP6_DHCP6_H
+
+#include "lwip/opt.h"
+
+#if LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/err.h"
+#include "lwip/netif.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** period (in milliseconds) of the application calling dhcp6_tmr() */
+#define DHCP6_TIMER_MSECS 500
+
+struct dhcp6
+{
+  /** transaction identifier of last sent request */
+  u32_t xid;
+  /** track PCB allocation state */
+  u8_t pcb_allocated;
+  /** current DHCPv6 state machine state */
+  u8_t state;
+  /** retries of current request */
+  u8_t tries;
+  /** if request config is triggered while another action is active, this keeps track of it */
+  u8_t request_config_pending;
+  /** #ticks with period DHCP6_TIMER_MSECS for request timeout */
+  u16_t request_timeout;
+#if LWIP_IPV6_DHCP6_STATEFUL
+  /* @todo: add more members here to keep track of stateful DHCPv6 data, like lease times */
+#endif /* LWIP_IPV6_DHCP6_STATEFUL */
+};
+
+void dhcp6_set_struct(struct netif *netif, struct dhcp6 *dhcp6);
+/** Remove a struct dhcp6 previously set to the netif using dhcp6_set_struct() */
+#define dhcp6_remove_struct(netif) netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6, NULL)
+void dhcp6_cleanup(struct netif *netif);
+
+err_t dhcp6_enable_stateful(struct netif *netif);
+err_t dhcp6_enable_stateless(struct netif *netif);
+void dhcp6_disable(struct netif *netif);
+
+void dhcp6_tmr(void);
+
+void dhcp6_nd6_ra_trigger(struct netif *netif, u8_t managed_addr_config, u8_t other_config);
+
+#if LWIP_DHCP6_GET_NTP_SRV
+/** This function must exist, in order to add offered NTP servers to
+ * the NTP (or SNTP) engine.
+ * See LWIP_DHCP6_MAX_NTP_SERVERS */
+extern void dhcp6_set_ntp_servers(u8_t num_ntp_servers, const ip_addr_t* ntp_server_addrs);
+#endif /* LWIP_DHCP6_GET_NTP_SRV */
+
+#define netif_dhcp6_data(netif) ((struct dhcp6*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP6))
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LWIP_IPV6_DHCP6 */
+
+#endif /* LWIP_HDR_IP6_DHCP6_H */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h
new file mode 100644
index 0000000..eed6825
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/dns.h
@@ -0,0 +1,131 @@
+/**
+ * @file
+ * DNS API
+ */
+
+/**
+ * lwip DNS resolver header file.
+
+ * Author: Jim Pettinato
+ * April 2007
+
+ * ported from uIP resolv.c Copyright (c) 2002-2003, Adam Dunkels.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
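A minimal sketch for the dhcp6.h API above (LWIP_IPV6_DHCP6 enabled; the netif eth0 is an assumed placeholder): stateless mode leaves addressing to SLAAC and only fetches extra configuration such as DNS, and optionally NTP, servers.

#include "lwip/dhcp6.h"
#include "lwip/netif.h"

extern struct netif eth0; /* assumed: initialized elsewhere */

void start_dhcp6(void)
{
  dhcp6_enable_stateless(&eth0);
  /* dhcp6_tmr() must then be called every DHCP6_TIMER_MSECS (500 ms) */
}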
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef LWIP_HDR_DNS_H +#define LWIP_HDR_DNS_H + +#include "lwip/opt.h" + +#if LWIP_DNS + +#include "lwip/ip_addr.h" +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** DNS timer period */ +#define DNS_TMR_INTERVAL 1000 + +/* DNS resolve types: */ +#define LWIP_DNS_ADDRTYPE_IPV4 0 +#define LWIP_DNS_ADDRTYPE_IPV6 1 +#define LWIP_DNS_ADDRTYPE_IPV4_IPV6 2 /* try to resolve IPv4 first, try IPv6 if IPv4 fails only */ +#define LWIP_DNS_ADDRTYPE_IPV6_IPV4 3 /* try to resolve IPv6 first, try IPv4 if IPv6 fails only */ +#if LWIP_IPV4 && LWIP_IPV6 +#ifndef LWIP_DNS_ADDRTYPE_DEFAULT +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV4_IPV6 +#endif +#elif LWIP_IPV4 +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV4 +#else +#define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV6 +#endif + +#if DNS_LOCAL_HOSTLIST +/** struct used for local host-list */ +struct local_hostlist_entry { + /** static hostname */ + const char *name; + /** static host address in network byteorder */ + ip_addr_t addr; + struct local_hostlist_entry *next; +}; +#define DNS_LOCAL_HOSTLIST_ELEM(name, addr_init) {name, addr_init, NULL} +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +#ifndef DNS_LOCAL_HOSTLIST_MAX_NAMELEN +#define DNS_LOCAL_HOSTLIST_MAX_NAMELEN DNS_MAX_NAME_LENGTH +#endif +#define LOCALHOSTLIST_ELEM_SIZE ((sizeof(struct local_hostlist_entry) + DNS_LOCAL_HOSTLIST_MAX_NAMELEN + 1)) +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ +#endif /* DNS_LOCAL_HOSTLIST */ + +#if LWIP_IPV4 +extern const ip_addr_t dns_mquery_v4group; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 +extern const ip_addr_t dns_mquery_v6group; +#endif /* LWIP_IPV6 */ + +/** Callback which is invoked when a hostname is found. + * A function of this type must be implemented by the application using the DNS resolver. + * @param name pointer to the name that was looked up. + * @param ipaddr pointer to an ip_addr_t containing the IP address of the hostname, + * or NULL if the name could not be found (or on any other error). 
+ * @param callback_arg a user-specified callback argument passed to dns_gethostbyname +*/ +typedef void (*dns_found_callback)(const char *name, const ip_addr_t *ipaddr, void *callback_arg); + +void dns_init(void); +void dns_tmr(void); +void dns_setserver(u8_t numdns, const ip_addr_t *dnsserver); +const ip_addr_t* dns_getserver(u8_t numdns); +err_t dns_gethostbyname(const char *hostname, ip_addr_t *addr, + dns_found_callback found, void *callback_arg); +err_t dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, + dns_found_callback found, void *callback_arg, + u8_t dns_addrtype); + + +#if DNS_LOCAL_HOSTLIST +size_t dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg); +err_t dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype); +#if DNS_LOCAL_HOSTLIST_IS_DYNAMIC +int dns_local_removehost(const char *hostname, const ip_addr_t *addr); +err_t dns_local_addhost(const char *hostname, const ip_addr_t *addr); +#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ +#endif /* DNS_LOCAL_HOSTLIST */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_DNS */ + +#endif /* LWIP_HDR_DNS_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/err.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/err.h new file mode 100644 index 0000000..63c138a --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/err.h @@ -0,0 +1,117 @@ +/** + * @file + * lwIP Error codes + */ +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ERR_H +#define LWIP_HDR_ERR_H + +#include "lwip/opt.h" +#include "lwip/arch.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup infrastructure_errors Error codes + * @ingroup infrastructure + * @{ + */ + +/** Definitions for error constants. */ +typedef enum { +/** No error, everything OK. */ + ERR_OK = 0, +/** Out of memory error. 
*/ + ERR_MEM = -1, +/** Buffer error. */ + ERR_BUF = -2, +/** Timeout. */ + ERR_TIMEOUT = -3, +/** Routing problem. */ + ERR_RTE = -4, +/** Operation in progress */ + ERR_INPROGRESS = -5, +/** Illegal value. */ + ERR_VAL = -6, +/** Operation would block. */ + ERR_WOULDBLOCK = -7, +/** Address in use. */ + ERR_USE = -8, +/** Already connecting. */ + ERR_ALREADY = -9, +/** Conn already established.*/ + ERR_ISCONN = -10, +/** Not connected. */ + ERR_CONN = -11, +/** Low-level netif error */ + ERR_IF = -12, + +/** Connection aborted. */ + ERR_ABRT = -13, +/** Connection reset. */ + ERR_RST = -14, +/** Connection closed. */ + ERR_CLSD = -15, +/** Illegal argument. */ + ERR_ARG = -16 +} err_enum_t; + +/** Define LWIP_ERR_T in cc.h if you want to use + * a different type for your platform (must be signed). */ +#ifdef LWIP_ERR_T +typedef LWIP_ERR_T err_t; +#else /* LWIP_ERR_T */ +typedef s8_t err_t; +#endif /* LWIP_ERR_T*/ + +/** + * @} + */ + +#ifdef LWIP_DEBUG +extern const char *lwip_strerr(err_t err); +#else +#define lwip_strerr(x) "" +#endif /* LWIP_DEBUG */ + +#if !NO_SYS +int err_to_errno(err_t err); +#endif /* !NO_SYS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ERR_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h new file mode 100644 index 0000000..eae5d6e --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/errno.h @@ -0,0 +1,198 @@ +/** + * @file + * Posix Errno defines + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
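Tying the dns.h and err.h pieces together, a hedged lookup sketch (the hostname and function names are illustrative; dns_gethostbyname() belongs to the callback-style API, so it must run in the tcpip thread or in a NO_SYS=1 build):

#include "lwip/dns.h"
#include "lwip/err.h"

static void on_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg)
{
  LWIP_UNUSED_ARG(name);
  LWIP_UNUSED_ARG(arg);
  if (ipaddr != NULL) {
    /* resolved asynchronously; copy *ipaddr out if needed after this call */
  } /* NULL means the lookup failed */
}

void resolve_example(void)
{
  ip_addr_t addr;
  err_t err = dns_gethostbyname("example.org", &addr, on_dns_found, NULL);
  if (err == ERR_OK) {
    /* answer came from the local cache: addr is valid now, no callback follows */
  } else if (err == ERR_INPROGRESS) {
    /* query sent: on_dns_found() fires once an answer (or failure) arrives */
  } /* anything else (e.g. ERR_ARG) is an immediate failure */
}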
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ERRNO_H +#define LWIP_HDR_ERRNO_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef LWIP_PROVIDE_ERRNO + +#define EPERM 1 /* Operation not permitted */ +#define ENOENT 2 /* No such file or directory */ +#define ESRCH 3 /* No such process */ +#define EINTR 4 /* Interrupted system call */ +#define EIO 5 /* I/O error */ +#define ENXIO 6 /* No such device or address */ +#define E2BIG 7 /* Arg list too long */ +#define ENOEXEC 8 /* Exec format error */ +#define EBADF 9 /* Bad file number */ +#define ECHILD 10 /* No child processes */ +#define EAGAIN 11 /* Try again */ +#define ENOMEM 12 /* Out of memory */ +#define EACCES 13 /* Permission denied */ +#define EFAULT 14 /* Bad address */ +#define ENOTBLK 15 /* Block device required */ +#define EBUSY 16 /* Device or resource busy */ +#define EEXIST 17 /* File exists */ +#define EXDEV 18 /* Cross-device link */ +#define ENODEV 19 /* No such device */ +#define ENOTDIR 20 /* Not a directory */ +#define EISDIR 21 /* Is a directory */ +#define EINVAL 22 /* Invalid argument */ +#define ENFILE 23 /* File table overflow */ +#define EMFILE 24 /* Too many open files */ +#define ENOTTY 25 /* Not a typewriter */ +#define ETXTBSY 26 /* Text file busy */ +#define EFBIG 27 /* File too large */ +#define ENOSPC 28 /* No space left on device */ +#define ESPIPE 29 /* Illegal seek */ +#define EROFS 30 /* Read-only file system */ +#define EMLINK 31 /* Too many links */ +#define EPIPE 32 /* Broken pipe */ +#define EDOM 33 /* Math argument out of domain of func */ +#define ERANGE 34 /* Math result not representable */ +#define EDEADLK 35 /* Resource deadlock would occur */ +#define ENAMETOOLONG 36 /* File name too long */ +#define ENOLCK 37 /* No record locks available */ +#define ENOSYS 38 /* Function not implemented */ +#define ENOTEMPTY 39 /* Directory not empty */ +#define ELOOP 40 /* Too many symbolic links encountered */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define ENOMSG 42 /* No message of desired type */ +#define EIDRM 43 /* Identifier removed */ +#define ECHRNG 44 /* Channel number out of range */ +#define EL2NSYNC 45 /* Level 2 not synchronized */ +#define EL3HLT 46 /* Level 3 halted */ +#define EL3RST 47 /* Level 3 reset */ +#define ELNRNG 48 /* Link number out of range */ +#define EUNATCH 49 /* Protocol driver not attached */ +#define ENOCSI 50 /* No CSI structure available */ +#define EL2HLT 51 /* Level 2 halted */ +#define EBADE 52 /* Invalid exchange */ +#define EBADR 53 /* Invalid request descriptor */ +#define EXFULL 54 /* Exchange full */ +#define ENOANO 55 /* No anode */ +#define EBADRQC 56 /* Invalid request code */ +#define EBADSLT 57 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 59 /* Bad font file format */ +#define ENOSTR 60 /* Device not a stream */ +#define ENODATA 61 /* No data available */ +#define ETIME 62 /* Timer expired */ +#define ENOSR 63 /* Out of streams resources */ +#define ENONET 64 /* Machine is not on the network */ +#define ENOPKG 65 /* Package not installed */ +#define EREMOTE 66 /* Object is remote */ +#define ENOLINK 67 /* Link has been severed */ +#define EADV 68 /* Advertise error */ +#define ESRMNT 69 /* Srmount error */ +#define ECOMM 70 /* Communication error on send */ +#define EPROTO 71 /* Protocol error */ +#define EMULTIHOP 72 /* Multihop attempted */ +#define EDOTDOT 73 /* RFS specific error */ +#define EBADMSG 74 /* Not a data message */ +#define EOVERFLOW 75 /* Value too large for defined 
data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale NFS file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+
+#ifndef errno
+extern int errno;
+#endif
+
+#else /* LWIP_PROVIDE_ERRNO */
+
+/* Define LWIP_ERRNO_STDINCLUDE if you want to include <errno.h> here */
+#ifdef LWIP_ERRNO_STDINCLUDE
+#include <errno.h>
+#else /* LWIP_ERRNO_STDINCLUDE */
+/* Define LWIP_ERRNO_INCLUDE to an equivalent of <errno.h> to include the error defines here */
+#ifdef LWIP_ERRNO_INCLUDE
+#include LWIP_ERRNO_INCLUDE
+#endif /* LWIP_ERRNO_INCLUDE */
+#endif /* LWIP_ERRNO_STDINCLUDE */
+
+#endif /* LWIP_PROVIDE_ERRNO */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LWIP_HDR_ERRNO_H */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h
new file mode 100644
index 0000000..b018f07
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/etharp.h
@@ -0,0 +1,105 @@
+/**
+ * @file
+ * Ethernet output function - handles OUTGOING ethernet level traffic, implements
+ * ARP resolving.
+ * To be used in most low-level netif implementations
+ */
+
+/*
+ * Copyright (c) 2001-2003 Swedish Institute of Computer Science.
+ * Copyright (c) 2003-2004 Leon Woestenberg
+ * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Adam Dunkels
+ *
+ */
+
+#ifndef LWIP_HDR_NETIF_ETHARP_H
+#define LWIP_HDR_NETIF_ETHARP_H
+
+#include "lwip/opt.h"
+
+#if LWIP_ARP || LWIP_ETHERNET /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/pbuf.h"
+#include "lwip/ip4_addr.h"
+#include "lwip/netif.h"
+#include "lwip/ip4.h"
+#include "lwip/prot/ethernet.h"
+
+#if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */
+
+#include "lwip/prot/etharp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** 1 second period */
+#define ARP_TMR_INTERVAL 1000
+
+#if ARP_QUEUEING
+/** struct for queueing outgoing packets for unknown address
+ * defined here to be accessed by memp.h
+ */
+struct etharp_q_entry {
+  struct etharp_q_entry *next;
+  struct pbuf *p;
+};
+#endif /* ARP_QUEUEING */
+
+#define etharp_init() /* Compatibility define, no init needed.
*/ +void etharp_tmr(void); +ssize_t etharp_find_addr(struct netif *netif, const ip4_addr_t *ipaddr, + struct eth_addr **eth_ret, const ip4_addr_t **ip_ret); +int etharp_get_entry(size_t i, ip4_addr_t **ipaddr, struct netif **netif, struct eth_addr **eth_ret); +err_t etharp_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr); +err_t etharp_query(struct netif *netif, const ip4_addr_t *ipaddr, struct pbuf *q); +err_t etharp_request(struct netif *netif, const ip4_addr_t *ipaddr); +/** For Ethernet network interfaces, we might want to send "gratuitous ARP"; + * this is an ARP packet sent by a node in order to spontaneously cause other + * nodes to update an entry in their ARP cache. + * From RFC 3220 "IP Mobility Support for IPv4" section 4.6. */ +#define etharp_gratuitous(netif) etharp_request((netif), netif_ip4_addr(netif)) +void etharp_cleanup_netif(struct netif *netif); + +#if ETHARP_SUPPORT_STATIC_ENTRIES +err_t etharp_add_static_entry(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr); +err_t etharp_remove_static_entry(const ip4_addr_t *ipaddr); +#endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ + +void etharp_input(struct pbuf *p, struct netif *netif); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_ARP */ +#endif /* LWIP_ARP || LWIP_ETHERNET */ + +#endif /* LWIP_HDR_NETIF_ETHARP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h new file mode 100644 index 0000000..f393c9c --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ethip6.h @@ -0,0 +1,68 @@ +/** + * @file + * + * Ethernet output for IPv6. Uses ND tables for link-layer addressing. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
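One line of the etharp.h API above deserves a usage note: after (re)configuring an interface address it is common to announce it so neighbours drop stale ARP cache entries. A sketch, with eth0 as an assumed netif:

#include "lwip/etharp.h"
#include "lwip/netif.h"

extern struct netif eth0; /* assumed: initialized elsewhere */

void announce_address(void)
{
  /* expands to etharp_request(&eth0, netif_ip4_addr(&eth0)): an ARP request
   * for our own address, i.e. a gratuitous ARP */
  etharp_gratuitous(&eth0);
}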
+ * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ + +#ifndef LWIP_HDR_ETHIP6_H +#define LWIP_HDR_ETHIP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 && LWIP_ETHERNET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/pbuf.h" +#include "lwip/ip6.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" + + +#ifdef __cplusplus +extern "C" { +#endif + + +err_t ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 && LWIP_ETHERNET */ + +#endif /* LWIP_HDR_ETHIP6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h new file mode 100644 index 0000000..d0d67b6 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/icmp.h @@ -0,0 +1,110 @@ +/** + * @file + * ICMP API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. 
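ethip6_output() above is meant to be wired in as the netif's IPv6 output hook, as low-level netif implementations commonly do; low_level_output() is the assumed driver transmit routine, not part of this diff:

#include "lwip/ethip6.h"
#include "lwip/netif.h"

extern err_t low_level_output(struct netif *netif, struct pbuf *p); /* assumed driver hook */

err_t my_ethernetif_init(struct netif *netif)
{
  netif->output_ip6 = ethip6_output;    /* link-layer resolution via the ND cache */
  netif->linkoutput = low_level_output; /* raw frame transmit */
  return ERR_OK;
}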
+ * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_ICMP_H +#define LWIP_HDR_ICMP_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/prot/icmp.h" + +#if LWIP_IPV6 && LWIP_ICMP6 +#include "lwip/icmp6.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** ICMP destination unreachable codes */ +enum icmp_dur_type { + /** net unreachable */ + ICMP_DUR_NET = 0, + /** host unreachable */ + ICMP_DUR_HOST = 1, + /** protocol unreachable */ + ICMP_DUR_PROTO = 2, + /** port unreachable */ + ICMP_DUR_PORT = 3, + /** fragmentation needed and DF set */ + ICMP_DUR_FRAG = 4, + /** source route failed */ + ICMP_DUR_SR = 5 +}; + +/** ICMP time exceeded codes */ +enum icmp_te_type { + /** time to live exceeded in transit */ + ICMP_TE_TTL = 0, + /** fragment reassembly time exceeded */ + ICMP_TE_FRAG = 1 +}; + +#if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */ + +void icmp_input(struct pbuf *p, struct netif *inp); +void icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t); +void icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t); + +#endif /* LWIP_IPV4 && LWIP_ICMP */ + +#if LWIP_IPV4 && LWIP_IPV6 +#if LWIP_ICMP && LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) ((isipv6) ? \ + icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT) : \ + icmp_dest_unreach(pbuf, ICMP_DUR_PORT)) +#elif LWIP_ICMP +#define icmp_port_unreach(isipv6, pbuf) do{ if(!(isipv6)) { icmp_dest_unreach(pbuf, ICMP_DUR_PORT);}}while(0) +#elif LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) do{ if(isipv6) { icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT);}}while(0) +#else +#define icmp_port_unreach(isipv6, pbuf) +#endif +#elif LWIP_IPV6 && LWIP_ICMP6 +#define icmp_port_unreach(isipv6, pbuf) icmp6_dest_unreach(pbuf, ICMP6_DUR_PORT) +#elif LWIP_IPV4 && LWIP_ICMP +#define icmp_port_unreach(isipv6, pbuf) icmp_dest_unreach(pbuf, ICMP_DUR_PORT) +#else /* (LWIP_IPV6 && LWIP_ICMP6) || (LWIP_IPV4 && LWIP_ICMP) */ +#define icmp_port_unreach(isipv6, pbuf) +#endif /* (LWIP_IPV6 && LWIP_ICMP6) || (LWIP_IPV4 && LWIP_ICMP) LWIP_IPV4*/ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_ICMP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h new file mode 100644 index 0000000..cb383d9 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/icmp6.h @@ -0,0 +1,72 @@ +/** + * @file + * + * IPv6 version of ICMP, as per RFC 4443. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
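The icmp_port_unreach() macro above is what a transport layer calls when no PCB matches a datagram's destination port; a sketch (p must still hold the offending packet):

#include "lwip/icmp.h"

void reject_unreachable_port(struct pbuf *p, int is_ipv6)
{
  /* dispatches to icmp_dest_unreach() or icmp6_dest_unreach() depending on
   * the compiled-in IP versions and the is_ipv6 flag */
  icmp_port_unreach(is_ipv6, p);
}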
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_ICMP6_H +#define LWIP_HDR_ICMP6_H + +#include "lwip/opt.h" +#include "lwip/pbuf.h" +#include "lwip/ip6_addr.h" +#include "lwip/netif.h" +#include "lwip/prot/icmp6.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +void icmp6_input(struct pbuf *p, struct netif *inp); +void icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c); +void icmp6_packet_too_big(struct pbuf *p, u32_t mtu); +void icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c); +void icmp6_time_exceeded_with_addrs(struct pbuf *p, enum icmp6_te_code c, + const ip6_addr_t *src_addr, const ip6_addr_t *dest_addr); +void icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, const void *pointer); + +#endif /* LWIP_ICMP6 && LWIP_IPV6 */ + + +#ifdef __cplusplus +} +#endif + + +#endif /* LWIP_HDR_ICMP6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h new file mode 100644 index 0000000..d8261f7 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/if_api.h @@ -0,0 +1,68 @@ +/** + * @file + * Interface Identification APIs from: + * RFC 3493: Basic Socket Interface Extensions for IPv6 + * Section 4: Interface Identification + */ + +/* + * Copyright (c) 2017 Joel Cunningham, Garmin International, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Joel Cunningham + * + */ +#ifndef LWIP_HDR_IF_H +#define LWIP_HDR_IF_H + +#include "lwip/opt.h" + +#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/netif.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define IF_NAMESIZE NETIF_NAMESIZE + +char * lwip_if_indextoname(unsigned int ifindex, char *ifname); +unsigned int lwip_if_nametoindex(const char *ifname); + +#if LWIP_COMPAT_SOCKETS +#define if_indextoname(ifindex, ifname) lwip_if_indextoname(ifindex,ifname) +#define if_nametoindex(ifname) lwip_if_nametoindex(ifname) +#endif /* LWIP_COMPAT_SOCKETS */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_SOCKET */ + +#endif /* LWIP_HDR_IF_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h new file mode 100644 index 0000000..ca52a2d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/igmp.h @@ -0,0 +1,115 @@ +/** + * @file + * IGMP API + */ + +/* + * Copyright (c) 2002 CITEL Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * This file is a contribution to the lwIP TCP/IP stack. + * The Swedish Institute of Computer Science and Adam Dunkels + * are specifically granted permission to redistribute this + * source code. 
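A short sketch for the if_api.h functions above (requires LWIP_SOCKET; the printf reporting is incidental):

#include "lwip/if_api.h"
#include <stdio.h>

void print_ifname(unsigned int ifindex)
{
  char name[IF_NAMESIZE]; /* IF_NAMESIZE aliases NETIF_NAMESIZE, see above */

  if (lwip_if_indextoname(ifindex, name) != NULL) {
    printf("index %u -> %s\n", ifindex, name);
  } /* NULL: no netif has that index */
}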
+*/ + +#ifndef LWIP_HDR_IGMP_H +#define LWIP_HDR_IGMP_H + +#include "lwip/opt.h" +#include "lwip/ip_addr.h" +#include "lwip/netif.h" +#include "lwip/pbuf.h" + +#if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* IGMP timer */ +#define IGMP_TMR_INTERVAL 100 /* Milliseconds */ +#define IGMP_V1_DELAYING_MEMBER_TMR (1000/IGMP_TMR_INTERVAL) +#define IGMP_JOIN_DELAYING_MEMBER_TMR (500 /IGMP_TMR_INTERVAL) + +/* Compatibility defines (don't use for new code) */ +#define IGMP_DEL_MAC_FILTER NETIF_DEL_MAC_FILTER +#define IGMP_ADD_MAC_FILTER NETIF_ADD_MAC_FILTER + +/** + * igmp group structure - there is + * a list of groups for each interface + * these should really be linked from the interface, but + * if we keep them separate we will not affect the lwip original code + * too much + * + * There will be a group for the all systems group address but this + * will not run the state machine as it is used to kick off reports + * from all the other groups + */ +struct igmp_group { + /** next link */ + struct igmp_group *next; + /** multicast address */ + ip4_addr_t group_address; + /** signifies we were the last person to report */ + u8_t last_reporter_flag; + /** current state of the group */ + u8_t group_state; + /** timer for reporting, negative is OFF */ + u16_t timer; + /** counter of simultaneous uses */ + u8_t use; +}; + +/* Prototypes */ +void igmp_init(void); +err_t igmp_start(struct netif *netif); +err_t igmp_stop(struct netif *netif); +void igmp_report_groups(struct netif *netif); +struct igmp_group *igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr); +void igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest); +err_t igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr); +err_t igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr); +err_t igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr); +err_t igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr); +void igmp_tmr(void); + +/** @ingroup igmp + * Get list head of IGMP groups for netif. + * Note: The allsystems group IP is contained in the list as first entry. + * @see @ref netif_set_igmp_mac_filter() + */ +#define netif_igmp_data(netif) ((struct igmp_group *)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP)) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 && LWIP_IGMP */ + +#endif /* LWIP_HDR_IGMP_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h new file mode 100644 index 0000000..36f6ec7 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/inet.h @@ -0,0 +1,169 @@ +/** + * @file + * This file (together with sockets.h) aims to provide structs and functions from + * - arpa/inet.h + * - netinet/in.h + * + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INET_H +#define LWIP_HDR_INET_H + +#include "lwip/opt.h" +#include "lwip/def.h" +#include "lwip/ip_addr.h" +#include "lwip/ip6_addr.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* If your port already typedef's in_addr_t, define IN_ADDR_T_DEFINED + to prevent this code from redefining it. */ +#if !defined(in_addr_t) && !defined(IN_ADDR_T_DEFINED) +typedef u32_t in_addr_t; +#endif + +struct in_addr { + in_addr_t s_addr; +}; + +struct in6_addr { + union { + u32_t u32_addr[4]; + u8_t u8_addr[16]; + } un; +#define s6_addr un.u8_addr +}; + +/** 255.255.255.255 */ +#define INADDR_NONE IPADDR_NONE +/** 127.0.0.1 */ +#define INADDR_LOOPBACK IPADDR_LOOPBACK +/** 0.0.0.0 */ +#define INADDR_ANY IPADDR_ANY +/** 255.255.255.255 */ +#define INADDR_BROADCAST IPADDR_BROADCAST + +/** This macro can be used to initialize a variable of type struct in6_addr + to the IPv6 wildcard address. */ +#define IN6ADDR_ANY_INIT {{{0,0,0,0}}} +/** This macro can be used to initialize a variable of type struct in6_addr + to the IPv6 loopback address. */ +#define IN6ADDR_LOOPBACK_INIT {{{0,0,0,PP_HTONL(1)}}} +/** This variable is initialized by the system to contain the wildcard IPv6 address. */ +extern const struct in6_addr in6addr_any; + +/* Definitions of the bits in an (IPv4) Internet address integer. + + On subnets, host and network parts are found according to + the subnet mask, not these masks. */ +#define IN_CLASSA(a) IP_CLASSA(a) +#define IN_CLASSA_NET IP_CLASSA_NET +#define IN_CLASSA_NSHIFT IP_CLASSA_NSHIFT +#define IN_CLASSA_HOST IP_CLASSA_HOST +#define IN_CLASSA_MAX IP_CLASSA_MAX + +#define IN_CLASSB(b) IP_CLASSB(b) +#define IN_CLASSB_NET IP_CLASSB_NET +#define IN_CLASSB_NSHIFT IP_CLASSB_NSHIFT +#define IN_CLASSB_HOST IP_CLASSB_HOST +#define IN_CLASSB_MAX IP_CLASSB_MAX + +#define IN_CLASSC(c) IP_CLASSC(c) +#define IN_CLASSC_NET IP_CLASSC_NET +#define IN_CLASSC_NSHIFT IP_CLASSC_NSHIFT +#define IN_CLASSC_HOST IP_CLASSC_HOST +#define IN_CLASSC_MAX IP_CLASSC_MAX + +#define IN_CLASSD(d) IP_CLASSD(d) +#define IN_CLASSD_NET IP_CLASSD_NET /* These ones aren't really */ +#define IN_CLASSD_NSHIFT IP_CLASSD_NSHIFT /* net and host fields, but */ +#define IN_CLASSD_HOST IP_CLASSD_HOST /* routing needn't know. 
*/ +#define IN_CLASSD_MAX IP_CLASSD_MAX + +#define IN_MULTICAST(a) IP_MULTICAST(a) + +#define IN_EXPERIMENTAL(a) IP_EXPERIMENTAL(a) +#define IN_BADCLASS(a) IP_BADCLASS(a) + +#define IN_LOOPBACKNET IP_LOOPBACKNET + + +#ifndef INET_ADDRSTRLEN +#define INET_ADDRSTRLEN IP4ADDR_STRLEN_MAX +#endif +#if LWIP_IPV6 +#ifndef INET6_ADDRSTRLEN +#define INET6_ADDRSTRLEN IP6ADDR_STRLEN_MAX +#endif +#endif + +#if LWIP_IPV4 + +#define inet_addr_from_ip4addr(target_inaddr, source_ipaddr) ((target_inaddr)->s_addr = ip4_addr_get_u32(source_ipaddr)) +#define inet_addr_to_ip4addr(target_ipaddr, source_inaddr) (ip4_addr_set_u32(target_ipaddr, (source_inaddr)->s_addr)) + +/* directly map this to the lwip internal functions */ +#define inet_addr(cp) ipaddr_addr(cp) +#define inet_aton(cp, addr) ip4addr_aton(cp, (ip4_addr_t*)addr) +#define inet_ntoa(addr) ip4addr_ntoa((const ip4_addr_t*)&(addr)) +#define inet_ntoa_r(addr, buf, buflen) ip4addr_ntoa_r((const ip4_addr_t*)&(addr), buf, buflen) + +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +#define inet6_addr_from_ip6addr(target_in6addr, source_ip6addr) {(target_in6addr)->un.u32_addr[0] = (source_ip6addr)->addr[0]; \ + (target_in6addr)->un.u32_addr[1] = (source_ip6addr)->addr[1]; \ + (target_in6addr)->un.u32_addr[2] = (source_ip6addr)->addr[2]; \ + (target_in6addr)->un.u32_addr[3] = (source_ip6addr)->addr[3];} +#define inet6_addr_to_ip6addr(target_ip6addr, source_in6addr) {(target_ip6addr)->addr[0] = (source_in6addr)->un.u32_addr[0]; \ + (target_ip6addr)->addr[1] = (source_in6addr)->un.u32_addr[1]; \ + (target_ip6addr)->addr[2] = (source_in6addr)->un.u32_addr[2]; \ + (target_ip6addr)->addr[3] = (source_in6addr)->un.u32_addr[3]; \ + ip6_addr_clear_zone(target_ip6addr);} + +/* directly map this to the lwip internal functions */ +#define inet6_aton(cp, addr) ip6addr_aton(cp, (ip6_addr_t*)addr) +#define inet6_ntoa(addr) ip6addr_ntoa((const ip6_addr_t*)&(addr)) +#define inet6_ntoa_r(addr, buf, buflen) ip6addr_ntoa_r((const ip6_addr_t*)&(addr), buf, buflen) + +#endif /* LWIP_IPV6 */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INET_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h new file mode 100644 index 0000000..46ec2ae --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/inet_chksum.h @@ -0,0 +1,105 @@ +/** + * @file + * IP checksum calculation functions + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INET_CHKSUM_H +#define LWIP_HDR_INET_CHKSUM_H + +#include "lwip/opt.h" + +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" + +/** Swap the bytes in an u16_t: much like lwip_htons() for little-endian */ +#ifndef SWAP_BYTES_IN_WORD +#define SWAP_BYTES_IN_WORD(w) (((w) & 0xff) << 8) | (((w) & 0xff00) >> 8) +#endif /* SWAP_BYTES_IN_WORD */ + +/** Split an u32_t in two u16_ts and add them up */ +#ifndef FOLD_U32T +#define FOLD_U32T(u) ((u32_t)(((u) >> 16) + ((u) & 0x0000ffffUL))) +#endif + +#if LWIP_CHECKSUM_ON_COPY +/** Function-like macro: same as MEMCPY but returns the checksum of copied data + as u16_t */ +# ifndef LWIP_CHKSUM_COPY +# define LWIP_CHKSUM_COPY(dst, src, len) lwip_chksum_copy(dst, src, len) +# ifndef LWIP_CHKSUM_COPY_ALGORITHM +# define LWIP_CHKSUM_COPY_ALGORITHM 1 +# endif /* LWIP_CHKSUM_COPY_ALGORITHM */ +# else /* LWIP_CHKSUM_COPY */ +# define LWIP_CHKSUM_COPY_ALGORITHM 0 +# endif /* LWIP_CHKSUM_COPY */ +#else /* LWIP_CHECKSUM_ON_COPY */ +# define LWIP_CHKSUM_COPY_ALGORITHM 0 +#endif /* LWIP_CHECKSUM_ON_COPY */ + +#ifdef __cplusplus +extern "C" { +#endif + +u16_t inet_chksum(const void *dataptr, u16_t len); +u16_t inet_chksum_pbuf(struct pbuf *p); +#if LWIP_CHKSUM_COPY_ALGORITHM +u16_t lwip_chksum_copy(void *dst, const void *src, u16_t len); +#endif /* LWIP_CHKSUM_COPY_ALGORITHM */ + +#if LWIP_IPV4 +u16_t inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip4_addr_t *src, const ip4_addr_t *dest); +u16_t inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, + u16_t proto_len, u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest); +#endif /* LWIP_IPV4 */ + +#if LWIP_IPV6 +u16_t ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip6_addr_t *src, const ip6_addr_t *dest); +u16_t ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest); +#endif /* LWIP_IPV6 */ + + +u16_t ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, + const ip_addr_t *src, const ip_addr_t *dest); +u16_t ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, + u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INET_H */ + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/init.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/init.h new file mode 100644 index 0000000..48c256b --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/init.h @@ -0,0 +1,100 @@ +/** + * @file + * 
lwIP initialization API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_INIT_H +#define LWIP_HDR_INIT_H + +#include "lwip/opt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup lwip_version Version + * @ingroup lwip + * @{ + */ + +/** X.x.x: Major version of the stack */ +#define LWIP_VERSION_MAJOR 2 +/** x.X.x: Minor version of the stack */ +#define LWIP_VERSION_MINOR 1 +/** x.x.X: Revision of the stack */ +#define LWIP_VERSION_REVISION 2 +/** For release candidates, this is set to 1..254 + * For official releases, this is set to 255 (LWIP_RC_RELEASE) + * For development versions (Git), this is set to 0 (LWIP_RC_DEVELOPMENT) */ +#define LWIP_VERSION_RC LWIP_RC_RELEASE + +/** LWIP_VERSION_RC is set to LWIP_RC_RELEASE for official releases */ +#define LWIP_RC_RELEASE 255 +/** LWIP_VERSION_RC is set to LWIP_RC_DEVELOPMENT for Git versions */ +#define LWIP_RC_DEVELOPMENT 0 + +#define LWIP_VERSION_IS_RELEASE (LWIP_VERSION_RC == LWIP_RC_RELEASE) +#define LWIP_VERSION_IS_DEVELOPMENT (LWIP_VERSION_RC == LWIP_RC_DEVELOPMENT) +#define LWIP_VERSION_IS_RC ((LWIP_VERSION_RC != LWIP_RC_RELEASE) && (LWIP_VERSION_RC != LWIP_RC_DEVELOPMENT)) + +/* Some helper defines to get a version string */ +#define LWIP_VERSTR2(x) #x +#define LWIP_VERSTR(x) LWIP_VERSTR2(x) +#if LWIP_VERSION_IS_RELEASE +#define LWIP_VERSION_STRING_SUFFIX "" +#elif LWIP_VERSION_IS_DEVELOPMENT +#define LWIP_VERSION_STRING_SUFFIX "d" +#else +#define LWIP_VERSION_STRING_SUFFIX "rc" LWIP_VERSTR(LWIP_VERSION_RC) +#endif + +/** Provides the version of the stack */ +#define LWIP_VERSION ((LWIP_VERSION_MAJOR) << 24 | (LWIP_VERSION_MINOR) << 16 | \ + (LWIP_VERSION_REVISION) << 8 | (LWIP_VERSION_RC)) +/** Provides the version of the stack as string */ +#define LWIP_VERSION_STRING LWIP_VERSTR(LWIP_VERSION_MAJOR) "." LWIP_VERSTR(LWIP_VERSION_MINOR) "." 
LWIP_VERSTR(LWIP_VERSION_REVISION) LWIP_VERSION_STRING_SUFFIX + +/** + * @} + */ + +/* Modules initialization */ +void lwip_init(void); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_INIT_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h new file mode 100644 index 0000000..28889d5 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip.h @@ -0,0 +1,330 @@ +/** + * @file + * IP API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP_H +#define LWIP_HDR_IP_H + +#include "lwip/opt.h" + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/ip_addr.h" +#include "lwip/err.h" +#include "lwip/netif.h" +#include "lwip/ip4.h" +#include "lwip/ip6.h" +#include "lwip/prot/ip.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is passed as the destination address to ip_output_if (not + to ip_output), meaning that an IP header already is constructed + in the pbuf. This is used when TCP retransmits. */ +#define LWIP_IP_HDRINCL NULL + +/** pbufs passed to IP must have a ref-count of 1 as their payload pointer + gets altered as the packet is passed down the stack */ +#ifndef LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX +#define LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p) LWIP_ASSERT("p->ref == 1", (p)->ref == 1) +#endif + +#if LWIP_NETIF_USE_HINTS +#define IP_PCB_NETIFHINT ;struct netif_hint netif_hints +#else /* LWIP_NETIF_USE_HINTS */ +#define IP_PCB_NETIFHINT +#endif /* LWIP_NETIF_USE_HINTS */ + +/** This is the common part of all PCB types. It needs to be at the + beginning of a PCB type definition. It is located here so that + changes to this common part are made in one location instead of + having to change all PCB structs. 
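+
+   As an illustrative sketch (not part of lwIP itself): every protocol PCB
+   embeds this block as its first member, exactly as struct ip_pcb below
+   does, e.g.
+
+     struct udp_pcb {
+       IP_PCB;                          (the common leading fields above)
+       ...protocol-specific members...
+     };
+
+   which is what lets the SOF_* option macros further below operate on any
+   PCB type through the same leading fields.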
*/ +#define IP_PCB \ + /* ip addresses in network byte order */ \ + ip_addr_t local_ip; \ + ip_addr_t remote_ip; \ + /* Bound netif index */ \ + u8_t netif_idx; \ + /* Socket options */ \ + u8_t so_options; \ + /* Type Of Service */ \ + u8_t tos; \ + /* Time To Live */ \ + u8_t ttl \ + /* link layer address resolution hint */ \ + IP_PCB_NETIFHINT + +struct ip_pcb { + /* Common members of all PCB types */ + IP_PCB; +}; + +/* + * Option flags per-socket. These are the same like SO_XXX in sockets.h + */ +#define SOF_REUSEADDR 0x04U /* allow local address reuse */ +#define SOF_KEEPALIVE 0x08U /* keep connections alive */ +#define SOF_BROADCAST 0x20U /* permit to send and to receive broadcast messages (see IP_SOF_BROADCAST option) */ + +/* These flags are inherited (e.g. from a listen-pcb to a connection-pcb): */ +#define SOF_INHERITED (SOF_REUSEADDR|SOF_KEEPALIVE) + +/** Global variables of this module, kept in a struct for efficient access using base+index. */ +struct ip_globals +{ + /** The interface that accepted the packet for the current callback invocation. */ + struct netif *current_netif; + /** The interface that received the packet for the current callback invocation. */ + struct netif *current_input_netif; +#if LWIP_IPV4 + /** Header of the input packet currently being processed. */ + const struct ip_hdr *current_ip4_header; +#endif /* LWIP_IPV4 */ +#if LWIP_IPV6 + /** Header of the input IPv6 packet currently being processed. */ + struct ip6_hdr *current_ip6_header; +#endif /* LWIP_IPV6 */ + /** Total header length of current_ip4/6_header (i.e. after this, the UDP/TCP header starts) */ + u16_t current_ip_header_tot_len; + /** Source IP address of current_header */ + ip_addr_t current_iphdr_src; + /** Destination IP address of current_header */ + ip_addr_t current_iphdr_dest; +}; +extern struct ip_globals ip_data; + + +/** Get the interface that accepted the current packet. + * This may or may not be the receiving netif, depending on your netif/network setup. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip_current_netif() (ip_data.current_netif) +/** Get the interface that received the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip_current_input_netif() (ip_data.current_input_netif) +/** Total header length of ip(6)_current_header() (i.e. after this, the UDP/TCP header starts) */ +#define ip_current_header_tot_len() (ip_data.current_ip_header_tot_len) +/** Source IP address of current_header */ +#define ip_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP address of current_header */ +#define ip_current_dest_addr() (&ip_data.current_iphdr_dest) + +#if LWIP_IPV4 && LWIP_IPV6 +/** Get the IPv4 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip4_current_header() ip_data.current_ip4_header +/** Get the IPv6 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. 
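+ *
+ * A minimal usage sketch, assuming a udp_recv() callback (recv_cb and its
+ * body are illustrative only; IP6H_HOPLIM and IPH_TTL come from
+ * lwip/prot/ip6.h and lwip/prot/ip4.h):
+ *
+ *   static void recv_cb(void *arg, struct udp_pcb *pcb, struct pbuf *p,
+ *                       const ip_addr_t *addr, u16_t port)
+ *   {
+ *     if (ip_current_is_v6()) {
+ *       u8_t hoplim = IP6H_HOPLIM(ip6_current_header());
+ *     } else {
+ *       u8_t ttl = IPH_TTL(ip4_current_header());
+ *     }
+ *     pbuf_free(p);
+ *   }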
*/ +#define ip6_current_header() ((const struct ip6_hdr*)(ip_data.current_ip6_header)) +/** Returns TRUE if the current IP input packet is IPv6, FALSE if it is IPv4 */ +#define ip_current_is_v6() (ip6_current_header() != NULL) +/** Source IPv6 address of current_header */ +#define ip6_current_src_addr() (ip_2_ip6(&ip_data.current_iphdr_src)) +/** Destination IPv6 address of current_header */ +#define ip6_current_dest_addr() (ip_2_ip6(&ip_data.current_iphdr_dest)) +/** Get the transport layer protocol */ +#define ip_current_header_proto() (ip_current_is_v6() ? \ + IP6H_NEXTH(ip6_current_header()) :\ + IPH_PROTO(ip4_current_header())) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)((ip_current_is_v6() ? \ + (const u8_t*)ip6_current_header() : (const u8_t*)ip4_current_header()) + ip_current_header_tot_len())) + +/** Source IP4 address of current_header */ +#define ip4_current_src_addr() (ip_2_ip4(&ip_data.current_iphdr_src)) +/** Destination IP4 address of current_header */ +#define ip4_current_dest_addr() (ip_2_ip4(&ip_data.current_iphdr_dest)) + +#elif LWIP_IPV4 /* LWIP_IPV4 && LWIP_IPV6 */ + +/** Get the IPv4 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. */ +#define ip4_current_header() ip_data.current_ip4_header +/** Always returns FALSE when only supporting IPv4 only */ +#define ip_current_is_v6() 0 +/** Get the transport layer protocol */ +#define ip_current_header_proto() IPH_PROTO(ip4_current_header()) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)((const u8_t*)ip4_current_header() + ip_current_header_tot_len())) +/** Source IP4 address of current_header */ +#define ip4_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP4 address of current_header */ +#define ip4_current_dest_addr() (&ip_data.current_iphdr_dest) + +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ + +/** Get the IPv6 header of the current packet. + * This function must only be called from a receive callback (udp_recv, + * raw_recv, tcp_accept). It will return NULL otherwise. 
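+ *
+ * (Sketch: in an IPv6-only build, ip_current_is_v6() below is the constant 1,
+ * so dual-stack application code such as
+ *
+ *   u8_t nexth = ip_current_header_proto();
+ *
+ * still compiles unchanged and here always evaluates to
+ * IP6H_NEXTH(ip6_current_header()).)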
*/ +#define ip6_current_header() ((const struct ip6_hdr*)(ip_data.current_ip6_header)) +/** Always returns TRUE when only supporting IPv6 only */ +#define ip_current_is_v6() 1 +/** Get the transport layer protocol */ +#define ip_current_header_proto() IP6H_NEXTH(ip6_current_header()) +/** Get the transport layer header */ +#define ip_next_header_ptr() ((const void*)(((const u8_t*)ip6_current_header()) + ip_current_header_tot_len())) +/** Source IP6 address of current_header */ +#define ip6_current_src_addr() (&ip_data.current_iphdr_src) +/** Destination IP6 address of current_header */ +#define ip6_current_dest_addr() (&ip_data.current_iphdr_dest) + +#endif /* LWIP_IPV6 */ + +/** Union source address of current_header */ +#define ip_current_src_addr() (&ip_data.current_iphdr_src) +/** Union destination address of current_header */ +#define ip_current_dest_addr() (&ip_data.current_iphdr_dest) + +/** Gets an IP pcb option (SOF_* flags) */ +#define ip_get_option(pcb, opt) ((pcb)->so_options & (opt)) +/** Sets an IP pcb option (SOF_* flags) */ +#define ip_set_option(pcb, opt) ((pcb)->so_options = (u8_t)((pcb)->so_options | (opt))) +/** Resets an IP pcb option (SOF_* flags) */ +#define ip_reset_option(pcb, opt) ((pcb)->so_options = (u8_t)((pcb)->so_options & ~(opt))) + +#if LWIP_IPV4 && LWIP_IPV6 +/** + * @ingroup ip + * Output IP packet, netif is selected by source address + */ +#define ip_output(p, src, dest, ttl, tos, proto) \ + (IP_IS_V6(dest) ? \ + ip6_output(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto) : \ + ip4_output(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto)) +/** + * @ingroup ip + * Output IP packet to specified interface + */ +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif) : \ + ip4_output_if(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif)) +/** + * @ingroup ip + * Output IP packet to interface specifying source address + */ +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if_src(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif) : \ + ip4_output_if_src(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif)) +/** Output IP packet that already includes an IP header. */ +#define ip_output_if_hdrincl(p, src, dest, netif) \ + (IP_IS_V6(dest) ? \ + ip6_output_if(p, ip_2_ip6(src), LWIP_IP_HDRINCL, 0, 0, 0, netif) : \ + ip4_output_if(p, ip_2_ip4(src), LWIP_IP_HDRINCL, 0, 0, 0, netif)) +/** Output IP packet with netif_hint */ +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + (IP_IS_V6(dest) ? \ + ip6_output_hinted(p, ip_2_ip6(src), ip_2_ip6(dest), ttl, tos, proto, netif_hint) : \ + ip4_output_hinted(p, ip_2_ip4(src), ip_2_ip4(dest), ttl, tos, proto, netif_hint)) +/** + * @ingroup ip + * Get netif for address combination. See \ref ip6_route and \ref ip4_route + */ +#define ip_route(src, dest) \ + (IP_IS_V6(dest) ? \ + ip6_route(ip_2_ip6(src), ip_2_ip6(dest)) : \ + ip4_route_src(ip_2_ip4(src), ip_2_ip4(dest))) +/** + * @ingroup ip + * Get netif for IP. + */ +#define ip_netif_get_local_ip(netif, dest) (IP_IS_V6(dest) ? \ + ip6_netif_get_local_ip(netif, ip_2_ip6(dest)) : \ + ip4_netif_get_local_ip(netif)) +#define ip_debug_print(is_ipv6, p) ((is_ipv6) ? 
ip6_debug_print(p) : ip4_debug_print(p)) + +err_t ip_input(struct pbuf *p, struct netif *inp); + +#elif LWIP_IPV4 /* LWIP_IPV4 && LWIP_IPV6 */ + +#define ip_output(p, src, dest, ttl, tos, proto) \ + ip4_output(p, src, dest, ttl, tos, proto) +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + ip4_output_if(p, src, dest, ttl, tos, proto, netif) +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + ip4_output_if_src(p, src, dest, ttl, tos, proto, netif) +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + ip4_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) +#define ip_output_if_hdrincl(p, src, dest, netif) \ + ip4_output_if(p, src, LWIP_IP_HDRINCL, 0, 0, 0, netif) +#define ip_route(src, dest) \ + ip4_route_src(src, dest) +#define ip_netif_get_local_ip(netif, dest) \ + ip4_netif_get_local_ip(netif) +#define ip_debug_print(is_ipv6, p) ip4_debug_print(p) + +#define ip_input ip4_input + +#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */ + +#define ip_output(p, src, dest, ttl, tos, proto) \ + ip6_output(p, src, dest, ttl, tos, proto) +#define ip_output_if(p, src, dest, ttl, tos, proto, netif) \ + ip6_output_if(p, src, dest, ttl, tos, proto, netif) +#define ip_output_if_src(p, src, dest, ttl, tos, proto, netif) \ + ip6_output_if_src(p, src, dest, ttl, tos, proto, netif) +#define ip_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) \ + ip6_output_hinted(p, src, dest, ttl, tos, proto, netif_hint) +#define ip_output_if_hdrincl(p, src, dest, netif) \ + ip6_output_if(p, src, LWIP_IP_HDRINCL, 0, 0, 0, netif) +#define ip_route(src, dest) \ + ip6_route(src, dest) +#define ip_netif_get_local_ip(netif, dest) \ + ip6_netif_get_local_ip(netif, dest) +#define ip_debug_print(is_ipv6, p) ip6_debug_print(p) + +#define ip_input ip6_input + +#endif /* LWIP_IPV6 */ + +#define ip_route_get_local_ip(src, dest, netif, ipaddr) do { \ + (netif) = ip_route(src, dest); \ + (ipaddr) = ip_netif_get_local_ip(netif, dest); \ +}while(0) + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_HDR_IP_H */ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h new file mode 100644 index 0000000..6409c4b --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4.h @@ -0,0 +1,111 @@ +/** + * @file + * IPv4 API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP4_H +#define LWIP_HDR_IP4_H + +#include "lwip/opt.h" + +#if LWIP_IPV4 + +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/ip4_addr.h" +#include "lwip/err.h" +#include "lwip/netif.h" +#include "lwip/prot/ip4.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef LWIP_HOOK_IP4_ROUTE_SRC +#define LWIP_IPV4_SRC_ROUTING 1 +#else +#define LWIP_IPV4_SRC_ROUTING 0 +#endif + +/** Currently, the function ip_output_if_opt() is only used with IGMP */ +#define IP_OPTIONS_SEND (LWIP_IPV4 && LWIP_IGMP) + +#define ip_init() /* Compatibility define, no init needed. */ +struct netif *ip4_route(const ip4_addr_t *dest); +#if LWIP_IPV4_SRC_ROUTING +struct netif *ip4_route_src(const ip4_addr_t *src, const ip4_addr_t *dest); +#else /* LWIP_IPV4_SRC_ROUTING */ +#define ip4_route_src(src, dest) ip4_route(dest) +#endif /* LWIP_IPV4_SRC_ROUTING */ +err_t ip4_input(struct pbuf *p, struct netif *inp); +err_t ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto); +err_t ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif); +err_t ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif); +#if LWIP_NETIF_USE_HINTS +err_t ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif_hint *netif_hint); +#endif /* LWIP_NETIF_USE_HINTS */ +#if IP_OPTIONS_SEND +err_t ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen); +err_t ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, + u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, + u16_t optlen); +#endif /* IP_OPTIONS_SEND */ + +#if LWIP_MULTICAST_TX_OPTIONS +void ip4_set_default_multicast_netif(struct netif* default_multicast_netif); +#endif /* LWIP_MULTICAST_TX_OPTIONS */ + +#define ip4_netif_get_local_ip(netif) (((netif) != NULL) ? netif_ip_addr4(netif) : NULL) + +#if IP_DEBUG +void ip4_debug_print(struct pbuf *p); +#else +#define ip4_debug_print(p) +#endif /* IP_DEBUG */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP_H */ + + diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h new file mode 100644 index 0000000..9990756 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_addr.h @@ -0,0 +1,216 @@ +/** + * @file + * IPv4 address API + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Adam Dunkels + * + */ +#ifndef LWIP_HDR_IP4_ADDR_H +#define LWIP_HDR_IP4_ADDR_H + +#include "lwip/opt.h" +#include "lwip/def.h" + +#if LWIP_IPV4 + +#ifdef __cplusplus +extern "C" { +#endif + +/** This is the aligned version of ip4_addr_t, + used as local variable, on the stack, etc. */ +struct ip4_addr { + u32_t addr; +}; + +/** ip4_addr_t uses a struct for convenience only, so that the same defines can + * operate both on ip4_addr_t as well as on ip4_addr_p_t. */ +typedef struct ip4_addr ip4_addr_t; + +/* Forward declaration to not include netif.h */ +struct netif; + +/** 255.255.255.255 */ +#define IPADDR_NONE ((u32_t)0xffffffffUL) +/** 127.0.0.1 */ +#define IPADDR_LOOPBACK ((u32_t)0x7f000001UL) +/** 0.0.0.0 */ +#define IPADDR_ANY ((u32_t)0x00000000UL) +/** 255.255.255.255 */ +#define IPADDR_BROADCAST ((u32_t)0xffffffffUL) + +/* Definitions of the bits in an Internet address integer. + + On subnets, host and network parts are found according to + the subnet mask, not these masks. */ +#define IP_CLASSA(a) ((((u32_t)(a)) & 0x80000000UL) == 0) +#define IP_CLASSA_NET 0xff000000 +#define IP_CLASSA_NSHIFT 24 +#define IP_CLASSA_HOST (0xffffffff & ~IP_CLASSA_NET) +#define IP_CLASSA_MAX 128 + +#define IP_CLASSB(a) ((((u32_t)(a)) & 0xc0000000UL) == 0x80000000UL) +#define IP_CLASSB_NET 0xffff0000 +#define IP_CLASSB_NSHIFT 16 +#define IP_CLASSB_HOST (0xffffffff & ~IP_CLASSB_NET) +#define IP_CLASSB_MAX 65536 + +#define IP_CLASSC(a) ((((u32_t)(a)) & 0xe0000000UL) == 0xc0000000UL) +#define IP_CLASSC_NET 0xffffff00 +#define IP_CLASSC_NSHIFT 8 +#define IP_CLASSC_HOST (0xffffffff & ~IP_CLASSC_NET) + +#define IP_CLASSD(a) (((u32_t)(a) & 0xf0000000UL) == 0xe0000000UL) +#define IP_CLASSD_NET 0xf0000000 /* These ones aren't really */ +#define IP_CLASSD_NSHIFT 28 /* net and host fields, but */ +#define IP_CLASSD_HOST 0x0fffffff /* routing needn't know. 
*/ +#define IP_MULTICAST(a) IP_CLASSD(a) + +#define IP_EXPERIMENTAL(a) (((u32_t)(a) & 0xf0000000UL) == 0xf0000000UL) +#define IP_BADCLASS(a) (((u32_t)(a) & 0xf0000000UL) == 0xf0000000UL) + +#define IP_LOOPBACKNET 127 /* official! */ + +/** Set an IP address given by the four byte-parts */ +#define IP4_ADDR(ipaddr, a,b,c,d) (ipaddr)->addr = PP_HTONL(LWIP_MAKEU32(a,b,c,d)) + +/** Copy IP address - faster than ip4_addr_set: no NULL check */ +#define ip4_addr_copy(dest, src) ((dest).addr = (src).addr) +/** Safely copy one IP address to another (src may be NULL) */ +#define ip4_addr_set(dest, src) ((dest)->addr = \ + ((src) == NULL ? 0 : \ + (src)->addr)) +/** Set complete address to zero */ +#define ip4_addr_set_zero(ipaddr) ((ipaddr)->addr = 0) +/** Set address to IPADDR_ANY (no need for lwip_htonl()) */ +#define ip4_addr_set_any(ipaddr) ((ipaddr)->addr = IPADDR_ANY) +/** Set address to loopback address */ +#define ip4_addr_set_loopback(ipaddr) ((ipaddr)->addr = PP_HTONL(IPADDR_LOOPBACK)) +/** Check if an address is in the loopback region */ +#define ip4_addr_isloopback(ipaddr) (((ipaddr)->addr & PP_HTONL(IP_CLASSA_NET)) == PP_HTONL(((u32_t)IP_LOOPBACKNET) << 24)) +/** Safely copy one IP address to another and change byte order + * from host- to network-order. */ +#define ip4_addr_set_hton(dest, src) ((dest)->addr = \ + ((src) == NULL ? 0:\ + lwip_htonl((src)->addr))) +/** IPv4 only: set the IP address given as an u32_t */ +#define ip4_addr_set_u32(dest_ipaddr, src_u32) ((dest_ipaddr)->addr = (src_u32)) +/** IPv4 only: get the IP address as an u32_t */ +#define ip4_addr_get_u32(src_ipaddr) ((src_ipaddr)->addr) + +/** Get the network address by combining host address with netmask */ +#define ip4_addr_get_network(target, host, netmask) do { ((target)->addr = ((host)->addr) & ((netmask)->addr)); } while(0) + +/** + * Determine if two address are on the same network. + * + * @arg addr1 IP address 1 + * @arg addr2 IP address 2 + * @arg mask network identifier mask + * @return !0 if the network identifiers of both address match + */ +#define ip4_addr_netcmp(addr1, addr2, mask) (((addr1)->addr & \ + (mask)->addr) == \ + ((addr2)->addr & \ + (mask)->addr)) +#define ip4_addr_cmp(addr1, addr2) ((addr1)->addr == (addr2)->addr) + +#define ip4_addr_isany_val(addr1) ((addr1).addr == IPADDR_ANY) +#define ip4_addr_isany(addr1) ((addr1) == NULL || ip4_addr_isany_val(*(addr1))) + +#define ip4_addr_isbroadcast(addr1, netif) ip4_addr_isbroadcast_u32((addr1)->addr, netif) +u8_t ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif); + +#define ip_addr_netmask_valid(netmask) ip4_addr_netmask_valid((netmask)->addr) +u8_t ip4_addr_netmask_valid(u32_t netmask); + +#define ip4_addr_ismulticast(addr1) (((addr1)->addr & PP_HTONL(0xf0000000UL)) == PP_HTONL(0xe0000000UL)) + +#define ip4_addr_islinklocal(addr1) (((addr1)->addr & PP_HTONL(0xffff0000UL)) == PP_HTONL(0xa9fe0000UL)) + +#define ip4_addr_debug_print_parts(debug, a, b, c, d) \ + LWIP_DEBUGF(debug, ("%" U16_F ".%" U16_F ".%" U16_F ".%" U16_F, a, b, c, d)) +#define ip4_addr_debug_print(debug, ipaddr) \ + ip4_addr_debug_print_parts(debug, \ + (u16_t)((ipaddr) != NULL ? ip4_addr1_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? ip4_addr2_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? ip4_addr3_16(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? 
ip4_addr4_16(ipaddr) : 0)) +#define ip4_addr_debug_print_val(debug, ipaddr) \ + ip4_addr_debug_print_parts(debug, \ + ip4_addr1_16_val(ipaddr), \ + ip4_addr2_16_val(ipaddr), \ + ip4_addr3_16_val(ipaddr), \ + ip4_addr4_16_val(ipaddr)) + +/* Get one byte from the 4-byte address */ +#define ip4_addr_get_byte(ipaddr, idx) (((const u8_t*)(&(ipaddr)->addr))[idx]) +#define ip4_addr1(ipaddr) ip4_addr_get_byte(ipaddr, 0) +#define ip4_addr2(ipaddr) ip4_addr_get_byte(ipaddr, 1) +#define ip4_addr3(ipaddr) ip4_addr_get_byte(ipaddr, 2) +#define ip4_addr4(ipaddr) ip4_addr_get_byte(ipaddr, 3) +/* Get one byte from the 4-byte address, but argument is 'ip4_addr_t', + * not a pointer */ +#define ip4_addr_get_byte_val(ipaddr, idx) ((u8_t)(((ipaddr).addr >> (idx * 8)) & 0xff)) +#define ip4_addr1_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 0) +#define ip4_addr2_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 1) +#define ip4_addr3_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 2) +#define ip4_addr4_val(ipaddr) ip4_addr_get_byte_val(ipaddr, 3) +/* These are cast to u16_t, with the intent that they are often arguments + * to printf using the U16_F format from cc.h. */ +#define ip4_addr1_16(ipaddr) ((u16_t)ip4_addr1(ipaddr)) +#define ip4_addr2_16(ipaddr) ((u16_t)ip4_addr2(ipaddr)) +#define ip4_addr3_16(ipaddr) ((u16_t)ip4_addr3(ipaddr)) +#define ip4_addr4_16(ipaddr) ((u16_t)ip4_addr4(ipaddr)) +#define ip4_addr1_16_val(ipaddr) ((u16_t)ip4_addr1_val(ipaddr)) +#define ip4_addr2_16_val(ipaddr) ((u16_t)ip4_addr2_val(ipaddr)) +#define ip4_addr3_16_val(ipaddr) ((u16_t)ip4_addr3_val(ipaddr)) +#define ip4_addr4_16_val(ipaddr) ((u16_t)ip4_addr4_val(ipaddr)) + +#define IP4ADDR_STRLEN_MAX 16 + +/** For backwards compatibility */ +#define ip_ntoa(ipaddr) ipaddr_ntoa(ipaddr) + +u32_t ipaddr_addr(const char *cp); +int ip4addr_aton(const char *cp, ip4_addr_t *addr); +/** returns ptr to static buffer; not reentrant! */ +char *ip4addr_ntoa(const ip4_addr_t *addr); +char *ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen); + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP_ADDR_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h new file mode 100644 index 0000000..18cd2d0 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip4_frag.h @@ -0,0 +1,100 @@ +/** + * @file + * IP fragmentation/reassembly + */ + +/* + * Copyright (c) 2001-2004 Swedish Institute of Computer Science. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Jani Monoses + * + */ + +#ifndef LWIP_HDR_IP4_FRAG_H +#define LWIP_HDR_IP4_FRAG_H + +#include "lwip/opt.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" +#include "lwip/ip_addr.h" +#include "lwip/ip.h" + +#if LWIP_IPV4 + +#ifdef __cplusplus +extern "C" { +#endif + +#if IP_REASSEMBLY +/* The IP reassembly timer interval in milliseconds. */ +#define IP_TMR_INTERVAL 1000 + +/** IP reassembly helper struct. + * This is exported because memp needs to know the size. + */ +struct ip_reassdata { + struct ip_reassdata *next; + struct pbuf *p; + struct ip_hdr iphdr; + u16_t datagram_len; + u8_t flags; + u8_t timer; +}; + +void ip_reass_init(void); +void ip_reass_tmr(void); +struct pbuf * ip4_reass(struct pbuf *p); +#endif /* IP_REASSEMBLY */ + +#if IP_FRAG +#if !LWIP_NETIF_TX_SINGLE_PBUF +#ifndef LWIP_PBUF_CUSTOM_REF_DEFINED +#define LWIP_PBUF_CUSTOM_REF_DEFINED +/** A custom pbuf that holds a reference to another pbuf, which is freed + * when this custom pbuf is freed. This is used to create a custom PBUF_REF + * that points into the original pbuf. */ +struct pbuf_custom_ref { + /** 'base class' */ + struct pbuf_custom pc; + /** pointer to the original pbuf that is referenced */ + struct pbuf *original; +}; +#endif /* LWIP_PBUF_CUSTOM_REF_DEFINED */ +#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ + +err_t ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest); +#endif /* IP_FRAG */ + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV4 */ + +#endif /* LWIP_HDR_IP4_FRAG_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h new file mode 100644 index 0000000..706f66d --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6.h @@ -0,0 +1,93 @@ +/** + * @file + * + * IPv6 layer. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_H +#define LWIP_HDR_IP6_H + +#include "lwip/opt.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_addr.h" +#include "lwip/prot/ip6.h" +#include "lwip/def.h" +#include "lwip/pbuf.h" +#include "lwip/netif.h" + +#include "lwip/err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct netif *ip6_route(const ip6_addr_t *src, const ip6_addr_t *dest); +const ip_addr_t *ip6_select_source_address(struct netif *netif, const ip6_addr_t * dest); +err_t ip6_input(struct pbuf *p, struct netif *inp); +err_t ip6_output(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth); +err_t ip6_output_if(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif *netif); +err_t ip6_output_if_src(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif *netif); +#if LWIP_NETIF_USE_HINTS +err_t ip6_output_hinted(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, + u8_t hl, u8_t tc, u8_t nexth, struct netif_hint *netif_hint); +#endif /* LWIP_NETIF_USE_HINTS */ +#if LWIP_IPV6_MLD +err_t ip6_options_add_hbh_ra(struct pbuf * p, u8_t nexth, u8_t value); +#endif /* LWIP_IPV6_MLD */ + +#define ip6_netif_get_local_ip(netif, dest) (((netif) != NULL) ? \ + ip6_select_source_address(netif, dest) : NULL) + +#if IP6_DEBUG +void ip6_debug_print(struct pbuf *p); +#else +#define ip6_debug_print(p) +#endif /* IP6_DEBUG */ + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_IP6_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h new file mode 100644 index 0000000..7c1f913 --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_addr.h @@ -0,0 +1,352 @@ +/** + * @file + * + * IPv6 addresses. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * This file is part of the lwIP TCP/IP stack. + * + * Author: Ivan Delamer + * + * Structs and macros for handling IPv6 addresses. + * + * Please coordinate changes and requests with Ivan Delamer + * + */ +#ifndef LWIP_HDR_IP6_ADDR_H +#define LWIP_HDR_IP6_ADDR_H + +#include "lwip/opt.h" +#include "def.h" + +#if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ + +#include "lwip/ip6_zone.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +/** This is the aligned version of ip6_addr_t, + used as local variable, on the stack, etc. */ +struct ip6_addr { + u32_t addr[4]; +#if LWIP_IPV6_SCOPES + u8_t zone; +#endif /* LWIP_IPV6_SCOPES */ +}; + +/** IPv6 address */ +typedef struct ip6_addr ip6_addr_t; + +/** Set an IPv6 partial address given by byte-parts */ +#define IP6_ADDR_PART(ip6addr, index, a,b,c,d) \ + (ip6addr)->addr[index] = PP_HTONL(LWIP_MAKEU32(a,b,c,d)) + +/** Set a full IPv6 address by passing the 4 u32_t indices in network byte order + (use PP_HTONL() for constants) */ +#define IP6_ADDR(ip6addr, idx0, idx1, idx2, idx3) do { \ + (ip6addr)->addr[0] = idx0; \ + (ip6addr)->addr[1] = idx1; \ + (ip6addr)->addr[2] = idx2; \ + (ip6addr)->addr[3] = idx3; \ + ip6_addr_clear_zone(ip6addr); } while(0) + +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK1(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[0]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK2(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[0])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK3(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[1]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK4(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[1])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK5(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[2]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK6(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[2])) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK7(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[3]) >> 16) & 0xffff)) +/** Access address in 16-bit block */ +#define IP6_ADDR_BLOCK8(ip6addr) ((u16_t)((lwip_htonl((ip6addr)->addr[3])) & 0xffff)) + +/** Copy IPv6 address - faster than ip6_addr_set: no NULL check */ +#define ip6_addr_copy(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = 
(src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; \ + ip6_addr_copy_zone((dest), (src)); }while(0) +/** Safely copy one IPv6 address to another (src may be NULL) */ +#define ip6_addr_set(dest, src) do{(dest)->addr[0] = (src) == NULL ? 0 : (src)->addr[0]; \ + (dest)->addr[1] = (src) == NULL ? 0 : (src)->addr[1]; \ + (dest)->addr[2] = (src) == NULL ? 0 : (src)->addr[2]; \ + (dest)->addr[3] = (src) == NULL ? 0 : (src)->addr[3]; \ + ip6_addr_set_zone((dest), (src) == NULL ? IP6_NO_ZONE : ip6_addr_zone(src)); }while(0) + +/** Copy packed IPv6 address to unpacked IPv6 address; zone is not set */ +#define ip6_addr_copy_from_packed(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = (src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; \ + ip6_addr_clear_zone(&dest); }while(0) + +/** Copy unpacked IPv6 address to packed IPv6 address; zone is lost */ +#define ip6_addr_copy_to_packed(dest, src) do{(dest).addr[0] = (src).addr[0]; \ + (dest).addr[1] = (src).addr[1]; \ + (dest).addr[2] = (src).addr[2]; \ + (dest).addr[3] = (src).addr[3]; }while(0) + +/** Set complete address to zero */ +#define ip6_addr_set_zero(ip6addr) do{(ip6addr)->addr[0] = 0; \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = 0; \ + ip6_addr_clear_zone(ip6addr);}while(0) + +/** Set address to ipv6 'any' (no need for lwip_htonl()) */ +#define ip6_addr_set_any(ip6addr) ip6_addr_set_zero(ip6addr) +/** Set address to ipv6 loopback address */ +#define ip6_addr_set_loopback(ip6addr) do{(ip6addr)->addr[0] = 0; \ + (ip6addr)->addr[1] = 0; \ + (ip6addr)->addr[2] = 0; \ + (ip6addr)->addr[3] = PP_HTONL(0x00000001UL); \ + ip6_addr_clear_zone(ip6addr);}while(0) +/** Safely copy one IPv6 address to another and change byte order + * from host- to network-order. */ +#define ip6_addr_set_hton(dest, src) do{(dest)->addr[0] = (src) == NULL ? 0 : lwip_htonl((src)->addr[0]); \ + (dest)->addr[1] = (src) == NULL ? 0 : lwip_htonl((src)->addr[1]); \ + (dest)->addr[2] = (src) == NULL ? 0 : lwip_htonl((src)->addr[2]); \ + (dest)->addr[3] = (src) == NULL ? 0 : lwip_htonl((src)->addr[3]); \ + ip6_addr_set_zone((dest), (src) == NULL ? IP6_NO_ZONE : ip6_addr_zone(src));}while(0) + + +/** Compare IPv6 networks, ignoring zone information. To be used sparingly! */ +#define ip6_addr_netcmp_zoneless(addr1, addr2) (((addr1)->addr[0] == (addr2)->addr[0]) && \ + ((addr1)->addr[1] == (addr2)->addr[1])) + +/** + * Determine if two IPv6 address are on the same network. + * + * @param addr1 IPv6 address 1 + * @param addr2 IPv6 address 2 + * @return 1 if the network identifiers of both address match, 0 if not + */ +#define ip6_addr_netcmp(addr1, addr2) (ip6_addr_netcmp_zoneless((addr1), (addr2)) && \ + ip6_addr_cmp_zone((addr1), (addr2))) + +/* Exact-host comparison *after* ip6_addr_netcmp() succeeded, for efficiency. */ +#define ip6_addr_nethostcmp(addr1, addr2) (((addr1)->addr[2] == (addr2)->addr[2]) && \ + ((addr1)->addr[3] == (addr2)->addr[3])) + +/** Compare IPv6 addresses, ignoring zone information. To be used sparingly! */ +#define ip6_addr_cmp_zoneless(addr1, addr2) (((addr1)->addr[0] == (addr2)->addr[0]) && \ + ((addr1)->addr[1] == (addr2)->addr[1]) && \ + ((addr1)->addr[2] == (addr2)->addr[2]) && \ + ((addr1)->addr[3] == (addr2)->addr[3])) +/** + * Determine if two IPv6 addresses are the same. In particular, the address + * part of both must be the same, and the zone must be compatible. 
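+ *
+ * Example usage (a minimal sketch: a and b are hypothetical locals, and
+ * fe80::1 is an arbitrary link-local value):
+ *
+ *   ip6_addr_t a, b;
+ *   IP6_ADDR(&a, PP_HTONL(0xfe800000UL), 0, 0, PP_HTONL(0x00000001UL));
+ *   IP6_ADDR(&b, PP_HTONL(0xfe800000UL), 0, 0, PP_HTONL(0x00000001UL));
+ *   // Same 128-bit value and no zone on either side: considered equal.
+ *   LWIP_ASSERT("addresses match", ip6_addr_cmp(&a, &b));
+ *   // With LWIP_IPV6_SCOPES enabled, assigning a zone to only one of the
+ *   // two would make ip6_addr_cmp() fail, while ip6_addr_cmp_zoneless()
+ *   // would still report a match.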
+ * + * @param addr1 IPv6 address 1 + * @param addr2 IPv6 address 2 + * @return 1 if the addresses are considered equal, 0 if not + */ +#define ip6_addr_cmp(addr1, addr2) (ip6_addr_cmp_zoneless((addr1), (addr2)) && \ + ip6_addr_cmp_zone((addr1), (addr2))) + +/** Compare IPv6 address to packed address and zone */ +#define ip6_addr_cmp_packed(ip6addr, paddr, zone_idx) (((ip6addr)->addr[0] == (paddr)->addr[0]) && \ + ((ip6addr)->addr[1] == (paddr)->addr[1]) && \ + ((ip6addr)->addr[2] == (paddr)->addr[2]) && \ + ((ip6addr)->addr[3] == (paddr)->addr[3]) && \ + ip6_addr_equals_zone((ip6addr), (zone_idx))) + +#define ip6_get_subnet_id(ip6addr) (lwip_htonl((ip6addr)->addr[2]) & 0x0000ffffUL) + +#define ip6_addr_isany_val(ip6addr) (((ip6addr).addr[0] == 0) && \ + ((ip6addr).addr[1] == 0) && \ + ((ip6addr).addr[2] == 0) && \ + ((ip6addr).addr[3] == 0)) +#define ip6_addr_isany(ip6addr) (((ip6addr) == NULL) || ip6_addr_isany_val(*(ip6addr))) + +#define ip6_addr_isloopback(ip6addr) (((ip6addr)->addr[0] == 0UL) && \ + ((ip6addr)->addr[1] == 0UL) && \ + ((ip6addr)->addr[2] == 0UL) && \ + ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL))) + +#define ip6_addr_isglobal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xe0000000UL)) == PP_HTONL(0x20000000UL)) + +#define ip6_addr_islinklocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xffc00000UL)) == PP_HTONL(0xfe800000UL)) + +#define ip6_addr_issitelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xffc00000UL)) == PP_HTONL(0xfec00000UL)) + +#define ip6_addr_isuniquelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xfe000000UL)) == PP_HTONL(0xfc000000UL)) + +#define ip6_addr_isipv4mappedipv6(ip6addr) (((ip6addr)->addr[0] == 0) && ((ip6addr)->addr[1] == 0) && (((ip6addr)->addr[2]) == PP_HTONL(0x0000FFFFUL))) + +#define ip6_addr_ismulticast(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff000000UL)) == PP_HTONL(0xff000000UL)) +#define ip6_addr_multicast_transient_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00100000UL)) +#define ip6_addr_multicast_prefix_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00200000UL)) +#define ip6_addr_multicast_rendezvous_flag(ip6addr) ((ip6addr)->addr[0] & PP_HTONL(0x00400000UL)) +#define ip6_addr_multicast_scope(ip6addr) ((lwip_htonl((ip6addr)->addr[0]) >> 16) & 0xf) +#define IP6_MULTICAST_SCOPE_RESERVED 0x0 +#define IP6_MULTICAST_SCOPE_RESERVED0 0x0 +#define IP6_MULTICAST_SCOPE_INTERFACE_LOCAL 0x1 +#define IP6_MULTICAST_SCOPE_LINK_LOCAL 0x2 +#define IP6_MULTICAST_SCOPE_RESERVED3 0x3 +#define IP6_MULTICAST_SCOPE_ADMIN_LOCAL 0x4 +#define IP6_MULTICAST_SCOPE_SITE_LOCAL 0x5 +#define IP6_MULTICAST_SCOPE_ORGANIZATION_LOCAL 0x8 +#define IP6_MULTICAST_SCOPE_GLOBAL 0xe +#define IP6_MULTICAST_SCOPE_RESERVEDF 0xf +#define ip6_addr_ismulticast_iflocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff010000UL)) +#define ip6_addr_ismulticast_linklocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff020000UL)) +#define ip6_addr_ismulticast_adminlocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff040000UL)) +#define ip6_addr_ismulticast_sitelocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff050000UL)) +#define ip6_addr_ismulticast_orglocal(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff080000UL)) +#define ip6_addr_ismulticast_global(ip6addr) (((ip6addr)->addr[0] & PP_HTONL(0xff8f0000UL)) == PP_HTONL(0xff0e0000UL)) + +/* Scoping note: while interface-local and link-local multicast addresses do + * have a scope (i.e., they are meaningful 
only in the context of a particular
+ * interface), the following functions are not assigning or comparing zone
+ * indices. The reason for this is backward compatibility. Any call site that
+ * produces a non-global multicast address must assign a zone as
+ * appropriate itself. */
+
+#define ip6_addr_isallnodes_iflocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff010000UL)) && \
+ ((ip6addr)->addr[1] == 0UL) && \
+ ((ip6addr)->addr[2] == 0UL) && \
+ ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL)))
+
+#define ip6_addr_isallnodes_linklocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \
+ ((ip6addr)->addr[1] == 0UL) && \
+ ((ip6addr)->addr[2] == 0UL) && \
+ ((ip6addr)->addr[3] == PP_HTONL(0x00000001UL)))
+#define ip6_addr_set_allnodes_linklocal(ip6addr) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \
+ (ip6addr)->addr[1] = 0; \
+ (ip6addr)->addr[2] = 0; \
+ (ip6addr)->addr[3] = PP_HTONL(0x00000001UL); \
+ ip6_addr_clear_zone(ip6addr); }while(0)
+
+#define ip6_addr_isallrouters_linklocal(ip6addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \
+ ((ip6addr)->addr[1] == 0UL) && \
+ ((ip6addr)->addr[2] == 0UL) && \
+ ((ip6addr)->addr[3] == PP_HTONL(0x00000002UL)))
+#define ip6_addr_set_allrouters_linklocal(ip6addr) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \
+ (ip6addr)->addr[1] = 0; \
+ (ip6addr)->addr[2] = 0; \
+ (ip6addr)->addr[3] = PP_HTONL(0x00000002UL); \
+ ip6_addr_clear_zone(ip6addr); }while(0)
+
+#define ip6_addr_issolicitednode(ip6addr) ( ((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \
+ ((ip6addr)->addr[2] == PP_HTONL(0x00000001UL)) && \
+ (((ip6addr)->addr[3] & PP_HTONL(0xff000000UL)) == PP_HTONL(0xff000000UL)) )
+
+#define ip6_addr_set_solicitednode(ip6addr, if_id) do{(ip6addr)->addr[0] = PP_HTONL(0xff020000UL); \
+ (ip6addr)->addr[1] = 0; \
+ (ip6addr)->addr[2] = PP_HTONL(0x00000001UL); \
+ (ip6addr)->addr[3] = (PP_HTONL(0xff000000UL) | (if_id)); \
+ ip6_addr_clear_zone(ip6addr); }while(0)
+
+#define ip6_addr_cmp_solicitednode(ip6addr, sn_addr) (((ip6addr)->addr[0] == PP_HTONL(0xff020000UL)) && \
+ ((ip6addr)->addr[1] == 0) && \
+ ((ip6addr)->addr[2] == PP_HTONL(0x00000001UL)) && \
+ ((ip6addr)->addr[3] == (PP_HTONL(0xff000000UL) | (sn_addr)->addr[3])))
+
+/* IPv6 address states. */
+#define IP6_ADDR_INVALID 0x00
+#define IP6_ADDR_TENTATIVE 0x08
+#define IP6_ADDR_TENTATIVE_1 0x09 /* 1 probe sent */
+#define IP6_ADDR_TENTATIVE_2 0x0a /* 2 probes sent */
+#define IP6_ADDR_TENTATIVE_3 0x0b /* 3 probes sent */
+#define IP6_ADDR_TENTATIVE_4 0x0c /* 4 probes sent */
+#define IP6_ADDR_TENTATIVE_5 0x0d /* 5 probes sent */
+#define IP6_ADDR_TENTATIVE_6 0x0e /* 6 probes sent */
+#define IP6_ADDR_TENTATIVE_7 0x0f /* 7 probes sent */
+#define IP6_ADDR_VALID 0x10 /* This bit marks an address as valid (preferred or deprecated) */
+#define IP6_ADDR_PREFERRED 0x30
+#define IP6_ADDR_DEPRECATED 0x10 /* Same as VALID (valid but not preferred) */
+#define IP6_ADDR_DUPLICATED 0x40 /* Failed DAD test, not valid */
+
+#define IP6_ADDR_TENTATIVE_COUNT_MASK 0x07 /* 1-7 probes sent */
+
+#define ip6_addr_isinvalid(addr_state) (addr_state == IP6_ADDR_INVALID)
+#define ip6_addr_istentative(addr_state) (addr_state & IP6_ADDR_TENTATIVE)
+#define ip6_addr_isvalid(addr_state) (addr_state & IP6_ADDR_VALID) /* Include valid, preferred, and deprecated.
*/ +#define ip6_addr_ispreferred(addr_state) (addr_state == IP6_ADDR_PREFERRED) +#define ip6_addr_isdeprecated(addr_state) (addr_state == IP6_ADDR_DEPRECATED) +#define ip6_addr_isduplicated(addr_state) (addr_state == IP6_ADDR_DUPLICATED) + +#if LWIP_IPV6_ADDRESS_LIFETIMES +#define IP6_ADDR_LIFE_STATIC (0) +#define IP6_ADDR_LIFE_INFINITE (0xffffffffUL) +#define ip6_addr_life_isstatic(addr_life) ((addr_life) == IP6_ADDR_LIFE_STATIC) +#define ip6_addr_life_isinfinite(addr_life) ((addr_life) == IP6_ADDR_LIFE_INFINITE) +#endif /* LWIP_IPV6_ADDRESS_LIFETIMES */ + +#define ip6_addr_debug_print_parts(debug, a, b, c, d, e, f, g, h) \ + LWIP_DEBUGF(debug, ("%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F ":%" X16_F, \ + a, b, c, d, e, f, g, h)) +#define ip6_addr_debug_print(debug, ipaddr) \ + ip6_addr_debug_print_parts(debug, \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK1(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK2(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK3(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK4(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK5(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK6(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK7(ipaddr) : 0), \ + (u16_t)((ipaddr) != NULL ? IP6_ADDR_BLOCK8(ipaddr) : 0)) +#define ip6_addr_debug_print_val(debug, ipaddr) \ + ip6_addr_debug_print_parts(debug, \ + IP6_ADDR_BLOCK1(&(ipaddr)), \ + IP6_ADDR_BLOCK2(&(ipaddr)), \ + IP6_ADDR_BLOCK3(&(ipaddr)), \ + IP6_ADDR_BLOCK4(&(ipaddr)), \ + IP6_ADDR_BLOCK5(&(ipaddr)), \ + IP6_ADDR_BLOCK6(&(ipaddr)), \ + IP6_ADDR_BLOCK7(&(ipaddr)), \ + IP6_ADDR_BLOCK8(&(ipaddr))) + +#define IP6ADDR_STRLEN_MAX 46 + +int ip6addr_aton(const char *cp, ip6_addr_t *addr); +/** returns ptr to static buffer; not reentrant! */ +char *ip6addr_ntoa(const ip6_addr_t *addr); +char *ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen); + + + +#ifdef __cplusplus +} +#endif + +#endif /* LWIP_IPV6 */ + +#endif /* LWIP_HDR_IP6_ADDR_H */ diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h new file mode 100644 index 0000000..71940dd --- /dev/null +++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_frag.h @@ -0,0 +1,144 @@ +/** + * @file + * + * IPv6 fragmentation and reassembly. + */ + +/* + * Copyright (c) 2010 Inico Technologies Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: Ivan Delamer
+ *
+ *
+ * Please coordinate changes and requests with Ivan Delamer
+ *
+ */
+#ifndef LWIP_HDR_IP6_FRAG_H
+#define LWIP_HDR_IP6_FRAG_H
+
+#include "lwip/opt.h"
+#include "lwip/pbuf.h"
+#include "lwip/ip6_addr.h"
+#include "lwip/ip6.h"
+#include "lwip/netif.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */
+
+/** The IPv6 reassembly timer interval in milliseconds. */
+#define IP6_REASS_TMR_INTERVAL 1000
+
+/** IPV6_FRAG_COPYHEADER==1: for platforms where sizeof(void*) > 4, "struct
+ * ip6_reass_helper" is too large to be stored in the IPv6 fragment header, and
+ * will bleed into the header before it, which may be the IPv6 header or an
+ * extension header. This means that for each first fragment packet, we need to
+ * 1) make a copy of some IPv6 header fields (src+dest) that we need later on,
+ * just in case we do overwrite part of the IPv6 header, and 2) make a copy of
+ * the header data that we overwrote, so that we can restore it before either
+ * completing reassembly or sending an ICMPv6 reply. The last part is true even
+ * if this setting is disabled, but if it is enabled, we need to save a bit
+ * more data (up to the size of a pointer) because we overwrite more. */
+#ifndef IPV6_FRAG_COPYHEADER
+#define IPV6_FRAG_COPYHEADER 0
+#endif
+
+/* With IPV6_FRAG_COPYHEADER==1, a helper structure may (or, depending on the
+ * presence of extensions, may not) overwrite part of the IP header. Therefore,
+ * we copy the fields that we need from the IP header for as long as the helper
+ * structure may still be in place. This is easier than temporarily restoring
+ * those fields in the IP header each time we need to perform checks on them. */
+#if IPV6_FRAG_COPYHEADER
+#define IPV6_FRAG_SRC(ipr) ((ipr)->src)
+#define IPV6_FRAG_DEST(ipr) ((ipr)->dest)
+#else /* IPV6_FRAG_COPYHEADER */
+#define IPV6_FRAG_SRC(ipr) ((ipr)->iphdr->src)
+#define IPV6_FRAG_DEST(ipr) ((ipr)->iphdr->dest)
+#endif /* IPV6_FRAG_COPYHEADER */
+
+/** IPv6 reassembly helper struct.
+ * This is exported because memp needs to know the size.
+ */
+struct ip6_reassdata {
+ struct ip6_reassdata *next;
+ struct pbuf *p;
+ struct ip6_hdr *iphdr; /* pointer to the first (original) IPv6 header */
+#if IPV6_FRAG_COPYHEADER
+ ip6_addr_p_t src; /* copy of the source address in the IP header */
+ ip6_addr_p_t dest; /* copy of the destination address in the IP header */
+ /* This buffer (for the part of the original header that we overwrite) will
+ * be slightly oversized, but we cannot compute the exact size from here. */
+ u8_t orig_hdr[sizeof(struct ip6_frag_hdr) + sizeof(void*)];
+#else /* IPV6_FRAG_COPYHEADER */
+ /* In this case we still need the buffer, for sending ICMPv6 replies. */
+ u8_t orig_hdr[sizeof(struct ip6_frag_hdr)];
+#endif /* IPV6_FRAG_COPYHEADER */
+ u32_t identification;
+ u16_t datagram_len;
+ u8_t nexth;
+ u8_t timer;
+#if LWIP_IPV6_SCOPES
+ u8_t src_zone; /* zone of original packet's source address */
+ u8_t dest_zone; /* zone of original packet's destination address */
+#endif /* LWIP_IPV6_SCOPES */
+};
+
+#define ip6_reass_init() /* Compatibility define */
+void ip6_reass_tmr(void);
+struct pbuf *ip6_reass(struct pbuf *p);
+
+#endif /* LWIP_IPV6 && LWIP_IPV6_REASS */
+
+#if LWIP_IPV6 && LWIP_IPV6_FRAG /* don't build if not configured for use in lwipopts.h */
+
+#ifndef LWIP_PBUF_CUSTOM_REF_DEFINED
+#define LWIP_PBUF_CUSTOM_REF_DEFINED
+/** A custom pbuf that holds a reference to another pbuf, which is freed
+ * when this custom pbuf is freed. This is used to create a custom PBUF_REF
+ * that points into the original pbuf. */
+struct pbuf_custom_ref {
+ /** 'base class' */
+ struct pbuf_custom pc;
+ /** pointer to the original pbuf that is referenced */
+ struct pbuf *original;
+};
+#endif /* LWIP_PBUF_CUSTOM_REF_DEFINED */
+
+err_t ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest);
+
+#endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LWIP_HDR_IP6_FRAG_H */
diff --git a/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h
new file mode 100644
index 0000000..235141e
--- /dev/null
+++ b/micro-ros-stm32_embeddedrtps/micro-ros_embeddedrtps/Middlewares/Third_Party/LwIP/src/include/lwip/ip6_zone.h
@@ -0,0 +1,304 @@
+/**
+ * @file
+ *
+ * IPv6 address scopes, zones, and scoping policy.
+ *
+ * This header provides the means to implement support for IPv6 address scopes,
+ * as per RFC 4007. An address scope can be either global or more constrained.
+ * In lwIP, we say that an address "has a scope" or "is scoped" when its scope
+ * is constrained, in which case the address is meaningful only in a specific
+ * "zone." For unicast addresses, only link-local addresses have a scope; in
+ * that case, the scope is the link. For multicast addresses, there are various
+ * scopes defined by RFC 4007 and others. For any constrained scope, a system
+ * must establish a (potentially one-to-many) mapping between zones and local
+ * interfaces. For example, a link-local address is valid on only one link (its
+ * zone). That link may be attached to one or more local interfaces. The
+ * decisions on which scopes are constrained and the mapping between zones and
+ * interfaces are together what we refer to as the "scoping policy" - more on
+ * this in a bit.
+ *
+ * In lwIP, each IPv6 address has an associated zone index. This zone index may
+ * be set to "no zone" (IP6_NO_ZONE, 0) or an actual zone. We say that an
+ * address "has a zone" or "is zoned" when its zone index is *not* set to "no
+ * zone." In lwIP, in principle, each address should be "properly zoned," which
+ * means that it has a zone if and only if it has a scope. As such, it
+ * is a rule that an unscoped (e.g., global) address must never have a zone.
+ * Even though one could argue that there is always one zone even for global
+ * scopes, this rule exists for implementation simplicity. Violation of the
+ * rule will trigger assertions or otherwise result in undesired behavior.
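+ *
+ * As an illustration of this rule, a minimal sketch (netif is a hypothetical
+ * local pointing at the relevant interface, and LWIP_IPV6_SCOPES is assumed
+ * to be enabled):
+ *
+ *   ip6_addr_t lla, global;
+ *   // fe80::1 is link-local, i.e. scoped, so it must be given a zone;
+ *   // ip6_addr_assign_zone() derives the zone index from the interface.
+ *   IP6_ADDR(&lla, PP_HTONL(0xfe800000UL), 0, 0, PP_HTONL(0x00000001UL));
+ *   ip6_addr_assign_zone(&lla, IP6_UNICAST, netif);
+ *   // 2001:db8::1 is global, i.e. unscoped, so it must carry no zone;
+ *   // IP6_ADDR() has already cleared the zone index here.
+ *   IP6_ADDR(&global, PP_HTONL(0x20010db8UL), 0, 0, PP_HTONL(0x00000001UL));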
+ *
+ * Backward compatibility prevents us from requiring that applications always
+ * provide properly zoned addresses. We do enforce the rule that in the
+ * lwIP link layer (everything below netif->output_ip6() and in particular ND6)
+ * *all* addresses are properly zoned. Thus, on the output paths down the
+ * stack, various places deal with the case of addresses that lack a zone.
+ * Some of them are best-effort for efficiency (e.g. the PCB bind and connect
+ * API calls' attempts to add missing zones); ultimately the IPv6 output
+ * handler (@ref ip6_output_if_src) will set a zone if necessary.
+ *
+ * Aside from dealing with scoped addresses lacking a zone, a proper IPv6
+ * implementation must also ensure that a packet with a scoped source and/or
+ * destination address does not leave its zone. This is currently implemented
+ * in the input and forward functions. However, for output, these checks are
+ * deliberately omitted in order to keep the implementation lightweight. The
+ * routing algorithm in @ref ip6_route will, however, take decisions such that
+ * it will not cause zone violations unless the application sets bad addresses.
+ *
+ * In terms of scoping policy, lwIP implements the default policy from RFC 4007
+ * using macros in this file. This policy considers link-local unicast
+ * addresses and (only) interface-local and link-local multicast addresses as
+ * having a scope. For all these addresses, the zone is equal to the interface.
+ * As shown below in this file, it is possible to implement a custom policy.
+ */
+
+/*
+ * Copyright (c) 2017 The MINIX 3 Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ * Author: David van Moolenbroek