diff --git a/.github/lexicon.txt b/.github/lexicon.txt index ec6577e9e6..853c73f220 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -319,6 +319,7 @@ coprocessor coprocessors coreid coroutinehandle +coverity covfs cp cpacr @@ -340,31 +341,20 @@ cprivilegedonlyaccessarray cpsid cpsie cpsr -cpsr -cpstored cpstored cpu -cpu cr -cr -crc crc crcb crcoroutine -crcoroutine -crdelay crdelay creadonlyarray creadwritearray createevent crend -crend crgint croutine -croutine crqueue -crqueue -crstart crstart crt crtv @@ -1110,6 +1100,7 @@ mikroc min mingw minilistitem +minimalidle mips misadd misc @@ -1539,12 +1530,11 @@ prstc prttc prv prvaddcurrenttasktodelayedlist +prvcheckforrunstatechange prvcheckinterfaces prvchecktaskswaitingtermination prvcopydatatoqueue prvcoroutineflashtask -prvcoroutineflashtask -prvcoroutineflashworktask prvcoroutineflashworktask prvdeletetcb prvexitfunction @@ -1552,12 +1542,12 @@ prvgettimens prvheapinit prvidletask prvinitialisecoroutinelists -prvinitialisecoroutinelists prvinitialisemutex prvinitialisenewstreambuffer prvinitialisenewtimer prvinsertblockintofreelist prvlockqueue +prvminimalidletask prvnotifyqueuesetcontainer prvportmalloc prvportresetpic @@ -1575,6 +1565,7 @@ prvtickcount prvtimercallback prvwritebytestobuffer prvwritemessagetobuffer +prvyieldfortask prvyieldhandler ps psp @@ -1668,13 +1659,10 @@ pxcallbackfunction pxcode pxcontainer pxcoroutinecode -pxcoroutinecode -pxcoroutinewoken pxcoroutinewoken pxcrcb pxcreatedtask pxcurrentcoroutine -pxcurrentcoroutine pxcurrenttcb pxcurrenttcbconst pxcurrenttimerlist @@ -1795,6 +1783,7 @@ rdc rdr rdrf rdy +reacquisition readbit readme readvalue @@ -2439,6 +2428,7 @@ uxbitstoset uxbitstowait uxbitstowaitfor uxcontrolbits +uxcoreaffinitymask uxcriticalnesting uxcurrenteventbits uxcurrentnumberoftasks @@ -2462,9 +2452,12 @@ uxlength uxlistremove uxmaxcount uxmessageswaiting +uxnetworkingcoreaffinitymask uxnewpriority uxoriginalpriority +uxpendedcounts uxportcomparesetextram +uxprevschedulersuspended uxpriority uxprioritytouse uxqueue @@ -2483,6 +2476,7 @@ uxsemaphoregetcount uxsemaphoregetcountfromisr uxstate uxstreambuffernumber +uxtaskattributes uxtaskgetnumberoftasks uxtaskgetstackhighwatermark uxtaskgetsystemstate @@ -2516,6 +2510,7 @@ vapplicationgettimertaskmemory vapplicationidlehook vapplicationirqhandler vapplicationmallocfailedhook +vapplicationminimalidlehook vapplicationsetuptickinterrupt vapplicationsetupticktimerinterrupt vapplicationsetuptimerinterrupt @@ -2618,12 +2613,15 @@ vstreambufferdelete vtask vtaskallocatempuregions vtaskcode +vtaskcoreaffinityget +vtaskcoreaffinityset vtaskdelay vtaskdelayuntil vtaskdelete vtaskendscheduler vtaskentercritical vtaskexitcritical +vtaskexitcriticalfromisr vtaskfunction vtaskgetinfo vtaskgetruntimestats @@ -2632,6 +2630,8 @@ vtasknotify vtasknotifygivefromisr vtasknotifygiveindexedfromisr vtaskplaceoneventlist +vtaskpreemptiondisable +vtaskpreemptionenable vtaskpriorityset vtaskremovefromunorderedeventlist vtaskresume @@ -2645,6 +2645,7 @@ vtasksteptick vtasksuspend vtasksuspendall vtaskswitchcontext +vtaskswitchcontextforcore vtaskusesdpfpu vtickisr vtimercallback @@ -2742,6 +2743,8 @@ xcommandtime xcommsrxqueue xconsttickcount xcopyposition +xcore +xcoreid xcoroutinecreate xcoroutinepreviouslywoken xcoroutinequeue @@ -2824,6 +2827,8 @@ xhigherpriorittaskwoken xhigherprioritytaskwoken xhigherprioritytaskwokenbypost xidletaskhandle +xidletaskhandles +xidletcbbuffers xilinx xindex xinheritanceoccurred @@ -2843,6 +2848,9 @@ xlastwaketime xlength 
xlist xlistend +xlowestpriority +xlowestprioritycore +xlowestprioritytopreempt xmair xmaxcount xmaxexpirycountbeforestopping @@ -2881,6 +2889,7 @@ xmutexbuffer xmutexholder xn xnearstartscheduler +xnetworkingtaskhandle xnewperiod xnewqueue xnextexpiretime @@ -2913,6 +2922,7 @@ xportregistercinterrupthandler xportregisterdump xportstartfirsttask xportstartscheduler +xpreemptiondisable xpsr xqueue xqueueaddtoset @@ -3026,6 +3036,7 @@ xtail xtal xtask xtaskabortdelay +xtaskattribute xtaskbuffer xtaskcallapplicationtaskhook xtaskcatchupticks @@ -3072,6 +3083,7 @@ xtasknumber xtaskremovefromeventlist xtaskresumeall xtaskresumefromisr +xtaskrunstate xtaskswaitingforbits xtaskswaitingtermination xtaskswaitingtoreceive @@ -3108,6 +3120,7 @@ xtimercreate xtimercreated xtimercreatestatic xtimerdelete +xtimergenericcommand xtimergetexpirytime xtimergetperiod xtimergetreloadmode @@ -3143,5 +3156,8 @@ xwantedsize xwasdelayed xwritevalue xxr +xyieldfortask xyieldpending +xyieldpendings xzr +yeilding diff --git a/.github/scripts/kernel_checker.py b/.github/scripts/kernel_checker.py index f24bbf2572..a0706932fe 100755 --- a/.github/scripts/kernel_checker.py +++ b/.github/scripts/kernel_checker.py @@ -87,7 +87,8 @@ r'.*\.git.*', r'.*portable/IAR/AtmelSAM7S64/.*AT91SAM7.*', r'.*portable/GCC/ARM7_AT91SAM7S/.*', - r'.*portable/MPLAB/PIC18F/stdio.h' + r'.*portable/MPLAB/PIC18F/stdio.h', + r'.*portable/ThirdParty/xClang/XCOREAI/*' ] KERNEL_THIRD_PARTY_PATTERNS = [ diff --git a/MISRA.md b/MISRA.md new file mode 100644 index 0000000000..e7ebf77ea6 --- /dev/null +++ b/MISRA.md @@ -0,0 +1,72 @@ +# MISRA Compliance + +FreeRTOS-Kernel conforms to [MISRA C:2012](https://www.misra.org.uk/misra-c) +guidelines, with the deviations listed below. Compliance is checked with +Coverity static analysis. Since the FreeRTOS kernel is designed for +small embedded devices, it needs to have a very small memory footprint and +has to be efficient. To achieve that, and to increase performance, it +deviates from some MISRA rules. The specific deviations, suppressed inline, +are listed below. + +Additionally, [MISRA configuration](#misra-configuration) contains project +wide deviations. + +### Suppressed with Coverity Comments +To find the violation references in the source files, run grep on the source code +as follows ( assuming a rule 8.4 violation, with justification in point 1 ): +``` +grep 'MISRA Ref 8.4.1' . -rI +``` + +#### Rule 8.4 + +_Ref 8.4.1_ + +- MISRA C:2012 Rule 8.4: A compatible declaration shall be visible when an + object or function with external linkage is defined. + This rule requires that a compatible declaration is made available + in a header file when an object with external linkage is defined. + pxCurrentTCB(s) is defined with external linkage but it is only + referenced from the assembly code in the port files. Therefore, adding + a declaration in a header file is not useful as the assembly code will + still need to declare it separately. + +### MISRA configuration + +Copy the content below to `misra.conf` to run Coverity on FreeRTOS-Kernel. + +``` +// MISRA C-2012 Rules +{ + version : "2.0", + standard : "c2012", + title: "Coverity MISRA Configuration", + deviations : [ + // Disable the following rules. + { + deviation: "Directive 4.8", + reason: "HeapRegion_t and HeapStats_t are used only in heap files but declared in portable.h which is included in multiple source files. As a result, these definitions appear in multiple source files where they are not used."
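As a concrete illustration of the suppression scheme the grep command above searches for: a hedged sketch of an inline Coverity suppression at a definition site. The variable and the bracketed summary are hypothetical; only the `MISRA Ref 8.4.1` tag format and the `coverity[...]` annotation style follow this file.

```c
/* MISRA Ref 8.4.1 [Declaration shall be visible] */
/* More details at: https://www.misra.org.uk/misra-c */
/* coverity[misra_c_2012_rule_8_4_violation] */
UBaseType_t uxExampleObjectWithExternalLinkage = 0U;
```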
+ }, + { + deviation: "Directive 4.9", + reason: "FreeRTOS-Kernel is optimised to work on small micro-controllers. To achieve that, function-like macros are used." + }, + { + deviation: "Rule 1.2", + reason: "The __attribute__ tags are used via macros which are defined in port files." + }, + { + deviation: "Rule 3.1", + reason: "We post HTTP links in code comments which contain // inside comment blocks." + }, + { + deviation: "Rule 8.7", + reason: "API functions are not used by the library outside of the files in which they are defined; however, they must be externally visible in order to be used by an application." + }, + { + deviation: "Rule 11.5", + reason: "Allow casts from `void *`. List owner, pvOwner, is stored as `void *` and is cast to various types for use in functions." + } + ] +} +``` \ No newline at end of file diff --git a/event_groups.c b/event_groups.c index d7238d89a3..ea4c79f4b1 100644 --- a/event_groups.c +++ b/event_groups.c @@ -263,7 +263,15 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, { if( xAlreadyYielded == pdFALSE ) { - portYIELD_WITHIN_API(); + #if ( configNUMBER_OF_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ + { + vTaskYieldWithinAPI(); + } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { @@ -415,7 +423,15 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, { if( xAlreadyYielded == pdFALSE ) { - portYIELD_WITHIN_API(); + #if ( configNUMBER_OF_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ + { + vTaskYieldWithinAPI(); + } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { @@ -525,11 +541,11 @@ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) EventGroup_t const * const pxEventBits = xEventGroup; EventBits_t uxReturn; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { uxReturn = pxEventBits->uxEventBits; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return uxReturn; } /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */ diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 831ed037b1..5016ebf5ee 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -86,6 +86,11 @@ #define configUSE_MPU_WRAPPERS_V1 0 #endif +/* Set default value of configNUMBER_OF_CORES to 1 to use single core FreeRTOS. */ +#ifndef configNUMBER_OF_CORES + #define configNUMBER_OF_CORES 1 +#endif + /* Basic FreeRTOS definitions. */ #include "projdefs.h" @@ -164,6 +169,12 @@ #error Missing definition: configUSE_IDLE_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. #endif +#if ( configNUMBER_OF_CORES > 1 ) + #ifndef configUSE_MINIMAL_IDLE_HOOK + #error Missing definition: configUSE_MINIMAL_IDLE_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. + #endif +#endif + #ifndef configUSE_TICK_HOOK #error Missing definition: configUSE_TICK_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details.
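The xEventGroupGetBitsFromISR() change earlier in this diff shows the new taskENTER_CRITICAL_FROM_ISR()/taskEXIT_CRITICAL_FROM_ISR() pairing. A minimal sketch of the same pattern in a hypothetical application ISR, illustration only:

```c
#include "FreeRTOS.h"
#include "task.h"

/* Hypothetical ISR - shows only the save/restore pairing: the value
 * returned on entry must be handed back on exit. */
void vExampleISR( void )
{
    UBaseType_t uxSavedInterruptStatus;

    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    {
        /* Data shared with tasks (and, under SMP, with other cores)
         * can be accessed safely here. */
    }
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
}
```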
#endif @@ -306,6 +317,10 @@ #define configUSE_COUNTING_SEMAPHORES 0 #endif +#ifndef configUSE_TASK_PREEMPTION_DISABLE + #define configUSE_TASK_PREEMPTION_DISABLE 0 +#endif + #ifndef configUSE_ALTERNATIVE_API #define configUSE_ALTERNATIVE_API 0 #endif @@ -353,6 +368,110 @@ #define portSOFTWARE_BARRIER() #endif +#ifndef configRUN_MULTIPLE_PRIORITIES + #define configRUN_MULTIPLE_PRIORITIES 0 +#endif + +#ifndef portGET_CORE_ID + + #if ( configNUMBER_OF_CORES == 1 ) + #define portGET_CORE_ID() 0 + #else + #error If configNUMBER_OF_CORES is set to more than 1 then portGET_CORE_ID must also be defined. + #endif /* configNUMBER_OF_CORES */ + +#endif /* portGET_CORE_ID */ + +#ifndef portYIELD_CORE + + #if ( configNUMBER_OF_CORES == 1 ) + #define portYIELD_CORE( x ) portYIELD() + #else + #error If configNUMBER_OF_CORES is set to more than 1 then portYIELD_CORE must also be defined. + #endif /* configNUMBER_OF_CORES */ + +#endif /* portYIELD_CORE */ + +#ifndef portSET_INTERRUPT_MASK + + #if ( configNUMBER_OF_CORES > 1 ) + #error portSET_INTERRUPT_MASK is required in SMP + #endif + +#endif /* portSET_INTERRUPT_MASK */ + +#ifndef portCLEAR_INTERRUPT_MASK + + #if ( configNUMBER_OF_CORES > 1 ) + #error portCLEAR_INTERRUPT_MASK is required in SMP + #endif + +#endif /* portCLEAR_INTERRUPT_MASK */ + +#ifndef portRELEASE_TASK_LOCK + + #if ( configNUMBER_OF_CORES == 1 ) + #define portRELEASE_TASK_LOCK() + #else + #error portRELEASE_TASK_LOCK is required in SMP + #endif + +#endif /* portRELEASE_TASK_LOCK */ + +#ifndef portGET_TASK_LOCK + + #if ( configNUMBER_OF_CORES == 1 ) + #define portGET_TASK_LOCK() + #else + #error portGET_TASK_LOCK is required in SMP + #endif + +#endif /* portGET_TASK_LOCK */ + +#ifndef portRELEASE_ISR_LOCK + + #if ( configNUMBER_OF_CORES == 1 ) + #define portRELEASE_ISR_LOCK() + #else + #error portRELEASE_ISR_LOCK is required in SMP + #endif + +#endif /* portRELEASE_ISR_LOCK */ + +#ifndef portGET_ISR_LOCK + + #if ( configNUMBER_OF_CORES == 1 ) + #define portGET_ISR_LOCK() + #else + #error portGET_ISR_LOCK is required in SMP + #endif + +#endif /* portGET_ISR_LOCK */ + +#ifndef portCHECK_IF_IN_ISR + + #if ( configNUMBER_OF_CORES > 1 ) + #error portCHECK_IF_IN_ISR is required in SMP + #endif + +#endif /* portCHECK_IF_IN_ISR */ + +#ifndef portENTER_CRITICAL_FROM_ISR + + #if ( configNUMBER_OF_CORES > 1 ) + #error portENTER_CRITICAL_FROM_ISR is required in SMP + #endif + +#endif + +#ifndef portEXIT_CRITICAL_FROM_ISR + + #if ( configNUMBER_OF_CORES > 1 ) + #error portEXIT_CRITICAL_FROM_ISR is required in SMP + #endif + +#endif + /* The timers module relies on xTaskGetSchedulerState(). */ #if configUSE_TIMERS == 1 @@ -368,6 +487,10 @@ #error If configUSE_TIMERS is set to 1 then configTIMER_TASK_STACK_DEPTH must also be defined.
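Taken together, the checks above spell out the porting contract for SMP. A hedged sketch of the block a hypothetical multi-core portmacro.h would provide; every right-hand name is an invented port function, not a real API, and the FROM_ISR pair is typically mapped onto the kernel-provided vTaskEnterCriticalFromISR()/vTaskExitCriticalFromISR(), as the RP2040 port later in this diff does.

```c
/* Hypothetical SMP portmacro.h excerpt - illustration only. */
#define portGET_CORE_ID()              ulExamplePortGetCoreID()
#define portYIELD_CORE( xCoreID )      vExamplePortYieldCore( xCoreID )
#define portSET_INTERRUPT_MASK()       ulExamplePortSetInterruptMask()
#define portCLEAR_INTERRUPT_MASK( x )  vExamplePortClearInterruptMask( x )
#define portGET_TASK_LOCK()            vExamplePortGetTaskLock()
#define portRELEASE_TASK_LOCK()        vExamplePortReleaseTaskLock()
#define portGET_ISR_LOCK()             vExamplePortGetISRLock()
#define portRELEASE_ISR_LOCK()         vExamplePortReleaseISRLock()
#define portCHECK_IF_IN_ISR()          xExamplePortIsInsideInterrupt()
```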
#endif /* configTIMER_TASK_STACK_DEPTH */ + #ifndef portTIMER_CALLBACK_ATTRIBUTE + #define portTIMER_CALLBACK_ATTRIBUTE + #endif /* portTIMER_CALLBACK_ATTRIBUTE */ + #endif /* configUSE_TIMERS */ #ifndef portSET_INTERRUPT_MASK_FROM_ISR @@ -1018,6 +1141,18 @@ #error configUSE_MUTEXES must be set to 1 to use recursive mutexes #endif +#if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) + #error configRUN_MULTIPLE_PRIORITIES must be set to 1 to use task preemption disable +#endif + +#if ( ( configUSE_PREEMPTION == 0 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) + #error configUSE_PREEMPTION must be set to 1 to use task preemption disable +#endif + +#if ( ( configNUMBER_OF_CORES == 1 ) && ( configUSE_TASK_PREEMPTION_DISABLE != 0 ) ) + #error configUSE_TASK_PREEMPTION_DISABLE is not supported in single core FreeRTOS +#endif + #ifndef configINITIAL_TICK_COUNT #define configINITIAL_TICK_COUNT 0 #endif @@ -1156,7 +1291,6 @@ #define configRUN_ADDITIONAL_TESTS 0 #endif - /* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using * dynamically allocated RAM, in which case when any task is deleted it is known * that both the task's stack and TCB need to be freed. Sometimes the @@ -1274,10 +1408,20 @@ typedef struct xSTATIC_TCB #if ( portUSING_MPU_WRAPPERS == 1 ) xMPU_SETTINGS xDummy2; #endif + #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) + UBaseType_t uxDummy26; + #endif StaticListItem_t xDummy3[ 2 ]; UBaseType_t uxDummy5; void * pxDummy6; + #if ( configNUMBER_OF_CORES > 1 ) + BaseType_t xDummy23; + UBaseType_t uxDummy24; + #endif uint8_t ucDummy7[ configMAX_TASK_NAME_LEN ]; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + BaseType_t xDummy25; + #endif #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) void * pxDummy8; #endif diff --git a/include/task.h b/include/task.h index af17ced175..a7e1c795e3 100644 --- a/include/task.h +++ b/include/task.h @@ -89,7 +89,8 @@ * \ingroup Tasks */ struct tskTaskControlBlock; /* The old naming convention is used to prevent breaking kernel aware debuggers. */ -typedef struct tskTaskControlBlock * TaskHandle_t; +typedef struct tskTaskControlBlock * TaskHandle_t; +typedef const struct tskTaskControlBlock * ConstTaskHandle_t; /* * Defines the prototype to which the application task hook function must @@ -171,6 +172,9 @@ typedef struct xTASK_STATUS StackType_t * pxEndOfStack; /* Points to the end address of the task's stack area. */ #endif configSTACK_DEPTH_TYPE usStackHighWaterMark; /* The minimum amount of stack space that has remained for the task since the task was created. The closer this value is to zero the closer the task has come to overflowing its stack. */ + #if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + UBaseType_t uxCoreAffinityMask; /* The core affinity mask for the task */ + #endif } TaskStatus_t; /* Possible return values for eTaskConfirmSleepModeStatus(). */ @@ -190,6 +194,13 @@ typedef enum */ #define tskIDLE_PRIORITY ( ( UBaseType_t ) 0U ) +/** + * Defines affinity to all available cores. + * + * \ingroup TaskUtils + */ +#define tskNO_AFFINITY ( ( UBaseType_t ) -1 ) + /** * task. h * @@ -198,7 +209,7 @@ typedef enum * \defgroup taskYIELD taskYIELD * \ingroup SchedulerControl */ -#define taskYIELD() portYIELD() +#define taskYIELD() portYIELD() /** * task. 
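Since TaskStatus_t gains a uxCoreAffinityMask member above, the existing trace facility can report per-task affinities. A short sketch, assuming configUSE_TRACE_FACILITY == 1, configUSE_CORE_AFFINITY == 1 and configNUMBER_OF_CORES > 1; the array bound and function name are illustrative.

```c
#include "FreeRTOS.h"
#include "task.h"

#define exampleMAX_TASKS    8 /* Illustrative upper bound. */

void vExampleInspectAffinities( void )
{
    TaskStatus_t xStatuses[ exampleMAX_TASKS ];
    UBaseType_t uxCount, ux;

    /* NULL: the total run time is not needed here. */
    uxCount = uxTaskGetSystemState( xStatuses, exampleMAX_TASKS, NULL );

    for( ux = 0; ux < uxCount; ux++ )
    {
        /* Each entry now carries the task's core affinity mask. */
        UBaseType_t uxMask = xStatuses[ ux ].uxCoreAffinityMask;
        ( void ) uxMask;
    }
}
```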
h @@ -212,8 +223,12 @@ typedef enum * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL * \ingroup SchedulerControl */ -#define taskENTER_CRITICAL() portENTER_CRITICAL() -#define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() +#define taskENTER_CRITICAL() portENTER_CRITICAL() +#if ( configNUMBER_OF_CORES == 1 ) + #define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() +#else + #define taskENTER_CRITICAL_FROM_ISR() portENTER_CRITICAL_FROM_ISR() +#endif /** * task. h @@ -227,8 +242,12 @@ typedef enum * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL * \ingroup SchedulerControl */ -#define taskEXIT_CRITICAL() portEXIT_CRITICAL() -#define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) +#define taskEXIT_CRITICAL() portEXIT_CRITICAL() +#if ( configNUMBER_OF_CORES == 1 ) + #define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) +#else + #define taskEXIT_CRITICAL_FROM_ISR( x ) portEXIT_CRITICAL_FROM_ISR( x ) +#endif /** * task. h @@ -238,7 +257,7 @@ typedef enum * \defgroup taskDISABLE_INTERRUPTS taskDISABLE_INTERRUPTS * \ingroup SchedulerControl */ -#define taskDISABLE_INTERRUPTS() portDISABLE_INTERRUPTS() +#define taskDISABLE_INTERRUPTS() portDISABLE_INTERRUPTS() /** * task. h @@ -248,7 +267,7 @@ typedef enum * \defgroup taskENABLE_INTERRUPTS taskENABLE_INTERRUPTS * \ingroup SchedulerControl */ -#define taskENABLE_INTERRUPTS() portENABLE_INTERRUPTS() +#define taskENABLE_INTERRUPTS() portENABLE_INTERRUPTS() /* Definitions returned by xTaskGetSchedulerState(). taskSCHEDULER_SUSPENDED is * 0 to generate more optimal code when configASSERT() is defined as the constant @@ -257,6 +276,8 @@ typedef enum #define taskSCHEDULER_NOT_STARTED ( ( BaseType_t ) 1 ) #define taskSCHEDULER_RUNNING ( ( BaseType_t ) 2 ) +/* Checks if core ID is valid. */ +#define taskVALID_CORE_ID( xCoreID ) ( ( ( ( ( BaseType_t ) 0 <= ( xCoreID ) ) && ( ( xCoreID ) < ( BaseType_t ) configNUMBER_OF_CORES ) ) ) ? ( pdTRUE ) : ( pdFALSE ) ) /*----------------------------------------------------------- * TASK CREATION API @@ -365,6 +386,16 @@ typedef enum TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION; #endif +#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + BaseType_t xTaskCreateAffinitySet( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + /** * task. h * @code{c} @@ -483,6 +514,17 @@ typedef enum StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION; #endif /* configSUPPORT_STATIC_ALLOCATION */ +#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + TaskHandle_t xTaskCreateStaticAffinitySet( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer, + UBaseType_t uxCoreAffinityMask ) PRIVILEGED_FUNCTION; +#endif + /** * task. 
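A minimal usage sketch for the affinity-aware creation API declared above, assuming an SMP configuration; the task function and its name are hypothetical.

```c
#include "FreeRTOS.h"
#include "task.h"

extern void vExampleTaskCode( void * pvParameters );

void vExampleCreatePinnedTask( void )
{
    TaskHandle_t xHandle = NULL;

    /* Bit 0 set: the task may only run on core 0. Pass tskNO_AFFINITY
     * instead to let the scheduler place it on any core. */
    ( void ) xTaskCreateAffinitySet( vExampleTaskCode,
                                     "Pinned",
                                     configMINIMAL_STACK_SIZE,
                                     NULL,
                                     tskIDLE_PRIORITY + 1,
                                     ( UBaseType_t ) ( 1U << 0 ),
                                     &xHandle );
}
```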
h * @code{c} @@ -561,6 +603,12 @@ typedef enum TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; #endif +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + /** * task. h * @code{c} @@ -651,6 +699,12 @@ typedef enum TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; #endif +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + /** * task. h * @code{c} @@ -698,7 +752,7 @@ typedef enum * \defgroup vTaskAllocateMPURegions vTaskAllocateMPURegions * \ingroup Tasks */ -void vTaskAllocateMPURegions( TaskHandle_t xTask, +void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const pxRegions ) PRIVILEGED_FUNCTION; /** @@ -1228,6 +1282,164 @@ void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; */ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; +#if ( configUSE_CORE_AFFINITY == 1 ) + +/** + * @brief Sets the core affinity mask for a task. + * + * It sets the cores on which a task can run. configUSE_CORE_AFFINITY must + * be defined as 1 for this function to be available. + * + * @param xTask The handle of the task to set the core affinity mask for. + * Passing NULL will set the core affinity mask for the calling task. + * + * @param uxCoreAffinityMask A bitwise value that indicates the cores on + * which the task can run. Cores are numbered from 0 to configNUMBER_OF_CORES - 1. + * For example, to ensure that a task can run on core 0 and core 1, set + * uxCoreAffinityMask to 0x03. + * + * Example usage: + * + * // The function that creates task. + * void vAFunction( void ) + * { + * TaskHandle_t xHandle; + * UBaseType_t uxCoreAffinityMask; + * + * // Create a task, storing the handle. + * xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &( xHandle ) ); + * + * // Define the core affinity mask such that this task can only run + * // on core 0 and core 2. + * uxCoreAffinityMask = ( ( 1 << 0 ) | ( 1 << 2 ) ); + * + * //Set the core affinity mask for the task. + * vTaskCoreAffinitySet( xHandle, uxCoreAffinityMask ); + * } + */ + void vTaskCoreAffinitySet( const TaskHandle_t xTask, + UBaseType_t uxCoreAffinityMask ); +#endif + +#if ( configUSE_CORE_AFFINITY == 1 ) + +/** + * @brief Gets the core affinity mask for a task. + * + * configUSE_CORE_AFFINITY must be defined as 1 for this function to be + * available. + * + * @param xTask The handle of the task to get the core affinity mask for. + * Passing NULL will get the core affinity mask for the calling task. + * + * @return The core affinity mask which is a bitwise value that indicates + * the cores on which a task can run. Cores are numbered from 0 to + * configNUMBER_OF_CORES - 1. For example, if a task can run on core 0 and core 1, + * the core affinity mask is 0x03. + * + * Example usage: + * + * // Task handle of the networking task - it is populated elsewhere. 
+ * TaskHandle_t xNetworkingTaskHandle; + * + * void vAFunction( void ) + * { + * TaskHandle_t xHandle; + * UBaseType_t uxNetworkingCoreAffinityMask; + * + * // Create a task, storing the handle. + * xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &( xHandle ) ); + * + * //Get the core affinity mask for the networking task. + * uxNetworkingCoreAffinityMask = vTaskCoreAffinityGet( xNetworkingTaskHandle ); + * + * // Here is a hypothetical scenario, just for the example. Assume that we + * // have 2 cores - Core 0 and core 1. We want to pin the application task to + * // the core different than the networking task to ensure that the + * // application task does not interfere with networking. + * if( ( uxNetworkingCoreAffinityMask & ( 1 << 0 ) ) != 0 ) + * { + * // The networking task can run on core 0, pin our task to core 1. + * vTaskCoreAffinitySet( xHandle, ( 1 << 1 ) ); + * } + * else + * { + * // Otherwise, pin our task to core 0. + * vTaskCoreAffinitySet( xHandle, ( 1 << 0 ) ); + * } + * } + */ + UBaseType_t vTaskCoreAffinityGet( ConstTaskHandle_t xTask ); +#endif + +#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + +/** + * @brief Disables preemption for a task. + * + * @param xTask The handle of the task to disable preemption. Passing NULL + * disables preemption for the calling task. + * + * Example usage: + * + * void vTaskCode( void *pvParameters ) + * { + * // Silence warnings about unused parameters. + * ( void ) pvParameters; + * + * for( ;; ) + * { + * // ... Perform some function here. + * + * // Disable preemption for this task. + * vTaskPreemptionDisable( NULL ); + * + * // The task will not be preempted when it is executing in this portion ... + * + * // ... until the preemption is enabled again. + * vTaskPreemptionEnable( NULL ); + * + * // The task can be preempted when it is executing in this portion. + * } + * } + */ + void vTaskPreemptionDisable( const TaskHandle_t xTask ); +#endif + +#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + +/** + * @brief Enables preemption for a task. + * + * @param xTask The handle of the task to enable preemption. Passing NULL + * enables preemption for the calling task. + * + * Example usage: + * + * void vTaskCode( void *pvParameters ) + * { + * // Silence warnings about unused parameters. + * ( void ) pvParameters; + * + * for( ;; ) + * { + * // ... Perform some function here. + * + * // Disable preemption for this task. + * vTaskPreemptionDisable( NULL ); + * + * // The task will not be preempted when it is executing in this portion ... + * + * // ... until the preemption is enabled again. + * vTaskPreemptionEnable( NULL ); + * + * // The task can be preempted when it is executing in this portion. + * } + * } + */ + void vTaskPreemptionEnable( const TaskHandle_t xTask ); +#endif + /*----------------------------------------------------------- * SCHEDULER CONTROL *----------------------------------------------------------*/ @@ -3099,7 +3311,11 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, * Sets the pointer to the current TCB to the TCB of the highest priority task * that is ready to run. */ -portDONT_DISCARD void vTaskSwitchContext( void ) PRIVILEGED_FUNCTION; +#if ( configNUMBER_OF_CORES == 1 ) + portDONT_DISCARD void vTaskSwitchContext( void ) PRIVILEGED_FUNCTION; +#else + portDONT_DISCARD void vTaskSwitchContext( BaseType_t xCoreID ) PRIVILEGED_FUNCTION; +#endif /* * THESE FUNCTIONS MUST NOT BE USED FROM APPLICATION CODE. 
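The per-core vTaskSwitchContext() signature above is what an SMP port now feeds from its context-switch handler. A conceptual C-level sketch; the function name is invented, and real ports, such as the RP2040 assembly later in this diff, do the equivalent inside their PendSV handler with interrupts masked.

```c
void vExamplePortSwitchContext( void )
{
    BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();

    /* Selects the highest priority ready task that may run on this
     * core and updates the current TCB pointer for this core. */
    vTaskSwitchContext( xCoreID );
}
```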
THEY ARE USED BY @@ -3112,6 +3328,11 @@ TickType_t uxTaskResetEventItemValue( void ) PRIVILEGED_FUNCTION; */ TaskHandle_t xTaskGetCurrentTaskHandle( void ) PRIVILEGED_FUNCTION; +/* + * Return the handle of the task running on specified core. + */ +TaskHandle_t xTaskGetCurrentTaskHandleCPU( BaseType_t xCoreID ) PRIVILEGED_FUNCTION; + /* * Shortcut used by the queue implementation to prevent unnecessary call to * taskYIELD(); @@ -3197,6 +3418,48 @@ TaskHandle_t pvTaskIncrementMutexHeldCount( void ) PRIVILEGED_FUNCTION; */ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION; +/* + * For internal use only. Same as portYIELD_WITHIN_API() in single core FreeRTOS. + * For SMP this is not defined by the port. + */ +void vTaskYieldWithinAPI( void ); + +/* + * This function is only intended for use when implementing a port of the scheduler + * and is only available when portCRITICAL_NESTING_IN_TCB is set to 1 or configNUMBER_OF_CORES + * is greater than 1. This function can be used in the implementation of portENTER_CRITICAL + * if port wants to maintain critical nesting count in TCB in single core FreeRTOS. + * It should be used in the implementation of portENTER_CRITICAL if port is running a + * multiple core FreeRTOS. + */ +void vTaskEnterCritical( void ); + +/* + * This function is only intended for use when implementing a port of the scheduler + * and is only available when portCRITICAL_NESTING_IN_TCB is set to 1 or configNUMBER_OF_CORES + * is greater than 1. This function can be used in the implementation of portEXIT_CRITICAL + * if port wants to maintain critical nesting count in TCB in single core FreeRTOS. + * It should be used in the implementation of portEXIT_CRITICAL if port is running a + * multiple core FreeRTOS. + */ +void vTaskExitCritical( void ); + +/* + * This function is only intended for use when implementing a port of the scheduler + * and is only available when configNUMBER_OF_CORES is greater than 1. This function + * should be used in the implementation of portENTER_CRITICAL_FROM_ISR if port is + * running a multiple core FreeRTOS. + */ +UBaseType_t vTaskEnterCriticalFromISR( void ); + +/* + * This function is only intended for use when implementing a port of the scheduler + * and is only available when configNUMBER_OF_CORES is greater than 1. This function + * should be used in the implementation of portEXIT_CRITICAL_FROM_ISR if port is + * running a multiple core FreeRTOS. + */ +void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); + #if ( portUSING_MPU_WRAPPERS == 1 ) /* diff --git a/include/timers.h b/include/timers.h index 2967a4674c..054ec097fa 100644 --- a/include/timers.h +++ b/include/timers.h @@ -1348,12 +1348,29 @@ TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; * for use by the kernel only. */ BaseType_t xTimerCreateTimerTask( void ) PRIVILEGED_FUNCTION; -BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, - const BaseType_t xCommandID, - const TickType_t xOptionalValue, - BaseType_t * const pxHigherPriorityTaskWoken, - const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; +/* + * Splitting the xTimerGenericCommand into two sub functions and making it a macro + * removes a recursion path when called from ISRs. This is primarily for the XCore + * XCC port which detects the recursion path and throws an error during compilation + * when this is not split. 
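The four kernel functions documented above are exactly what an SMP port maps its critical-section macros onto; the RP2040 portmacro.h further down in this diff does this verbatim. For reference, the mapping looks like:

```c
extern void vTaskEnterCritical( void );
extern void vTaskExitCritical( void );
extern UBaseType_t vTaskEnterCriticalFromISR( void );
extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus );

#define portENTER_CRITICAL()             vTaskEnterCritical()
#define portEXIT_CRITICAL()              vTaskExitCritical()
#define portENTER_CRITICAL_FROM_ISR()    vTaskEnterCriticalFromISR()
#define portEXIT_CRITICAL_FROM_ISR( x )  vTaskExitCriticalFromISR( x )
```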
+ */ +BaseType_t xTimerGenericCommandFromTask( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +BaseType_t xTimerGenericCommandFromISR( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +#define xTimerGenericCommand( xTimer, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait ) \ + ( ( xCommandID ) < tmrFIRST_FROM_ISR_COMMAND ? \ + xTimerGenericCommandFromTask( xTimer, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait ) : \ + xTimerGenericCommandFromISR( xTimer, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait ) ) #if ( configUSE_TRACE_FACILITY == 1 ) void vTimerSetTimerNumber( TimerHandle_t xTimer, UBaseType_t uxTimerNumber ) PRIVILEGED_FUNCTION; diff --git a/portable/ThirdParty/GCC/RP2040/README.md b/portable/ThirdParty/GCC/RP2040/README.md index c50cb4e388..7c4dbe23ad 100644 --- a/portable/ThirdParty/GCC/RP2040/README.md +++ b/portable/ThirdParty/GCC/RP2040/README.md @@ -1,12 +1,13 @@ ## Overview -This directory provides a FreeRTOS-Kernel port that can be used with the Raspberry Pi Pico SDK. It supports: +This directory provides an SMP FreeRTOS-Kernel port that can be used with the Raspberry Pi Pico SDK. It supports: * Simple CMake INTERFACE libraries, to provide the FreeRTOS-Kernel and also the individual allocator types, without copying code into the user's project. - * Running the FreeRTOS-Kernel and tasks on either core 0 or core 1 - * Use of SDK synchronization primitives (such as mutexes, semaphores, queues from pico_sync) between FreeRTOS tasks and code executing on the other core, or in IRQ handlers. + * Running the FreeRTOS-Kernel and tasks on either core 0 or core 1, or both. + * Use of SDK synchronization primitives (such as mutexes, semaphores, queues from pico_sync) between FreeRTOS tasks and code executing on a non FreeRTOS core, or in IRQ handlers. -Note that a FreeRTOS SMP version of this port is also available in the FreeRTOS-Kernel smp branch, which additionally supports utilizing both RP2040 CPU cores for FreeRTOS tasks simultaneously. +Note that whilst this SMP version can be run on just a single (either) core, it is probably +more efficient to use the non SMP version in the main FreeRTOS-Kernel branch in that case. ## Using this port diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h index 973ac39f03..655b8ec744 100644 --- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h +++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h @@ -37,6 +37,8 @@ /* *INDENT-ON* */ #include "pico.h" + #include "hardware/sync.h" + /*----------------------------------------------------------- * Port specific definitions. * @@ -108,14 +110,56 @@ #define xPortSysTickHandler isr_systick #endif +/*-----------------------------------------------------------*/ + +/* Multi-core */ + #define portMAX_CORE_COUNT 2 + + /* Check validity of number of cores specified in config */ + #if ( configNUMBER_OF_CORES < 1 || portMAX_CORE_COUNT < configNUMBER_OF_CORES ) + #error "Invalid number of cores specified in config!" + #endif + + #if ( configTICK_CORE < 0 || configTICK_CORE > configNUMBER_OF_CORES ) + #error "Invalid tick core specified in config!" 
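To make the compile-time dispatch of the xTimerGenericCommand() macro above concrete: tmrCOMMAND_START sits below tmrFIRST_FROM_ISR_COMMAND while tmrCOMMAND_START_FROM_ISR does not, so the two hedged wrappers below (invented names) expand to the task and ISR variants respectively, with no recursion between them.

```c
#include "FreeRTOS.h"
#include "task.h"
#include "timers.h"

BaseType_t xExampleStartFromTask( TimerHandle_t xTimer,
                                  TickType_t xTicksToWait )
{
    /* Expands to xTimerGenericCommandFromTask(). */
    return xTimerGenericCommand( xTimer, tmrCOMMAND_START,
                                 xTaskGetTickCount(), NULL, xTicksToWait );
}

BaseType_t xExampleStartFromISR( TimerHandle_t xTimer,
                                 BaseType_t * pxHigherPriorityTaskWoken )
{
    /* Expands to xTimerGenericCommandFromISR(). */
    return xTimerGenericCommand( xTimer, tmrCOMMAND_START_FROM_ISR,
                                 xTaskGetTickCountFromISR(),
                                 pxHigherPriorityTaskWoken, 0 );
}
```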
+ #endif + /* FreeRTOS core id is always zero based, so always 0 if we're running on only one core */ + #if configNUMBER_OF_CORES == portMAX_CORE_COUNT + #define portGET_CORE_ID() get_core_num() + #else + #define portGET_CORE_ID() 0 + #endif + #define portCHECK_IF_IN_ISR() ({ \ uint32_t ulIPSR; \ __asm volatile ("mrs %0, IPSR" : "=r" (ulIPSR)::); \ ((uint8_t)ulIPSR)>0;}) + void vYieldCore(int xCoreID); + #define portYIELD_CORE(a) vYieldCore(a) + #define portRESTORE_INTERRUPTS(ulState) __asm volatile ("msr PRIMASK,%0"::"r" (ulState) : ) + +/*-----------------------------------------------------------*/ + +/* Critical nesting count management. */ + extern UBaseType_t uxCriticalNestings[ configNUMBER_OF_CORES ]; + #define portGET_CRITICAL_NESTING_COUNT() ( uxCriticalNestings[ portGET_CORE_ID() ] ) + #define portSET_CRITICAL_NESTING_COUNT( x ) ( uxCriticalNestings[ portGET_CORE_ID() ] = ( x ) ) + #define portINCREMENT_CRITICAL_NESTING_COUNT() ( uxCriticalNestings[ portGET_CORE_ID() ]++ ) + #define portDECREMENT_CRITICAL_NESTING_COUNT() ( uxCriticalNestings[ portGET_CORE_ID() ]-- ) + /*-----------------------------------------------------------*/ /* Critical section management. */ + + #define portSET_INTERRUPT_MASK() ({ \ uint32_t ulState; \ __asm volatile ("mrs %0, PRIMASK" : "=r" (ulState)::); \ __asm volatile ( " cpsid i " ::: "memory" ); \ ulState;}) + + #define portCLEAR_INTERRUPT_MASK(ulState) __asm volatile ("msr PRIMASK,%0"::"r" (ulState) : ) + extern uint32_t ulSetInterruptMaskFromISR( void ) __attribute__( ( naked ) ); extern void vClearInterruptMaskFromISR( uint32_t ulMask ) __attribute__( ( naked ) ); #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() @@ -126,10 +170,73 @@ extern void vPortEnableInterrupts(); #define portENABLE_INTERRUPTS() vPortEnableInterrupts() - extern void vPortEnterCritical( void ); - extern void vPortExitCritical( void ); - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() + #if ( configNUMBER_OF_CORES == 1 ) + extern void vPortEnterCritical( void ); + extern void vPortExitCritical( void ); + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() + #else + extern void vTaskEnterCritical( void ); + extern void vTaskExitCritical( void ); + extern UBaseType_t vTaskEnterCriticalFromISR( void ); + extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); + #define portENTER_CRITICAL() vTaskEnterCritical() + #define portEXIT_CRITICAL() vTaskExitCritical() + #define portENTER_CRITICAL_FROM_ISR() vTaskEnterCriticalFromISR() + #define portEXIT_CRITICAL_FROM_ISR( x ) vTaskExitCriticalFromISR( x ) + #endif + + #define portRTOS_SPINLOCK_COUNT 2 + + /* Note this is a single method with uxAcquire parameter since we have + * static vars, the method is always called with a compile time constant for + * uxAcquire, and the compiler should do the right thing!
*/ + static inline void vPortRecursiveLock(uint32_t ulLockNum, spin_lock_t *pxSpinLock, BaseType_t uxAcquire) { + static uint8_t ucOwnedByCore[ portMAX_CORE_COUNT ]; + static uint8_t ucRecursionCountByLock[ portRTOS_SPINLOCK_COUNT ]; + configASSERT(ulLockNum >= 0 && ulLockNum < portRTOS_SPINLOCK_COUNT ); + uint32_t ulCoreNum = get_core_num(); + uint32_t ulLockBit = 1u << ulLockNum; + configASSERT(ulLockBit < 256u ); + if( uxAcquire ) + { + if( __builtin_expect( !*pxSpinLock, 0 ) ) + { + if( ucOwnedByCore[ulCoreNum] & ulLockBit ) + { + configASSERT(ucRecursionCountByLock[ulLockNum] != 255u ); + ucRecursionCountByLock[ulLockNum]++; + return; + } + while ( __builtin_expect( !*pxSpinLock, 0 ) ); + } + __mem_fence_acquire(); + configASSERT(ucRecursionCountByLock[ulLockNum] == 0 ); + ucRecursionCountByLock[ulLockNum] = 1; + ucOwnedByCore[ulCoreNum] |= ulLockBit; + } else { + configASSERT((ucOwnedByCore[ulCoreNum] & ulLockBit) != 0 ); + configASSERT(ucRecursionCountByLock[ulLockNum] != 0 ); + if( !--ucRecursionCountByLock[ulLockNum] ) + { + ucOwnedByCore[ulCoreNum] &= ~ulLockBit; + __mem_fence_release(); + *pxSpinLock = 1; + } + } + } + + #if ( configNUMBER_OF_CORES == 1 ) + #define portGET_ISR_LOCK() + #define portRELEASE_ISR_LOCK() + #define portGET_TASK_LOCK() + #define portRELEASE_TASK_LOCK() + #else + #define portGET_ISR_LOCK() vPortRecursiveLock(0, spin_lock_instance(configSMP_SPINLOCK_0), pdTRUE) + #define portRELEASE_ISR_LOCK() vPortRecursiveLock(0, spin_lock_instance(configSMP_SPINLOCK_0), pdFALSE) + #define portGET_TASK_LOCK() vPortRecursiveLock(1, spin_lock_instance(configSMP_SPINLOCK_1), pdTRUE) + #define portRELEASE_TASK_LOCK() vPortRecursiveLock(1, spin_lock_instance(configSMP_SPINLOCK_1), pdFALSE) + #endif /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h b/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h index 2406bd921f..35c6f24635 100644 --- a/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h +++ b/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h @@ -65,6 +65,25 @@ #endif #endif +#if ( configNUMBER_OF_CORES > 1 ) + /* configTICK_CORE indicates which core should handle the SysTick + * interrupts */ + #ifndef configTICK_CORE + #define configTICK_CORE 0 + #endif +#endif + +/* This SMP port requires two spin locks, which are claimed from the SDK. 
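To illustrate the recursion property vPortRecursiveLock() gives the lock macros above, a hypothetical nested acquisition on one core; these macros are for kernel use, and the wrapper function is invented for the sketch.

```c
static void vExampleNestedKernelLock( void )
{
    portGET_ISR_LOCK();     /* First acquisition: takes the hardware spin
                             * lock and records this core as the owner. */
    portGET_ISR_LOCK();     /* Nested acquisition by the owning core: only
                             * the recursion count is incremented. */
    portRELEASE_ISR_LOCK(); /* Count drops back to 1; lock still held. */
    portRELEASE_ISR_LOCK(); /* Count reaches 0: ownership is cleared and
                             * the hardware spin lock is released. */
}
```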
+ * the spin lock numbers to be used are defined statically and defaulted here + * to the values nominally set aside for RTOS by the SDK */ +#ifndef configSMP_SPINLOCK_0 + #define configSMP_SPINLOCK_0 PICO_SPINLOCK_ID_OS1 +#endif + +#ifndef configSMP_SPINLOCK_1 + #define configSMP_SPINLOCK_1 PICO_SPINLOCK_ID_OS2 +#endif + /* *INDENT-OFF* */ #ifdef __cplusplus } diff --git a/portable/ThirdParty/GCC/RP2040/library.cmake b/portable/ThirdParty/GCC/RP2040/library.cmake index 902a21766a..1db96517af 100644 --- a/portable/ThirdParty/GCC/RP2040/library.cmake +++ b/portable/ThirdParty/GCC/RP2040/library.cmake @@ -33,11 +33,14 @@ target_include_directories(FreeRTOS-Kernel INTERFACE target_link_libraries(FreeRTOS-Kernel INTERFACE FreeRTOS-Kernel-Core pico_base_headers - hardware_exception) + hardware_clocks + hardware_exception + pico_multicore +) target_compile_definitions(FreeRTOS-Kernel INTERFACE LIB_FREERTOS_KERNEL=1 - FREERTOS_KERNEL_SMP=0 + FREE_RTOS_KERNEL_SMP=1 ) add_library(FreeRTOS-Kernel-Static INTERFACE) diff --git a/portable/ThirdParty/GCC/RP2040/port.c b/portable/ThirdParty/GCC/RP2040/port.c index 41dd114a00..73452e068a 100644 --- a/portable/ThirdParty/GCC/RP2040/port.c +++ b/portable/ThirdParty/GCC/RP2040/port.c @@ -46,6 +46,9 @@ #include "pico/multicore.h" #endif /* LIB_PICO_MULTICORE */ +/* TODO : consider to remove this macro. */ +#define portRUNNING_ON_BOTH_CORES ( configNUMBER_OF_CORES == portMAX_CORE_COUNT ) + /* Constants required to manipulate the NVIC. */ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) @@ -112,7 +115,11 @@ static void prvTaskExitError( void ); /* Each task maintains its own interrupt status in the critical nesting * variable. 
This is initialized to 0 to allow vPortEnter/ExitCritical * to be called before the scheduler is started */ -static UBaseType_t uxCriticalNesting; +#if ( configNUMBER_OF_CORES == 1 ) + static UBaseType_t uxCriticalNesting; +#else /* #if ( configNUMBER_OF_CORES == 1 ) */ + UBaseType_t uxCriticalNestings[ configNUMBER_OF_CORES ] = { 0 }; +#endif /* #if ( configNUMBER_OF_CORES == 1 ) */ /*-----------------------------------------------------------*/ @@ -125,13 +132,13 @@ static UBaseType_t uxCriticalNesting; #define pEventGroup (&xStaticEventGroup) #endif /* configSUPPORT_STATIC_ALLOCATION */ static EventGroupHandle_t xEventGroup; - #if ( LIB_PICO_MULTICORE == 1 ) + #if ( portRUNNING_ON_BOTH_CORES == 0 ) static EventBits_t uxCrossCoreEventBits; static spin_lock_t * pxCrossCoreSpinLock; - #endif /* LIB_PICO_MULTICORE */ + #endif - static spin_lock_t * pxYieldSpinLock; - static uint32_t ulYieldSpinLockSaveValue; + static spin_lock_t * pxYieldSpinLock[ configNUMBER_OF_CORES ]; + static uint32_t ulYieldSpinLockSaveValue[ configNUMBER_OF_CORES ]; #endif /* configSUPPORT_PICO_SYNC_INTEROP */ /* @@ -159,9 +166,16 @@ static UBaseType_t uxCriticalNesting; /*-----------------------------------------------------------*/ -#define INVALID_LAUNCH_CORE_NUM 0xffu -static uint8_t ucLaunchCoreNum = INVALID_LAUNCH_CORE_NUM; -#define portIS_FREE_RTOS_CORE() ( ucLaunchCoreNum == get_core_num() ) +#define INVALID_PRIMARY_CORE_NUM 0xffu +/* The primary core number (the own which has the SysTick handler) */ +static uint8_t ucPrimaryCoreNum = INVALID_PRIMARY_CORE_NUM; + +/* Note: portIS_FREE_RTOS_CORE() also returns false until the scheduler is started */ +#if ( portRUNNING_ON_BOTH_CORES == 1 ) + #define portIS_FREE_RTOS_CORE() (ucPrimaryCoreNum != INVALID_PRIMARY_CORE_NUM) +#else + #define portIS_FREE_RTOS_CORE() (ucPrimaryCoreNum == get_core_num()) +#endif /* * See header file for description. @@ -204,6 +218,7 @@ void vPortSVCHandler( void ) void vPortStartFirstTask( void ) { +#if ( configNUMBER_OF_CORES == 1 ) __asm volatile ( " .syntax unified \n" " ldr r2, pxCurrentTCBConst1 \n"/* Obtain location of pxCurrentTCB. */ @@ -220,9 +235,49 @@ void vPortStartFirstTask( void ) " pop {r2} \n"/* Pop and discard XPSR. */ " cpsie i \n"/* The first task has its context and interrupts can be enabled. */ " bx r3 \n"/* Finally, jump to the user defined task code. */ - " .align 4 \n" - "pxCurrentTCBConst1: .word pxCurrentTCB\n" + " .align 4 \n" + "pxCurrentTCBConst1: .word pxCurrentTCB\n" + ); +#else + __asm volatile ( + " .syntax unified \n" + #if configRESET_STACK_POINTER + " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n" + " ldr r0, [r0] \n" + " msr msp, r0 \n" /* Set the msp back to the start of the stack. */ + #endif /* configRESET_STACK_POINTER */ + #if portRUNNING_ON_BOTH_CORES + " adr r1, ulAsmLocals \n"/* Get the location of the current TCB for the current core. */ + " ldmia r1!, {r2, r3} \n" + " ldr r2, [r2] \n"/* r2 = Core number */ + " lsls r2, #2 \n" + " ldr r3, [r3, r2] \n"/* r3 = pxCurrentTCBs[get_core_num()] */ + #else + " ldr r3, =pxCurrentTCBs \n" + " ldr r3, [r3] \n" /* r3 = pxCurrentTCBs[0] */ + #endif /* portRUNNING_ON_BOTH_CORES */ + " ldr r0, [r3] \n"/* The first item in pxCurrentTCB is the task top of stack. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " movs r0, #2 \n"/* Switch to the psp stack. 
*/ + " msr CONTROL, r0 \n" + " isb \n" + " pop {r0-r5} \n"/* Pop the registers that are saved automatically. */ + " mov lr, r5 \n"/* lr is now in r5. */ + " pop {r3} \n"/* Return address is now in r3. */ + " pop {r2} \n"/* Pop and discard XPSR. */ + " cpsie i \n"/* The first task has its context and interrupts can be enabled. */ + " bx r3 \n"/* Finally, jump to the user defined task code. */ + #if portRUNNING_ON_BOTH_CORES + " \n" + " .align 4 \n" + "ulAsmLocals: \n" + " .word 0xD0000000 \n"/* SIO */ + " .word pxCurrentTCBs \n" + #endif /* portRUNNING_ON_BOTH_CORES */ ); +#endif } /*-----------------------------------------------------------*/ @@ -232,66 +287,158 @@ void vPortStartFirstTask( void ) /* We must remove the contents (which we don't care about) * to clear the IRQ */ multicore_fifo_drain(); + + /* And explicitly clear any other IRQ flags. */ multicore_fifo_clear_irq(); - BaseType_t xHigherPriorityTaskWoken = pdFALSE; - uint32_t ulSave = spin_lock_blocking( pxCrossCoreSpinLock ); - EventBits_t ulBits = uxCrossCoreEventBits; - uxCrossCoreEventBits &= ~ulBits; - spin_unlock( pxCrossCoreSpinLock, ulSave ); - xEventGroupSetBitsFromISR( xEventGroup, ulBits, &xHigherPriorityTaskWoken ); - portYIELD_FROM_ISR( xHigherPriorityTaskWoken ); + + #if ( portRUNNING_ON_BOTH_CORES == 1 ) + portYIELD_FROM_ISR( pdTRUE ); + #elif ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) + BaseType_t xHigherPriorityTaskWoken = pdFALSE; + uint32_t ulSave = spin_lock_blocking( pxCrossCoreSpinLock ); + EventBits_t ulBits = uxCrossCoreEventBits; + uxCrossCoreEventBits &= ~ulBits; + spin_unlock( pxCrossCoreSpinLock, ulSave ); + xEventGroupSetBitsFromISR( xEventGroup, ulBits, &xHigherPriorityTaskWoken ); + portYIELD_FROM_ISR( xHigherPriorityTaskWoken ); + #endif /* portRUNNING_ON_BOTH_CORES */ } #endif -/* - * See header file for description. - */ -BaseType_t xPortStartScheduler( void ) -{ - /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ - portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; - portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; - - #if (configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1) - exception_set_exclusive_handler( PENDSV_EXCEPTION, xPortPendSVHandler ); - exception_set_exclusive_handler( SYSTICK_EXCEPTION, xPortSysTickHandler ); - exception_set_exclusive_handler( SVCALL_EXCEPTION, vPortSVCHandler ); - #endif +#if ( configNUMBER_OF_CORES > 1 ) + /* + * See header file for description. + */ + static BaseType_t xPortStartSchedulerOnCore() + { + if( ucPrimaryCoreNum == get_core_num()) + { + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + #if ( configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1 ) + exception_set_exclusive_handler( SYSTICK_EXCEPTION, xPortSysTickHandler ); + #endif + } - /* Start the timer that generates the tick ISR. Interrupts are disabled - * here already. */ - vPortSetupTimerInterrupt(); - - /* Initialise the critical nesting count ready for the first task. 
*/ - uxCriticalNesting = 0; - - ucLaunchCoreNum = get_core_num(); - #if (LIB_PICO_MULTICORE == 1) - #if ( configSUPPORT_PICO_SYNC_INTEROP == 1) - multicore_fifo_clear_irq(); - multicore_fifo_drain(); - uint32_t irq_num = 15 + get_core_num(); - irq_set_priority( irq_num, portMIN_INTERRUPT_PRIORITY ); - irq_set_exclusive_handler( irq_num, prvFIFOInterruptHandler ); - irq_set_enabled( irq_num, 1 ); + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + + #if ( configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1 ) + exception_set_exclusive_handler( PENDSV_EXCEPTION, xPortPendSVHandler ); + exception_set_exclusive_handler( SVCALL_EXCEPTION, vPortSVCHandler ); #endif + + /* Install FIFO handler to receive interrupt from other core */ + multicore_fifo_clear_irq(); + multicore_fifo_drain(); + uint32_t ulIRQNum = SIO_IRQ_PROC0 + get_core_num(); + irq_set_priority( ulIRQNum, portMIN_INTERRUPT_PRIORITY ); + irq_set_exclusive_handler( ulIRQNum, prvFIFOInterruptHandler ); + irq_set_enabled( ulIRQNum, 1 ); + + /* Start the first task. */ + vPortStartFirstTask(); + + /* Should never get here as the tasks will now be executing! Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimisation does not remove the + * symbol. */ + vTaskSwitchContext( portGET_CORE_ID() ); + prvTaskExitError(); + + /* Should not get here! */ + return 0; + } + + #if portRUNNING_ON_BOTH_CORES + static void prvDisableInterruptsAndPortStartSchedulerOnCore( void ) + { + portDISABLE_INTERRUPTS(); + xPortStartSchedulerOnCore(); + } #endif - /* Start the first task. */ - vPortStartFirstTask(); + /* + * See header file for description. + */ + BaseType_t xPortStartScheduler( void ) + { + configASSERT( ucPrimaryCoreNum == INVALID_PRIMARY_CORE_NUM ); - /* Should never get here as the tasks will now be executing! Call the task - * exit error function to prevent compiler warnings about a static function - * not being called in the case that the application writer overrides this - * functionality by defining configTASK_RETURN_ADDRESS. Call - * vTaskSwitchContext() so link time optimisation does not remove the - * symbol. */ - vTaskSwitchContext(); - prvTaskExitError(); + /* No one else should use these! */ + spin_lock_claim( configSMP_SPINLOCK_0 ); + spin_lock_claim( configSMP_SPINLOCK_1 ); + + #if portRUNNING_ON_BOTH_CORES + ucPrimaryCoreNum = configTICK_CORE; + configASSERT( get_core_num() == 0) ; // we must be started on core 0 + multicore_launch_core1( prvDisableInterruptsAndPortStartSchedulerOnCore ); + #else + ucPrimaryCoreNum = get_core_num(); + #endif + xPortStartSchedulerOnCore(); + + /* Should not get here! */ + return 0; + } + +#else + /* + * See header file for description. + */ + BaseType_t xPortStartScheduler( void ) + { + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if (configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1) + exception_set_exclusive_handler( PENDSV_EXCEPTION, xPortPendSVHandler ); + exception_set_exclusive_handler( SYSTICK_EXCEPTION, xPortSysTickHandler ); + exception_set_exclusive_handler( SVCALL_EXCEPTION, vPortSVCHandler ); + #endif + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. 
*/ + vPortSetupTimerInterrupt(); + + /* Initialise the critical nesting count ready for the first task. */ + uxCriticalNesting = 0; + + ucPrimaryCoreNum = get_core_num(); + #if (LIB_PICO_MULTICORE == 1) + #if ( configSUPPORT_PICO_SYNC_INTEROP == 1) + multicore_fifo_clear_irq(); + multicore_fifo_drain(); + uint32_t irq_num = 15 + get_core_num(); + irq_set_priority( irq_num, portMIN_INTERRUPT_PRIORITY ); + irq_set_exclusive_handler( irq_num, prvFIFOInterruptHandler ); + irq_set_enabled( irq_num, 1 ); + #endif + #endif + + /* Start the first task. */ + vPortStartFirstTask(); + + /* Should never get here as the tasks will now be executing! Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimisation does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here! */ + return 0; + } +#endif - /* Should not get here! */ - return 0; -} /*-----------------------------------------------------------*/ void vPortEndScheduler( void ) @@ -305,8 +452,8 @@ void vPortYield( void ) { #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) /* We are not in an ISR, and pxYieldSpinLock is always dealt with and - * cleared interrupts are re-enabled, so should be NULL */ - configASSERT( pxYieldSpinLock == NULL ); + * cleared when interrupts are re-enabled, so should be NULL */ + configASSERT( pxYieldSpinLock[ portGET_CORE_ID() ] == NULL ); #endif /* configSUPPORT_PICO_SYNC_INTEROP */ /* Set a PendSV to request a context switch. */ @@ -320,31 +467,38 @@ void vPortYield( void ) /*-----------------------------------------------------------*/ -void vPortEnterCritical( void ) -{ - portDISABLE_INTERRUPTS(); - uxCriticalNesting++; - __asm volatile ( "dsb" ::: "memory" ); - __asm volatile ( "isb" ); -} +#if ( configNUMBER_OF_CORES == 1 ) + void vPortEnterCritical( void ) + { + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); + } +#endif /* #if ( configNUMBER_OF_CORES == 1 ) */ /*-----------------------------------------------------------*/ -void vPortExitCritical( void ) -{ - configASSERT( uxCriticalNesting ); - uxCriticalNesting--; - if( uxCriticalNesting == 0 ) +#if ( configNUMBER_OF_CORES == 1 ) + void vPortExitCritical( void ) { - portENABLE_INTERRUPTS(); + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } } -} +#endif /* #if ( configNUMBER_OF_CORES == 1 ) */ -void vPortEnableInterrupts() { +void vPortEnableInterrupts( void ) +{ #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) - if( pxYieldSpinLock ) + int xCoreID = portGET_CORE_ID(); + if( pxYieldSpinLock[xCoreID] ) { - spin_unlock(pxYieldSpinLock, ulYieldSpinLockSaveValue); - pxYieldSpinLock = NULL; + spin_lock_t* const pxTmpLock = pxYieldSpinLock[xCoreID]; + pxYieldSpinLock[xCoreID] = NULL; + spin_unlock( pxTmpLock, ulYieldSpinLockSaveValue[xCoreID] ); } #endif __asm volatile ( " cpsie i " ::: "memory" ); @@ -371,12 +525,25 @@ void vClearInterruptMaskFromISR( __attribute__( ( unused ) ) uint32_t ulMask ) ::: "memory" ); } + +/*-----------------------------------------------------------*/ + +void vYieldCore( int xCoreID ) +{ + configASSERT(xCoreID != portGET_CORE_ID()); + #if portRUNNING_ON_BOTH_CORES + /* Non blocking, will cause interrupt on other core if the queue isn't 
already full, + in which case an IRQ must be pending */ + sio_hw->fifo_wr = 0; + #endif +} + /*-----------------------------------------------------------*/ void xPortPendSVHandler( void ) { /* This is a naked function. */ - +#if ( configNUMBER_OF_CORES == 1 ) __asm volatile ( " .syntax unified \n" @@ -452,9 +619,103 @@ void xPortPendSVHandler( void ) " ldmia r0!, {r4-r7} \n"/* Pop low registers. */ " \n" " bx r3 \n" - " .align 4 \n" - "pxCurrentTCBConst2: .word pxCurrentTCB \n" + " .align 4 \n" + "pxCurrentTCBConst2: .word pxCurrentTCB \n" + ); +#else + __asm volatile + ( + " .syntax unified \n" + " mrs r1, psp \n" + " \n" + " adr r0, ulAsmLocals2 \n"/* Get the location of the current TCB for the current core. */ + " ldmia r0!, {r2, r3} \n" + #if portRUNNING_ON_BOTH_CORES + " ldr r0, [r2] \n"/* r0 = Core number */ + " lsls r0, r0, #2 \n" + " adds r3, r0 \n"/* r3 = &pxCurrentTCBs[get_core_num()] */ + #else + " \n"/* r3 = &pxCurrentTCBs[0] */ + #endif /* portRUNNING_ON_BOTH_CORES */ + " ldr r0, [r3] \n"/* r0 = pxCurrentTCB */ + " \n" + " subs r1, r1, #32 \n"/* Make space for the remaining low registers. */ + " str r1, [r0] \n"/* Save the new top of stack. */ + " stmia r1!, {r4-r7} \n"/* Store the low registers that are not saved automatically. */ + " mov r4, r8 \n"/* Store the high registers. */ + " mov r5, r9 \n" + " mov r6, r10 \n" + " mov r7, r11 \n" + " stmia r1!, {r4-r7} \n" + #if portUSE_DIVIDER_SAVE_RESTORE + /* We expect that the divider is ready at this point (which is + * necessary to safely save/restore), because: + * a) if we have not been interrupted since we entered this method, + * then >8 cycles have clearly passed, so the divider is done + * b) if we were interrupted in the interim, then any "safe" - i.e. + * does the right thing in an IRQ - use of the divider should + * have waited for any in-process divide to complete, saved and + * then fully restored the result, thus the result is ready in + * that case too. */ + " ldr r4, [r2, #0x60] \n"/* SIO_DIV_UDIVIDEND_OFFSET */ + " ldr r5, [r2, #0x64] \n"/* SIO_DIV_UDIVISOR_OFFSET */ + " ldr r6, [r2, #0x74] \n"/* SIO_DIV_REMAINDER_OFFSET */ + " ldr r7, [r2, #0x70] \n"/* SIO_DIV_QUOTIENT_OFFSET */ + /* We actually save the divider state in the 4 words below + * our recorded stack pointer, so as not to disrupt the stack + * frame expected by debuggers - this is addressed by + * portEXTRA_STACK_SIZE */ + " subs r1, r1, #48 \n" + " stmia r1!, {r4-r7} \n" + #endif /* portUSE_DIVIDER_SAVE_RESTORE */ + #if portRUNNING_ON_BOTH_CORES + " ldr r0, [r2] \n"/* r0 = Core number */ + #else + " movs r0, #0 \n" + #endif /* portRUNNING_ON_BOTH_CORES */ + " push {r3, r14} \n" + " cpsid i \n" + " bl vTaskSwitchContext \n" + " cpsie i \n" + " pop {r2, r3} \n"/* lr goes in r3. r2 now holds tcb pointer. */ + " \n" + " ldr r1, [r2] \n" + " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */ + " adds r0, r0, #16 \n"/* Move to the high registers. */ + " ldmia r0!, {r4-r7} \n"/* Pop the high registers. */ + " mov r8, r4 \n" + " mov r9, r5 \n" + " mov r10, r6 \n" + " mov r11, r7 \n" + " \n" + " msr psp, r0 \n"/* Remember the new top of stack for the task. */ + " \n" + #if portUSE_DIVIDER_SAVE_RESTORE + " movs r2, #0xd \n"/* Pop the divider state. */ + " lsls r2, #28 \n" + " subs r0, r0, #48 \n"/* Go back for the divider state */ + " ldmia r0!, {r4-r7} \n"/* Pop the divider state. 
*/ + /* Note: always restore via SIO_DIV_UDIVI*, because we will overwrite the + * results, stopping the calculation anyway; however, the sign of the results + * is adjusted by the h/w at read time based on whether the last started + * division was signed and the inputs' signs differed */ + " str r4, [r2, #0x60] \n"/* SIO_DIV_UDIVIDEND_OFFSET */ + " str r5, [r2, #0x64] \n"/* SIO_DIV_UDIVISOR_OFFSET */ + " str r6, [r2, #0x74] \n"/* SIO_DIV_REMAINDER_OFFSET */ + " str r7, [r2, #0x70] \n"/* SIO_DIV_QUOTIENT_OFFSET */ + #else + " subs r0, r0, #32 \n"/* Go back for the low registers that are not automatically restored. */ + #endif /* portUSE_DIVIDER_SAVE_RESTORE */ + " ldmia r0!, {r4-r7} \n"/* Pop low registers. */ + " \n" + " bx r3 \n" + " \n" + " .align 4 \n" + "ulAsmLocals2: \n" + " .word 0xD0000000 \n"/* SIO */ + " .word pxCurrentTCBs \n" ); +#endif } /*-----------------------------------------------------------*/ @@ -462,7 +723,7 @@ void xPortSysTickHandler( void ) { uint32_t ulPreviousMask; - ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + ulPreviousMask = taskENTER_CRITICAL_FROM_ISR(); { /* Increment the RTOS tick. */ if( xTaskIncrementTick() != pdFALSE ) { portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); + taskEXIT_CRITICAL_FROM_ISR( ulPreviousMask ); } /*-----------------------------------------------------------*/ @@ -729,6 +990,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) void vPortLockInternalSpinUnlockWithWait( struct lock_core * pxLock, uint32_t ulSave ) { configASSERT( !portCHECK_IF_IN_ISR() ); + // note no need to check LIB_PICO_MULTICORE, as this always returns true if that is not defined if( !portIS_FREE_RTOS_CORE() ) { spin_unlock(pxLock->spin_lock, ulSave ); @@ -736,14 +998,15 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) } else { - configASSERT( pxYieldSpinLock == NULL ); + configASSERT( pxYieldSpinLock[ portGET_CORE_ID() ] == NULL ); // we want to hold the lock until the event bits have been set; since interrupts are currently disabled // by the spinlock, we can defer until portENABLE_INTERRUPTS is called, which is always called when // the scheduler is unlocked during this call configASSERT(pxLock->spin_lock); - pxYieldSpinLock = pxLock->spin_lock; - ulYieldSpinLockSaveValue = ulSave; + int xCoreID = portGET_CORE_ID(); + pxYieldSpinLock[xCoreID] = pxLock->spin_lock; + ulYieldSpinLockSaveValue[xCoreID] = ulSave; xEventGroupWaitBits( xEventGroup, prvGetEventGroupBit(pxLock->spin_lock), pdTRUE, pdFALSE, portMAX_DELAY); } @@ -771,7 +1034,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) else { __sev(); - #if ( LIB_PICO_MULTICORE == 1) + #if ( portRUNNING_ON_BOTH_CORES == 0 ) /* We could send the bits across the FIFO, which would have required us to block here if the FIFO was full, * or we could have just set all bits on the other side; however, it seems reasonable instead to take * the hit of another spin lock to protect an accurate bit set.
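* The cross-core lock is held only for the brief bit update itself.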
*/ @@ -787,7 +1050,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) } /* This causes a fifo irq on the other (FreeRTOS) core, which will set the event bits */ sio_hw->fifo_wr = 0; - #endif /* LIB_PICO_MULTICORE */ + #endif /* portRUNNING_ON_BOTH_CORES == 0 */ spin_unlock(pxLock->spin_lock, ulSave); } } @@ -803,7 +1066,8 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) } else { - configASSERT( pxYieldSpinLock == NULL ); + configASSERT( portIS_FREE_RTOS_CORE() ); + configASSERT( pxYieldSpinLock[ portGET_CORE_ID() ] == NULL ); TickType_t uxTicksToWait = prvGetTicksToWaitBefore( uxUntil ); if( uxTicksToWait ) @@ -812,14 +1076,12 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) * by the spinlock, we can defer until portENABLE_INTERRUPTS is called which is always called when * the scheduler is unlocked during this call */ configASSERT(pxLock->spin_lock); - pxYieldSpinLock = pxLock->spin_lock; - ulYieldSpinLockSaveValue = ulSave; + int xCoreID = portGET_CORE_ID(); + pxYieldSpinLock[xCoreID] = pxLock->spin_lock; + ulYieldSpinLockSaveValue[xCoreID] = ulSave; xEventGroupWaitBits( xEventGroup, prvGetEventGroupBit(pxLock->spin_lock), pdTRUE, pdFALSE, uxTicksToWait ); - /* sanity check that interrupts were disabled, then re-enabled during the call, which will have - * taken care of the yield */ - configASSERT( pxYieldSpinLock == NULL ); } else { @@ -845,7 +1107,7 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) { /* This must be done even before the scheduler is started, as the spin lock * is used by the overrides of the SDK wait/notify primitives */ - #if ( LIB_PICO_MULTICORE == 1 ) + #if ( portRUNNING_ON_BOTH_CORES == 0 ) pxCrossCoreSpinLock = spin_lock_instance( next_striped_spin_lock_num() ); #endif /* portRUNNING_ON_BOTH_CORES */ diff --git a/portable/ThirdParty/xClang/XCOREAI/port.c b/portable/ThirdParty/xClang/XCOREAI/port.c new file mode 100644 index 0000000000..0d73073830 --- /dev/null +++ b/portable/ThirdParty/xClang/XCOREAI/port.c @@ -0,0 +1,254 @@ +// Copyright (c) 2019, XMOS Ltd, All rights reserved + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" +#include +#include +#include +#include + +static hwtimer_t xKernelTimer; + +uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ] = { pdFALSE }; + +/*-----------------------------------------------------------*/ + +void vIntercoreInterruptISR( void ) +{ + int xCoreID; + +// debug_printf( "In KCALL: %u\n", ulData ); + xCoreID = rtos_core_id_get(); + ulPortYieldRequired[ xCoreID ] = pdTRUE; +} +/*-----------------------------------------------------------*/ + +DEFINE_RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR, pvData ) +{ + uint32_t ulLastTrigger; + uint32_t ulNow; + int xCoreID; + UBaseType_t uxSavedInterruptStatus; + + xCoreID = 0; + + configASSERT( xCoreID == rtos_core_id_get() ); + + /* Need the next interrupt to be scheduled relative to + * the current trigger time, rather than the current + * time. */ + ulLastTrigger = hwtimer_get_trigger_time( xKernelTimer ); + + /* Check to see if the ISR is late. If it is, we don't + * want to schedule the next interrupt to be in the past.
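+ * A trigger time that has already passed would not fire until the 32-bit timer wrapped, so a late tick is rescheduled relative to the current time instead.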
*/ + ulNow = hwtimer_get_time( xKernelTimer ); + if( ulNow - ulLastTrigger >= configCPU_CLOCK_HZ / configTICK_RATE_HZ ) + { + ulLastTrigger = ulNow; + } + + ulLastTrigger += configCPU_CLOCK_HZ / configTICK_RATE_HZ; + hwtimer_change_trigger_time( xKernelTimer, ulLastTrigger ); + +#if configUPDATE_RTOS_TIME_FROM_TICK_ISR == 1 + rtos_time_increment( RTOS_TICK_PERIOD( configTICK_RATE_HZ ) ); +#endif + + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + if( xTaskIncrementTick() != pdFALSE ) + { + ulPortYieldRequired[ xCoreID ] = pdTRUE; + } + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); +} +/*-----------------------------------------------------------*/ + +void vPortYieldOtherCore( int xOtherCoreID ) +{ + int xCoreID; + + /* + * This function must be called from within a critical section. + */ + + xCoreID = rtos_core_id_get(); + +// debug_printf("%d->%d\n", xCoreID, xOtherCoreID); + +// debug_printf("Yield core %d from %d\n", xOtherCoreID, xCoreID ); + + rtos_irq( xOtherCoreID, xCoreID ); +} +/*-----------------------------------------------------------*/ + +static int prvCoreInit( void ) +{ + int xCoreID; + + xCoreID = rtos_core_register(); + debug_printf( "Logical Core %d initializing as FreeRTOS Core %d\n", get_logical_core_id(), xCoreID ); + + asm volatile ( + "ldap r11, kexcept\n\t" + "set kep, r11\n\t" + : + : + : "r11" + ); + + rtos_irq_enable( configNUMBER_OF_CORES ); + + /* + * All threads wait here until all have enabled IRQs + */ + while( rtos_irq_ready() == pdFALSE ); + + if( xCoreID == 0 ) + { + uint32_t ulNow; + ulNow = hwtimer_get_time( xKernelTimer ); +// debug_printf( "The time is now (%u)\n", ulNow ); + + ulNow += configCPU_CLOCK_HZ / configTICK_RATE_HZ; + + triggerable_setup_interrupt_callback( xKernelTimer, NULL, RTOS_INTERRUPT_CALLBACK( pxKernelTimerISR ) ); + hwtimer_set_trigger_time( xKernelTimer, ulNow ); + triggerable_enable_trigger( xKernelTimer ); + } + + return xCoreID; +} +/*-----------------------------------------------------------*/ + +DEFINE_RTOS_KERNEL_ENTRY( void, vPortStartSchedulerOnCore, void ) +{ + int xCoreID; + + xCoreID = prvCoreInit(); + + #if( configUSE_CORE_INIT_HOOK == 1 ) + { + extern void vApplicationCoreInitHook( BaseType_t xCoreID ); + + vApplicationCoreInitHook( xCoreID ); + } + #endif + + debug_printf( "FreeRTOS Core %d initialized\n", xCoreID ); + + /* + * Restore the context of the first thread + * to run and jump into it. + */ + asm volatile ( + "mov r6, %0\n\t" /* R6 must be the FreeRTOS core ID*/ + "ldaw r5, dp[pxCurrentTCBs]\n\t" /* R5 must be the TCB list which is indexed by R6 */ + "bu _freertos_restore_ctx\n\t" + : /* no outputs */ + : "r"(xCoreID) + : "r5", "r6" + ); +} +/*-----------------------------------------------------------*/ + +/*-----------------------------------------------------------*/ +/* Public functions required by all ports below: */ +/*-----------------------------------------------------------*/ + +/* + * See header file for description. + */ +StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) +{ + //debug_printf( "Top of stack was %p for task %p\n", pxTopOfStack, pxCode ); + /* + * Grow the thread's stack by portTHREAD_CONTEXT_STACK_GROWTH + * so we can push the context onto it. + */ + pxTopOfStack -= portTHREAD_CONTEXT_STACK_GROWTH; + + uint32_t dp; + uint32_t cp; + + /* + * We need to get the current CP and DP pointers. 
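+ * They are placed into the new task's initial context below, so every task starts with the constant-pool and data pointers of the context that created it.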
+ */ + asm volatile ( + "ldaw r11, cp[0]\n\t" /* get CP into R11 */ + "mov %0, r11\n\t" /* get R11 (CP) into cp */ + "ldaw r11, dp[0]\n\t" /* get DP into R11 */ + "mov %1, r11\n\t" /* get R11 (DP) into dp */ + : "=r"(cp), "=r"(dp) /* output 0 is cp, output 1 is dp */ + : /* there are no inputs */ + : "r11" /* R11 gets clobbered */ + ); + + /* + * Push the thread context onto the stack. + * Saved PC will point to the new thread's + * entry pointer. + * Interrupts will default to enabled. + * KEDI is also set to enable dual issue mode + * upon kernel entry. + */ + pxTopOfStack[ 1 ] = ( StackType_t ) pxCode; /* SP[1] := SPC */ + pxTopOfStack[ 2 ] = XS1_SR_IEBLE_MASK + | XS1_SR_KEDI_MASK; /* SP[2] := SSR */ + pxTopOfStack[ 3 ] = 0x00000000; /* SP[3] := SED */ + pxTopOfStack[ 4 ] = 0x00000000; /* SP[4] := ET */ + pxTopOfStack[ 5 ] = dp; /* SP[5] := DP */ + pxTopOfStack[ 6 ] = cp; /* SP[6] := CP */ + pxTopOfStack[ 7 ] = 0x00000000; /* SP[7] := LR */ + pxTopOfStack[ 8 ] = ( StackType_t ) pvParameters; /* SP[8] := R0 */ + pxTopOfStack[ 9 ] = 0x01010101; /* SP[9] := R1 */ + pxTopOfStack[ 10 ] = 0x02020202; /* SP[10] := R2 */ + pxTopOfStack[ 11 ] = 0x03030303; /* SP[11] := R3 */ + pxTopOfStack[ 12 ] = 0x04040404; /* SP[12] := R4 */ + pxTopOfStack[ 13 ] = 0x05050505; /* SP[13] := R5 */ + pxTopOfStack[ 14 ] = 0x06060606; /* SP[14] := R6 */ + pxTopOfStack[ 15 ] = 0x07070707; /* SP[15] := R7 */ + pxTopOfStack[ 16 ] = 0x08080808; /* SP[16] := R8 */ + pxTopOfStack[ 17 ] = 0x09090909; /* SP[17] := R9 */ + pxTopOfStack[ 18 ] = 0x10101010; /* SP[18] := R10 */ + pxTopOfStack[ 19 ] = 0x11111111; /* SP[19] := R11 */ + pxTopOfStack[ 20 ] = 0x00000000; /* SP[20] := vH and vSR */ + memset(&pxTopOfStack[21], 0, 32); /* SP[21 - 28] := vR */ + memset(&pxTopOfStack[29], 1, 32); /* SP[29 - 36] := vD */ + memset(&pxTopOfStack[37], 2, 32); /* SP[37 - 44] := vC */ + + //debug_printf( "Top of stack is now %p for task %p\n", pxTopOfStack, pxCode ); + + /* + * Returns the new top of the stack + */ + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +void vPortStartSMPScheduler( void ); + +/* + * See header file for description. + */ +BaseType_t xPortStartScheduler( void ) +{ + if( ( configNUMBER_OF_CORES > portMAX_CORE_COUNT ) || ( configNUMBER_OF_CORES <= 0 ) ) + { + return pdFAIL; + } + + rtos_locks_initialize(); + xKernelTimer = hwtimer_alloc(); + + vPortStartSMPScheduler(); + + return pdPASS; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) +{ + /* Do not implement. 
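Ports may assume that the scheduler never stops once it has been started.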
*/ +} +/*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/xClang/XCOREAI/port.xc b/portable/ThirdParty/xClang/XCOREAI/port.xc new file mode 100644 index 0000000000..926b15086c --- /dev/null +++ b/portable/ThirdParty/xClang/XCOREAI/port.xc @@ -0,0 +1,26 @@ +/* + * port.xc + * + * Created on: Jul 31, 2019 + * Author: mbruno + */ + +//#include "rtos_support.h" + +extern "C" { + +#include "FreeRTOSConfig.h" /* to get configNUMBER_OF_CORES */ +#ifndef configNUMBER_OF_CORES +#define configNUMBER_OF_CORES 1 +#endif + +void __xcore_interrupt_permitted_ugs_vPortStartSchedulerOnCore(void); + +} /* extern "C" */ + +void vPortStartSMPScheduler( void ) +{ + par (int i = 0; i < configNUMBER_OF_CORES; i++) { + __xcore_interrupt_permitted_ugs_vPortStartSchedulerOnCore(); + } +} diff --git a/portable/ThirdParty/xClang/XCOREAI/portasm.S b/portable/ThirdParty/xClang/XCOREAI/portasm.S new file mode 100644 index 0000000000..7445672a08 --- /dev/null +++ b/portable/ThirdParty/xClang/XCOREAI/portasm.S @@ -0,0 +1,189 @@ +// Copyright (c) 2020, XMOS Ltd, All rights reserved + +#include "rtos_support_rtos_config.h" + +/* The FreeRTOS interrupt code calls vTaskSwitchContext. +Therefore it must be added to the rtos_isr group with the +rest of the ISR callback functions. */ +.weak _fptrgroup.rtos_isr.nstackwords.group +.add_to_set _fptrgroup.rtos_isr.nstackwords.group, vTaskSwitchContext.nstackwords, vTaskSwitchContext + +.globl kexcept +.align 128 /* align the kernel section to 128 bytes */ +.type kexcept,@function +.issue_mode dual +.cc_top kexcept.function, kexcept +kexcept: + ldc r11, 0x0008 + shl r11, r11, 16 + ldc r9, 0x0080 + or r11, r11, r9 + bau r11 //_TrapHandler is at 0x00080080. TODO: Is it always? Why can't I access the symbol _TrapHandler? + +_yield: + {set sp, r4 /* Restore the task's SP to save the rest of its context. */ + get r11, id} /* Get the logical core ID into r11. */ + ldaw r0, dp[rtos_core_map] + ldw r0, r0[r11] /* Translate to the RTOS core ID into r0 */ + bu _yield_continue /* Skip the ulPortYieldRequired check and jump right to */ + /* the context save and switch. Also skips saving SPC */ + /* since the kcall handler has already saved it. */ + +.align 64 +kcall: + /* start saving the thread's context */ + extsp RTOS_SUPPORT_INTERRUPT_STACK_GROWTH + stw r1, sp[9] + stw r11, sp[19] + + /* kcall sets SPC to the instruction of the kcall rather than the next instruction */ + /* so we need to adjust the SPC value that we save to the stack: */ + stw spc, sp[1] /* save the saved program counter onto the stack... */ + ldw r1, sp[1] /* so that we can load it into r1 (which we have already saved). */ + add r1, r1, 4 /* Add 4 to the spc to make it point to the instruction after the kcall. */ + {stw r1, sp[1] /* Now save it to the stack. */ + + /* kcall uses the same common function as interrupt callbacks. */ + /* tell it to call _yield above. */ + ldap r11, _yield} + mov r1, r11 + + /* fall into rtos_interrupt_callback_common */ + +.globl rtos_interrupt_callback_common +rtos_interrupt_callback_common: + /* This is the body of the RTOS _xcore_c_interrupt_callback_XXX functions. */ + /* r1 = interrupt_callback_t function */ + + /* Save the thread's context onto the thread's stack. */ + /* The stack was extended for this by the wrapper function. */ + /* Begin only by saving some registers. The rest will be saved */ + /* later if vTaskSwitchContext() needs to be called. */ + /* DP and CP need to be saved because these are restored for the kernel ISR.
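The callback runs with the kernel's DP and CP, so the task's values must survive it.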
*/ + /* LR needs to be saved because it is clobbered when calling the callback. */ + /* r0-r3, and r11 need to be saved because the callback may clobber them. */ + /* r4 is saved because it is used here to hold the task SP. */ + + stw dp, sp[5] + stw cp, sp[6] + stw lr, sp[7] + stw r0, sp[8] +/*stw r1, sp[9] already saved by the wrapper function. */ + stw r2, sp[10] + stw r3, sp[11] + {stw r4, sp[12] +/*stw r11, sp[19] already saved by the wrapper function. */ + + ldaw r4, sp[0]} /* Get value of current stackpointer into r4. */ + + {kentsp 0 /* switch to the kernel stack. */ + /* The value 0 is safe to use since we don't need the SP */ + /* that it saves to KSP[0]. We already have it in r4. */ + + get r11, ed} /* Get the event data... */ + ldw dp, sp[3] /* (Restore CP and DP required for the RTOS ISR */ + ldw cp, sp[4] /* in case the active thread has modified them.) */ + {mov r0, r11 /* ...into the first argument for the callback function, */ + bla r1} /* and call the callback function. */ + + {set sp, r4 /* Restore the task's SP now. */ + + get r11, id} /* Get the logical core ID into r11. */ + ldaw r0, dp[rtos_core_map] + ldw r0, r0[r11] /* Translate to the RTOS core ID into r0. */ + ldaw r2, dp[ulPortYieldRequired] /* Get the yield required array into r2. */ + ldw r1, r2[r0] /* Is a yield required for this core? */ + {bf r1, _freertos_restore_ctx_partial /* If not, restore the context now. */ + ldc r1, 0} + stw r1, r2[r0] /* Otherwise, clear the yield required flag. */ + + /* Save the rest of the current task's context. */ + + /* Save standard xs2 regs */ + stw spc, sp[1] +_yield_continue: + stw ssr, sp[2] + stw sed, sp[3] + stw et, sp[4] + stw r5, sp[13] + stw r6, sp[14] + stw r7, sp[15] + stw r8, sp[16] + stw r9, sp[17] + stw r10, sp[18] +#if 1 + /* Save VPU status and headroom */ + vgetc r11 + {stw r11, sp[20] + /* Save VPU regs */ + ldaw r11, sp[21]} + {vstr r11[0] + ldaw r11, sp[29]} + {vstd r11[0] + ldaw r11, sp[37]} + vstc r11[0] +#endif + ldaw r5, dp[pxCurrentTCBs] /* Get the current TCB array into r5. */ + ldw r1, r5[r0] /* Get this core's current TCB pointer into r1. */ + stw r4, r1[0x0] /* Save the current task's SP to the first */ + /* word (top of stack) in the current TCB. */ + + {kentsp 0 /* switch back to the kernel stack. */ + + mov r6, r0} /* copy the RTOS core ID into r6 so we don't lose it. */ + ldap r11, vTaskSwitchContext + bla r11 /* Finally call vTaskSwitchContext(core_id) now that the task's */ + /* entire context is saved. Note the core id in r0 is the argument. */ + +//krestsp 0 /* unnecessary since KSP is already set and the SP */ + /* is being restored next from the current TCB. */ + +.globl _freertos_restore_ctx +_freertos_restore_ctx: + + ldw r0, r5[r6] /* get this core's current TCB pointer into r0 */ + ldw r0, r0[0x0] /* Get the top of the stack from the current TCB... */ + set sp, r0 /* into the stack pointer register. 
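From here the restore below mirrors the save sequence in reverse.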
*/ + + /* Restore the current task's context */ +#if 1 + /* Restore VPU regs */ + ldaw r11, sp[37] + {vldc r11[0] + ldaw r11, sp[29]} + {vldd r11[0] + ldaw r11, sp[21]} + vldr r11[0] + /* Restore VPU status and headroom */ + ldw r11, sp[20] + vsetc r11 +#endif + /* Restore standard xs2 regs */ + ldw spc, sp[1] + ldw ssr, sp[2] + ldw sed, sp[3] + ldw et, sp[4] + ldw r5, sp[13] + ldw r6, sp[14] + ldw r7, sp[15] + ldw r8, sp[16] + ldw r9, sp[17] + ldw r10, sp[18] +_freertos_restore_ctx_partial: + ldw dp, sp[5] + ldw cp, sp[6] + ldw lr, sp[7] + ldw r0, sp[8] + ldw r1, sp[9] + ldw r2, sp[10] + ldw r3, sp[11] + ldw r4, sp[12] + {ldw r11, sp[19] + + /* shrink the stack by the size of the context just restored */ + ldaw sp, sp[RTOS_SUPPORT_INTERRUPT_STACK_GROWTH]} + + kret /* exit kernel mode and return to the thread */ + +.cc_bottom kexcept.function + diff --git a/portable/ThirdParty/xClang/XCOREAI/portmacro.h b/portable/ThirdParty/xClang/XCOREAI/portmacro.h new file mode 100644 index 0000000000..36907b258b --- /dev/null +++ b/portable/ThirdParty/xClang/XCOREAI/portmacro.h @@ -0,0 +1,215 @@ +// Copyright (c) 2020, XMOS Ltd, All rights reserved + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifndef __ASSEMBLER__ + +/* Inclusion of xc1.h will result in clock being defined as a type. + * By default, FreeRTOS will require standard time.h, where clock is a function. + */ +#ifndef USE_XCORE_CLOCK_TYPE +#define _clock_defined +#endif + +#include +#include "rtos_support.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Type definitions. */ +#define portSTACK_TYPE uint32_t +typedef portSTACK_TYPE StackType_t; +typedef double portDOUBLE; +typedef int32_t BaseType_t; +typedef uint32_t UBaseType_t; + +#define portBASE_TYPE BaseType_t + +#if( configUSE_16_BIT_TICKS == 1 ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#else + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + + /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 +#endif +/*-----------------------------------------------------------*/ + +#endif /* __ASSEMBLER__ */ + +/* Architecture specifics. These can be used by assembly files as well. */ +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portCRITICAL_NESTING_IN_TCB 1 +#define portMAX_CORE_COUNT 8 +#ifndef configNUMBER_OF_CORES +#define configNUMBER_OF_CORES 1 +#endif + +/* This may be set to zero in the config file if the rtos_time +functions are not needed or if it is incremented elsewhere. */ +#ifndef configUPDATE_RTOS_TIME_FROM_TICK_ISR +#define configUPDATE_RTOS_TIME_FROM_TICK_ISR 1 +#endif + +/* + * When entering an ISR we need to grow the stack by one more word than + * we actually need to save the thread context. This is because there are + * some functions, written in assembly *cough* memcpy() *cough*, that think + * it is OK to store words at SP[0]. Therefore the ISR must leave SP[0] alone + * even though it is normally not necessary to do so. + */ +#define portTHREAD_CONTEXT_STACK_GROWTH RTOS_SUPPORT_INTERRUPT_STACK_GROWTH + +#ifndef __ASSEMBLER__ + +/* Check validity of number of cores specified in config */ +#if ( configNUMBER_OF_CORES < 1 || portMAX_CORE_COUNT < configNUMBER_OF_CORES ) +#error "Invalid number of cores specified in config!" 
+#endif + +#define portMEMORY_BARRIER() RTOS_MEMORY_BARRIER() +#define portTASK_STACK_DEPTH(pxTaskCode) RTOS_THREAD_STACK_SIZE(pxTaskCode) +/*-----------------------------------------------------------*/ + +/* Scheduler utilities. */ +#define portYIELD() asm volatile( "KCALLI_lu6 0" ::: "memory" ) + +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ +do \ +{ \ + if( xSwitchRequired != pdFALSE ) \ + { \ + extern uint32_t ulPortYieldRequired[ portMAX_CORE_COUNT ]; \ + ulPortYieldRequired[ portGET_CORE_ID() ] = pdTRUE; \ + } \ +} while( 0 ) + +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/* SMP utilities. */ +#define portGET_CORE_ID() rtos_core_id_get() + +void vPortYieldOtherCore( int xOtherCoreID ); +#define portYIELD_CORE( x ) vPortYieldOtherCore( x ) +/*-----------------------------------------------------------*/ + +/* Architecture specific optimisations. */ +#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION +#define configUSE_PORT_OPTIMISED_TASK_SELECTION 0 +#endif + +#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1 + + /* Store/clear the ready priorities in a bit map. */ + #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) ) + #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) ) + + /*-----------------------------------------------------------*/ + + #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31UL - ( uint32_t ) __builtin_clz( uxReadyPriorities ) ) + +#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ +/*-----------------------------------------------------------*/ + +/* Critical section management. */ + +#define portGET_INTERRUPT_STATE() rtos_interrupt_mask_get() + +/* + * This differs from the standard portDISABLE_INTERRUPTS() + * in that it also returns what the interrupt state was + * before it disables interrupts. + */ +#define portDISABLE_INTERRUPTS() rtos_interrupt_mask_all() + +#define portENABLE_INTERRUPTS() rtos_interrupt_unmask_all() + +/* + * Port set interrupt mask and clear interrupt mask. + */ +#define portSET_INTERRUPT_MASK() rtos_interrupt_mask_all() +#define portCLEAR_INTERRUPT_MASK( ulState ) rtos_interrupt_mask_set( ulState ) + +#define portSET_INTERRUPT_MASK_FROM_ISR() ( 0 ) +#define portCLEAR_INTERRUPT_MASK_FROM_ISR(x) ( (void) x ) + +/* + * Will enable interrupts if ulState is non-zero. + */ +#define portRESTORE_INTERRUPTS(ulState) rtos_interrupt_mask_set(ulState) + +/* + * Returns non-zero if currently running in an + * ISR or otherwise in kernel mode.
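+ * Used by portASSERT_IF_IN_ISR() below to catch calls made from the wrong context.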
+ */ +#define portCHECK_IF_IN_ISR() rtos_isr_running() + +#define portASSERT_IF_IN_ISR() configASSERT( portCHECK_IF_IN_ISR() == 0 ) + +#define portGET_ISR_LOCK() rtos_lock_acquire(0) +#define portRELEASE_ISR_LOCK() rtos_lock_release(0) +#define portGET_TASK_LOCK() rtos_lock_acquire(1) +#define portRELEASE_TASK_LOCK() rtos_lock_release(1) + +void vTaskEnterCritical(void); +void vTaskExitCritical(void); +#define portENTER_CRITICAL() vTaskEnterCritical() +#define portEXIT_CRITICAL() vTaskExitCritical() + +extern UBaseType_t vTaskEnterCriticalFromISR( void ); +extern void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ); +#define portENTER_CRITICAL_FROM_ISR vTaskEnterCriticalFromISR +#define portEXIT_CRITICAL_FROM_ISR vTaskExitCriticalFromISR + +/*-----------------------------------------------------------*/ + +/* Runtime stats support */ +#if ( configGENERATE_RUN_TIME_STATS == 1 ) +int xscope_gettime( void ); +#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() /* nothing needed here */ +#define portGET_RUN_TIME_COUNTER_VALUE() xscope_gettime() +#endif +/*-----------------------------------------------------------*/ + +/* Maps sprintf and snprintf to the lite versions in lib_rtos_support */ +#if ( configUSE_DEBUG_SPRINTF == 1 ) +#define sprintf(...) rtos_sprintf(__VA_ARGS__) +#define snprintf(...) rtos_snprintf(__VA_ARGS__) +#endif + +/* Attribute for the pxCallbackFunction member of the Timer_t struct. +Required by xcc to calculate stack usage. */ +#define portTIMER_CALLBACK_ATTRIBUTE __attribute__((fptrgroup("timerCallbackGroup"))) + +/* Timer callback function macros. For xcc this ensures they get added to the timer callback +group so that stack usage for certain functions in timers.c can be calculated. */ +#define portTIMER_CALLBACK_FUNCTION_PROTO( vFunction, xTimer ) void vFunction( TimerHandle_t xTimer ) +#define portTIMER_CALLBACK_FUNCTION( vFunction, xTimer ) portTIMER_CALLBACK_ATTRIBUTE void vFunction( TimerHandle_t xTimer ) + +/*-----------------------------------------------------------*/ + +/* Task function macros as described on the FreeRTOS.org WEB site. These are +not necessary to use this port. They are defined so the common demo files +(which build with all the ports) will build. */ +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/*-----------------------------------------------------------*/ + + +#ifdef __cplusplus +} +#endif + +#endif /* __ASSEMBLER__ */ + +#endif /* PORTMACRO_H */ + diff --git a/portable/ThirdParty/xClang/XCOREAI/rtos_support_rtos_config.h b/portable/ThirdParty/xClang/XCOREAI/rtos_support_rtos_config.h new file mode 100644 index 0000000000..8b8d1054dd --- /dev/null +++ b/portable/ThirdParty/xClang/XCOREAI/rtos_support_rtos_config.h @@ -0,0 +1,95 @@ +// Copyright (c) 2020, XMOS Ltd, All rights reserved + +#ifndef RTOS_SUPPORT_RTOS_CONFIG_H_ +#define RTOS_SUPPORT_RTOS_CONFIG_H_ + +/** + * Lets the application know that the RTOS in use is FreeRTOS. + */ +#define RTOS_FREERTOS 1 + +/** + * The number of words to extend the stack by when entering an ISR. + * + * When entering an ISR we need to grow the stack by one more word than + * we actually need to save the thread context. This is because there are + * some functions, written in assembly *cough* memcpy() *cough*, that think + * it is OK to store words at SP[0].
Therefore the ISR must leave SP[0] alone + * even though it is normally not necessary to do so. + */ +#define RTOS_SUPPORT_INTERRUPT_STACK_GROWTH ( 44 + 1 ) + +/** + * The word offset into the stack where R1 is to be stored after it + * is extended when saving a thread's context. + */ +#define RTOS_SUPPORT_INTERRUPT_R1_STACK_OFFSET 9 + +/** + * The word offset into the stack where R11 is to be stored after it + * is extended when saving a thread's context. + */ +#define RTOS_SUPPORT_INTERRUPT_R11_STACK_OFFSET 19 + +/** + * The RTOS provided handler that should run when a + * core receives an intercore interrupt request. + */ +#define RTOS_INTERCORE_INTERRUPT_ISR() do { \ + void vIntercoreInterruptISR( void ); \ + vIntercoreInterruptISR(); \ +} while ( 0 ) + +/** + * The number of hardware locks that the RTOS + * requires. For a single core RTOS this could be + * zero. Locks are recursive. + * + * Note that the IRQ routines require a lock and + * will share the first one with the RTOS. + */ +#define RTOS_LOCK_COUNT 2 + +/** + * Remaps all calls to debug_printf() to rtos_printf(). + * When this is on, files should not include both rtos_support.h + * and debug_print.h. + */ +#define RTOS_DEBUG_PRINTF_REMAP 1 + + +#ifdef configENABLE_DEBUG_PRINTF + #if configENABLE_DEBUG_PRINTF + + /* ensure that debug_printf is enabled */ + #ifdef DEBUG_PRINT_ENABLE + #undef DEBUG_PRINT_ENABLE + #endif + #define DEBUG_PRINT_ENABLE 1 + + #ifndef configTASKS_DEBUG + #define configTASKS_DEBUG 0 + #endif + #if configTASKS_DEBUG == 1 + #define DEBUG_PRINT_ENABLE_FREERTOS_TASKS 1 + #else + #define DEBUG_PRINT_DISABLE_FREERTOS_TASKS 1 + #endif + + #else /* configENABLE_DEBUG_PRINTF */ + + /* ensure that debug_printf is disabled */ + #ifdef DEBUG_UNIT + #undef DEBUG_UNIT + #endif + #ifdef DEBUG_PRINT_ENABLE + #undef DEBUG_PRINT_ENABLE + #endif + + #define DEBUG_PRINT_ENABLE 0 + + #endif /* configENABLE_DEBUG_PRINTF */ +#endif + +#endif /* RTOS_SUPPORT_RTOS_CONFIG_H_ */ + diff --git a/queue.c b/queue.c index 29765a54fe..e87db0f45e 100644 --- a/queue.c +++ b/queue.c @@ -89,7 +89,11 @@ typedef struct SemaphoreData * performed just because a higher priority task has been woken. */ #define queueYIELD_IF_USING_PREEMPTION() #else - #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() + #if ( configNUMBER_OF_CORES == 1 ) + #define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ + #define queueYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI() + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ #endif /* @@ -1070,7 +1074,15 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, * is also a higher priority task in the pending ready list. */ if( xTaskResumeAll() == pdFALSE ) { - portYIELD_WITHIN_API(); + #if ( configNUMBER_OF_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ + { + vTaskYieldWithinAPI(); + } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } } else @@ -1127,7 +1139,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, * read, instead return a flag to say whether a context switch is required or * not (i.e. has a task with a higher priority than us been woken by this * post). 
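* The caller is expected to pass the returned flag to portYIELD_FROM_ISR() at the end * of the ISR.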
*/ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) { @@ -1252,7 +1264,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -1298,7 +1310,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1418,7 +1430,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -1531,7 +1543,15 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - portYIELD_WITHIN_API(); + #if ( configNUMBER_OF_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ + { + vTaskYieldWithinAPI(); + } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { @@ -1714,7 +1734,15 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - portYIELD_WITHIN_API(); + #if ( configNUMBER_OF_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ + { + vTaskYieldWithinAPI(); + } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { @@ -1892,7 +1920,15 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, if( xTaskResumeAll() == pdFALSE ) { - portYIELD_WITHIN_API(); + #if ( configNUMBER_OF_CORES == 1 ) + { + portYIELD_WITHIN_API(); + } + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ + { + vTaskYieldWithinAPI(); + } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { @@ -1955,7 +1991,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -2015,7 +2051,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -2049,7 +2085,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { /* Cannot block in an ISR, so check there is data available. 
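If there is no data, fail immediately rather than block.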
*/ if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) @@ -2070,7 +2106,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } diff --git a/stream_buffer.c b/stream_buffer.c index 30093f1af8..b7410fc066 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -98,7 +98,7 @@ do { \ UBaseType_t uxSavedInterruptStatus; \ \ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \ + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ { \ @@ -109,7 +109,7 @@ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); \ } while( 0 ) #endif /* sbRECEIVE_COMPLETED_FROM_ISR */ @@ -175,7 +175,7 @@ do { \ UBaseType_t uxSavedInterruptStatus; \ \ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \ + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ { \ @@ -186,7 +186,7 @@ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); \ } while( 0 ) #endif /* sbSEND_COMPLETE_FROM_ISR */ @@ -1220,7 +1220,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer configASSERT( pxStreamBuffer ); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) { @@ -1236,7 +1236,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer xReturn = pdFALSE; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -1251,7 +1251,7 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf configASSERT( pxStreamBuffer ); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) { @@ -1267,7 +1267,7 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf xReturn = pdFALSE; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } diff --git a/tasks.c b/tasks.c index 72200ec4f3..b991b7da4b 100644 --- a/tasks.c +++ b/tasks.c @@ -58,14 +58,16 @@ #include #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */ -#if ( configUSE_PREEMPTION == 0 ) +#if ( configNUMBER_OF_CORES == 1 ) + #if ( configUSE_PREEMPTION == 0 ) /* If the cooperative scheduler is being used then a yield should not be * performed just because a higher priority task has been woken. */ - #define taskYIELD_IF_USING_PREEMPTION() -#else - #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() -#endif + #define taskYIELD_IF_USING_PREEMPTION() + #else + #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() + #endif +#endif /* if ( configNUMBER_OF_CORES == 1 ) */ /* Values that can be assigned to the ucNotifyState member of the TCB. */ #define taskNOT_WAITING_NOTIFICATION ( ( uint8_t ) 0 ) /* Must be zero as it is the initialised value. 
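Newly created TCBs are zeroed, so a task that has never been notified is in this state by default.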
*/ @@ -133,7 +135,8 @@ /*-----------------------------------------------------------*/ - #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + #if ( configNUMBER_OF_CORES == 1 ) + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ do { \ UBaseType_t uxTopPriority = uxTopReadyPriority; \ \ @@ -149,6 +152,7 @@ listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ uxTopReadyPriority = uxTopPriority; \ } while( 0 ) /* taskSELECT_HIGHEST_PRIORITY_TASK */ + #endif /* if ( configNUMBER_OF_CORES == 1 ) */ /*-----------------------------------------------------------*/ @@ -160,6 +164,10 @@ #else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + #if ( configNUMBER_OF_CORES > 1 ) + #error configUSE_PORT_OPTIMISED_TASK_SELECTION not supported in FreeRTOS SMP. + #endif + /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is * performed in a way that is tailored to the particular microcontroller * architecture being used. */ @@ -251,6 +259,38 @@ #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000000000000000ULL #endif +/* Task state. */ +typedef BaseType_t TaskRunning_t; + +/* Indicates that the task is not actively running on any core. */ +#define taskTASK_NOT_RUNNING ( TaskRunning_t ) ( -1 ) + +/* Indicates that the task is actively running but scheduled to yield. */ +#define taskTASK_YIELDING ( TaskRunning_t ) ( -2 ) + +/* Returns pdTRUE if the task is actively running and not scheduled to yield. */ +#if ( configNUMBER_OF_CORES == 1 ) + #define taskTASK_IS_RUNNING( pxTCB ) ( ( ( pxTCB ) == pxCurrentTCB ) ? ( pdTRUE ) : ( pdFALSE ) ) +#else + #define taskTASK_IS_RUNNING( pxTCB ) ( ( ( ( pxTCB )->xTaskRunState >= ( BaseType_t ) 0 ) && ( ( pxTCB )->xTaskRunState < ( BaseType_t ) configNUMBER_OF_CORES ) ) ? ( pdTRUE ) : ( pdFALSE ) ) +#endif + +/* Indicates that the task is an Idle task. */ +#define taskATTRIBUTE_IS_IDLE ( UBaseType_t ) ( 1UL << 0UL ) + +#if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) ) + #define portGET_CRITICAL_NESTING_COUNT() ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting ) + #define portSET_CRITICAL_NESTING_COUNT( x ) ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting = ( x ) ) + #define portINCREMENT_CRITICAL_NESTING_COUNT() ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting++ ) + #define portDECREMENT_CRITICAL_NESTING_COUNT() ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxCriticalNesting-- ) +#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portCRITICAL_NESTING_IN_TCB == 1 ) ) */ + +/* Code below here allows the infinite loops to be controlled, in particular the infinite loop + * in the idle task function (for example when performing unit tests). */ +#ifndef INFINITE_LOOP + #define INFINITE_LOOP() 1 +#endif + /* * Task control block. A task control block (TCB) is allocated for each task, * and stores task state information, including a pointer to the task's context @@ -264,12 +304,24 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to xMPU_SETTINGS xMPUSettings; /**< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */ #endif + #if ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) + UBaseType_t uxCoreAffinityMask; /**< Used to link the task to certain cores. UBaseType_t must have at least as many bits as configNUMBER_OF_CORES. */ + #endif + ListItem_t xStateListItem; /**< The list that the state list item of a task is referenced from denotes the state of that task (Ready, Blocked, Suspended).
*/ ListItem_t xEventListItem; /**< Used to reference a task from an event list. */ UBaseType_t uxPriority; /**< The priority of the task. 0 is the lowest priority. */ StackType_t * pxStack; /**< Points to the start of the stack. */ + #if ( configNUMBER_OF_CORES > 1 ) + volatile TaskRunning_t xTaskRunState; /**< Used to identify the core the task is running on, if the task is running. Otherwise, identifies the task's state - not running or yielding. */ + UBaseType_t uxTaskAttributes; /**< Task's attributes - currently used to identify the idle tasks. */ + #endif char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + BaseType_t xPreemptionDisable; /**< Used to prevent the task from being preempted. */ + #endif + #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) StackType_t * pxEndOfStack; /**< Points to the highest valid address for the stack. */ #endif @@ -330,7 +382,15 @@ typedef tskTCB TCB_t; /*lint -save -e956 A manual analysis and inspection has been used to determine * which static variables must be declared volatile. */ -portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; +#if ( configNUMBER_OF_CORES == 1 ) + portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; +#else + /* MISRA Ref 8.4.1 [Declaration shall be visible] */ + /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-84 */ + /* coverity[misra_c_2012_rule_8_4_violation] */ + portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUMBER_OF_CORES ]; + #define pxCurrentTCB xTaskGetCurrentTaskHandle() +#endif /* Lists for ready and blocked tasks. -------------------- * xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but @@ -368,11 +428,11 @@ PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINI PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY; PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE; PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U; -PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE; +PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUMBER_OF_CORES ] = { pdFALSE }; PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ -PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /**< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ +PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUMBER_OF_CORES ]; /**< Holds the handles of the idle tasks. The idle tasks are created automatically when the scheduler is started. */ /* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists. 
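* There is one ready list per priority level.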
* For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority @@ -386,24 +446,76 @@ const volatile UBaseType_t uxTopUsedPriority = configMAX_PRIORITIES - 1U; * moves the task's event list item into the xPendingReadyList, ready for the * kernel to move the task from the pending ready list into the real ready list * when the scheduler is unsuspended. The pending ready list itself can only be - * accessed from a critical section. */ + * accessed from a critical section. + * + * Updates to uxSchedulerSuspended must be protected by both the task lock and the ISR lock + * and must not be done from an ISR. Reads must be protected by either lock and may be done + * from either an ISR or a task. */ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) 0U; #if ( configGENERATE_RUN_TIME_STATS == 1 ) /* Do not move these variables to function scope as doing so prevents the * code working with debuggers that need to remove the static qualifier. */ - PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime = 0UL; /**< Holds the value of a timer/counter the last time a task was switched in. */ - PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime = 0UL; /**< Holds the total amount of execution time as defined by the run time counter clock. */ +PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUMBER_OF_CORES ] = { 0U }; /**< Holds the value of a timer/counter the last time a task was switched in. */ +PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ configNUMBER_OF_CORES ] = { 0U }; /**< Holds the total amount of execution time as defined by the run time counter clock. */ #endif +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUMBER_OF_CORES > 1 ) + +/* Do not move these variables to function scope as doing so prevents the + * code working with debuggers that need to remove the static qualifier. */ + static StaticTask_t xIdleTCBBuffers[ configNUMBER_OF_CORES - 1 ]; + static StackType_t xIdleTaskStackBuffers[ configNUMBER_OF_CORES - 1 ][ configMINIMAL_STACK_SIZE ]; + +#endif /* #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUMBER_OF_CORES > 1 ) */ + /*lint -restore */ /*-----------------------------------------------------------*/ /* File private functions. --------------------------------*/ +/* + * Creates the idle tasks during scheduler start. + */ +static BaseType_t prvCreateIdleTasks( void ); + +#if ( configNUMBER_OF_CORES > 1 ) + +/* + * Checks to see if another task moved the current task out of the ready + * list while it was waiting to enter a critical section and, if so, yields. + */ + static void prvCheckForRunStateChange( void ); +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ + +#if ( configNUMBER_OF_CORES > 1 ) + +/* + * Yields the given core. + */ + static void prvYieldCore( BaseType_t xCoreID ); +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ + +#if ( configNUMBER_OF_CORES > 1 ) + +/* + * Yields a core, or cores if multiple priorities are not allowed to run + * simultaneously, to allow the task pxTCB to run. + */ + static void prvYieldForTask( const TCB_t * pxTCB ); +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ + +#if ( configNUMBER_OF_CORES > 1 ) + +/* + * Selects the highest priority available task for the given core.
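+ * Called while the core is switching context, with the kernel locks held.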
+ */ + static void prvSelectHighestPriorityTask( BaseType_t xCoreID ); +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ + /** * Utility task that simply returns pdTRUE if the task referenced by xTask is * currently in the Suspended state, or pdFALSE if the task referenced by xTask * is in any other state. @@ -426,13 +538,21 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; * The idle task is automatically created and added to the ready lists upon * creation of the first user task. + * + * In FreeRTOS SMP, configNUMBER_OF_CORES - 1 minimal idle tasks are also + * created to ensure that each core has an idle task to run when no other + * task is available to run. + * * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific - * language extensions. The equivalent prototype for this function is: + * language extensions. The equivalent prototypes for these functions are: * * void prvIdleTask( void *pvParameters ); + * void prvMinimalIdleTask( void *pvParameters ); * */ static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION; +#if ( configNUMBER_OF_CORES > 1 ) + static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) PRIVILEGED_FUNCTION; +#endif /* * Utility to free all memory allocated by the scheduler to hold a TCB, @@ -561,176 +681,705 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #endif -/*-----------------------------------------------------------*/ +#if ( configUSE_MINIMAL_IDLE_HOOK == 1 ) + extern void vApplicationMinimalIdleHook( void ); +#endif /* #if ( configUSE_MINIMAL_IDLE_HOOK == 1 ) */ -#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) +/*-----------------------------------------------------------*/ - TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, - const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ - const uint32_t ulStackDepth, - void * const pvParameters, - UBaseType_t uxPriority, - StackType_t * const puxStackBuffer, - StaticTask_t * const pxTaskBuffer ) +#if ( configNUMBER_OF_CORES > 1 ) + static void prvCheckForRunStateChange( void ) { - TCB_t * pxNewTCB; - TaskHandle_t xReturn; + UBaseType_t uxPrevCriticalNesting; + const TCB_t * pxThisTCB; + + /* This should be skipped if called from an ISR. If the task on the current + * core is no longer running, then vTaskSwitchContext() probably should + * be run before returning, but we don't have a way to force that to happen + * from here. */ + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + /* This function is always called with interrupts disabled + * so this is safe. */ + pxThisTCB = pxCurrentTCBs[ portGET_CORE_ID() ]; + + while( pxThisTCB->xTaskRunState == taskTASK_YIELDING ) + { + /* We are only here if we just entered a critical section + * or if we just suspended the scheduler, and another task + * has requested that we yield. + * + * This is slightly complicated since we need to save and restore + * the suspension and critical nesting counts, as well as release + * and reacquire the correct locks. And then, do it all over again + * if our state changed again during the reacquisition. */ + uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT(); + + if( uxPrevCriticalNesting > 0U ) + { + portSET_CRITICAL_NESTING_COUNT( 0U ); + portRELEASE_ISR_LOCK(); + } + else + { + /* The scheduler is suspended. uxSchedulerSuspended is updated + * only when the task is not requested to yield.
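+ * There is therefore nothing to unwind here before the task lock is released.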
*/ + mtCOVERAGE_TEST_MARKER(); + } - configASSERT( puxStackBuffer != NULL ); - configASSERT( pxTaskBuffer != NULL ); + portRELEASE_TASK_LOCK(); - #if ( configASSERT_DEFINED == 1 ) - { - /* Sanity check that the size of the structure used to declare a - * variable of type StaticTask_t equals the size of the real task - * structure. */ - volatile size_t xSize = sizeof( StaticTask_t ); - configASSERT( xSize == sizeof( TCB_t ) ); - ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */ - } - #endif /* configASSERT_DEFINED */ + portMEMORY_BARRIER(); + configASSERT( pxThisTCB->xTaskRunState == taskTASK_YIELDING ); - if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) ) - { - /* The memory used for the task's TCB and stack are passed into this - * function - use them. */ - pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ - memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); - pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer; + portENABLE_INTERRUPTS(); - #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ - { - /* Tasks can be created statically or dynamically, so note this - * task was created statically in case the task is later deleted. */ - pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + /* Enabling interrupts should cause this core to immediately + * service the pending interrupt and yield. If the run state is still + * yielding here then that is a problem. */ + configASSERT( pxThisTCB->xTaskRunState != taskTASK_YIELDING ); + + portDISABLE_INTERRUPTS(); + portGET_TASK_LOCK(); + portGET_ISR_LOCK(); + + portSET_CRITICAL_NESTING_COUNT( uxPrevCriticalNesting ); + + if( uxPrevCriticalNesting == 0U ) + { + portRELEASE_ISR_LOCK(); + } } - #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + } + } +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ - prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL ); - prvAddNewTaskToReadyList( pxNewTCB ); +/*-----------------------------------------------------------*/ + +#if ( configNUMBER_OF_CORES > 1 ) + static void prvYieldCore( BaseType_t xCoreID ) + { + /* This must be called from a critical section and xCoreID must be valid. 
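It either marks a yield as pending for the calling core or interrupts the target core via portYIELD_CORE().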
*/ + if( ( portCHECK_IF_IN_ISR() == pdTRUE ) && ( xCoreID == portGET_CORE_ID() ) ) + { + xYieldPendings[ xCoreID ] = pdTRUE; } else { - xReturn = NULL; + if( pxCurrentTCBs[ xCoreID ]->xTaskRunState != taskTASK_YIELDING ) + { + if( xCoreID == portGET_CORE_ID() ) + { + xYieldPendings[ xCoreID ] = pdTRUE; + } + else + { + portYIELD_CORE( xCoreID ); + pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_YIELDING; + } + } } - - return xReturn; } - -#endif /* SUPPORT_STATIC_ALLOCATION */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) - - BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, - TaskHandle_t * pxCreatedTask ) +#if ( configNUMBER_OF_CORES > 1 ) + static void prvYieldForTask( const TCB_t * pxTCB ) { - TCB_t * pxNewTCB; - BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + BaseType_t xLowestPriorityToPreempt; + BaseType_t xCurrentCoreTaskPriority; + BaseType_t xLowestPriorityCore = ( BaseType_t ) -1; + BaseType_t xCoreID; - configASSERT( pxTaskDefinition->puxStackBuffer != NULL ); - configASSERT( pxTaskDefinition->pxTaskBuffer != NULL ); + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + BaseType_t xYieldCount = 0; + #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ - if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) ) + /* This must be called from a critical section. */ + configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U ); + + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + + /* No task should yield for this one if it is a lower priority + * than the priority level of the currently ready tasks. */ + if( pxTCB->uxPriority >= uxTopReadyPriority ) + #else + /* Yield is not required for a task which is already running. */ + if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) + #endif { - /* Allocate space for the TCB. Where the memory comes from depends - * on the implementation of the port malloc function and whether or - * not static allocation is being used. */ - pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer; - memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); + xLowestPriorityToPreempt = ( BaseType_t ) pxTCB->uxPriority; - /* Store the stack location in the TCB. */ - pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; + /* xLowestPriorityToPreempt will be decremented to -1 if the priority of pxTCB + * is 0. This is ok as we will give system idle tasks a priority of -1 below. */ + --xLowestPriorityToPreempt; - #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ ) { - /* Tasks can be created statically or dynamically, so note this - * task was created statically in case the task is later deleted. */ - pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + xCurrentCoreTaskPriority = ( BaseType_t ) pxCurrentTCBs[ xCoreID ]->uxPriority; + + /* System idle tasks are assigned a priority of tskIDLE_PRIORITY - 1 here.
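This makes cores that are running an idle task the preferred targets for preemption.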
*/ + if( ( pxCurrentTCBs[ xCoreID ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U ) + { + xCurrentCoreTaskPriority = xCurrentCoreTaskPriority - 1; + } + + if( ( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) != pdFALSE ) && ( xYieldPendings[ xCoreID ] == pdFALSE ) ) + { + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + if( taskTASK_IS_RUNNING( pxTCB ) == pdFALSE ) + #endif + { + if( xCurrentCoreTaskPriority <= xLowestPriorityToPreempt ) + { + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U ) + #endif + { + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE ) + #endif + { + xLowestPriorityToPreempt = xCurrentCoreTaskPriority; + xLowestPriorityCore = xCoreID; + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + { + /* Yield all currently running non-idle tasks with a priority lower than + * the task that needs to run. */ + if( ( xCurrentCoreTaskPriority > ( ( BaseType_t ) tskIDLE_PRIORITY - 1 ) ) && + ( xCurrentCoreTaskPriority < ( BaseType_t ) pxTCB->uxPriority ) ) + { + prvYieldCore( xCoreID ); + xYieldCount++; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ - prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, - pxTaskDefinition->pcName, - ( uint32_t ) pxTaskDefinition->usStackDepth, - pxTaskDefinition->pvParameters, - pxTaskDefinition->uxPriority, - pxCreatedTask, pxNewTCB, - pxTaskDefinition->xRegions ); + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + if( ( xYieldCount == 0 ) && ( xLowestPriorityCore >= 0 ) ) + #else /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ + if( xLowestPriorityCore >= 0 ) + #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ + { + prvYieldCore( xLowestPriorityCore ); + } - prvAddNewTaskToReadyList( pxNewTCB ); - xReturn = pdPASS; + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + /* Verify that the calling core always yields to higher priority tasks. 
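The selection rule in the loop above is easy to misread, so here is a small host-side sketch of the same arithmetic with made-up values; it is plain C, not kernel code. Idle cores are treated as running at an effective priority of -1, and the core running at the lowest effective priority below the new task's priority is the one asked to yield.

```
#include <stdio.h>

#define NUM_CORES    4

int main( void )
{
    /* Effective priorities of the task on each core; idle cores are
     * already mapped to -1 as in the kernel code above. */
    int xCorePriorities[ NUM_CORES ] = { -1, 2, 0, 3 };
    int xNewTaskPriority = 2;

    /* Start one below the new task's priority, as the kernel does. */
    int xLowestPriorityToPreempt = xNewTaskPriority - 1;
    int xLowestPriorityCore = -1;
    int xCoreID;

    for( xCoreID = 0; xCoreID < NUM_CORES; xCoreID++ )
    {
        if( xCorePriorities[ xCoreID ] <= xLowestPriorityToPreempt )
        {
            xLowestPriorityToPreempt = xCorePriorities[ xCoreID ];
            xLowestPriorityCore = xCoreID;
        }
    }

    /* Prints "Preempt core 0" - the idle core loses out first. */
    printf( "Preempt core %d\n", xLowestPriorityCore );
    return 0;
}
```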
*/ + if( ( ( pxCurrentTCBs[ portGET_CORE_ID() ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) == 0 ) && + ( pxTCB->uxPriority > pxCurrentTCBs[ portGET_CORE_ID() ]->uxPriority ) ) + { + configASSERT( ( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE ) || + ( taskTASK_IS_RUNNING( pxCurrentTCBs[ portGET_CORE_ID() ] ) == pdFALSE ) ); + } + #endif } - - return xReturn; } - -#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) - - BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, - TaskHandle_t * pxCreatedTask ) +#if ( configNUMBER_OF_CORES > 1 ) + static void prvSelectHighestPriorityTask( BaseType_t xCoreID ) { - TCB_t * pxNewTCB; - BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + UBaseType_t uxCurrentPriority = uxTopReadyPriority; + BaseType_t xTaskScheduled = pdFALSE; + BaseType_t xDecrementTopPriority = pdTRUE; - configASSERT( pxTaskDefinition->puxStackBuffer ); + #if ( configUSE_CORE_AFFINITY == 1 ) + const TCB_t * pxPreviousTCB = NULL; + #endif + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + BaseType_t xPriorityDropped = pdFALSE; + #endif - if( pxTaskDefinition->puxStackBuffer != NULL ) + /* This function should be called when the scheduler is running. */ + configASSERT( xSchedulerRunning == pdTRUE ); + + /* A new task is created and a running task with the same priority yields + * itself to run the new task. When a running task yields itself, it is still + * in the ready list. This running task will be selected before the new task + * since the new task is always added to the end of the ready list. + * The other problem is that the running task is still in the same position of + * the ready list when it yields itself. It is possible that it will be selected + * earlier than other tasks which have been waiting longer than this task. + * + * To fix these problems, the running task should be moved to the end of the + * ready list before searching for the next ready task in the ready list. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ), + &pxCurrentTCBs[ xCoreID ]->xStateListItem ) == pdTRUE ) { - /* Allocate space for the TCB. Where the memory comes from depends - * on the implementation of the port malloc function and whether or - * not static allocation is being used. */ - pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); + ( void ) uxListRemove( &pxCurrentTCBs[ xCoreID ]->xStateListItem ); + vListInsertEnd( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ), + &pxCurrentTCBs[ xCoreID ]->xStateListItem ); + } - if( pxNewTCB != NULL ) + while( xTaskScheduled == pdFALSE ) + { + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) { - memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); - - /* Store the stack location in the TCB. */ - pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; - - #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + if( uxCurrentPriority < uxTopReadyPriority ) { - /* Tasks can be created statically or dynamically, so note - * this task had a statically allocated stack in case it is - * later deleted. The TCB was allocated dynamically. */ - pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY; + /* We can't schedule any tasks, other than idle, that have a + * priority lower than the priority of a task currently running + * on another core.
*/ + uxCurrentPriority = tskIDLE_PRIORITY; } - #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ - - prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, - pxTaskDefinition->pcName, - ( uint32_t ) pxTaskDefinition->usStackDepth, - pxTaskDefinition->pvParameters, - pxTaskDefinition->uxPriority, - pxCreatedTask, pxNewTCB, - pxTaskDefinition->xRegions ); - - prvAddNewTaskToReadyList( pxNewTCB ); - xReturn = pdPASS; - } - } + #endif - return xReturn; - } + if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE ) + { + const List_t * const pxReadyList = &( pxReadyTasksLists[ uxCurrentPriority ] ); + const ListItem_t * pxEndMarker = listGET_END_MARKER( pxReadyList ); + ListItem_t * pxIterator; -#endif /* portUSING_MPU_WRAPPERS */ -/*-----------------------------------------------------------*/ + /* The ready task list for uxCurrentPriority is not empty, so uxTopReadyPriority + * must not be decremented any further. */ + xDecrementTopPriority = pdFALSE; -#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + for( pxIterator = listGET_HEAD_ENTRY( pxReadyList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) ) + { + TCB_t * pxTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxIterator ); - BaseType_t xTaskCreate( TaskFunction_t pxTaskCode, - const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ - const configSTACK_DEPTH_TYPE usStackDepth, - void * const pvParameters, - UBaseType_t uxPriority, - TaskHandle_t * const pxCreatedTask ) - { - TCB_t * pxNewTCB; - BaseType_t xReturn; + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + { + /* When falling back to the idle priority because only one priority + * level is allowed to run at a time, we should ONLY schedule the true + * idle tasks, not user tasks at the idle priority. */ + if( uxCurrentPriority < uxTopReadyPriority ) + { + if( ( pxTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) == 0 ) + { + continue; + } + } + } + #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ + + if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) + { + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U ) + #endif + { + /* If the task is not being executed by any core, swap it in. */ + pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_NOT_RUNNING; + #if ( configUSE_CORE_AFFINITY == 1 ) + pxPreviousTCB = pxCurrentTCBs[ xCoreID ]; + #endif + pxTCB->xTaskRunState = ( TaskRunning_t ) xCoreID; + pxCurrentTCBs[ xCoreID ] = pxTCB; + xTaskScheduled = pdTRUE; + } + } + else if( pxTCB == pxCurrentTCBs[ xCoreID ] ) + { + configASSERT( ( pxTCB->xTaskRunState == xCoreID ) || ( pxTCB->xTaskRunState == taskTASK_YIELDING ) ); + + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U ) + #endif + { + /* The task is already running on this core, mark it as scheduled. */ + pxTCB->xTaskRunState = ( TaskRunning_t ) xCoreID; + xTaskScheduled = pdTRUE; + } + } + else + { + /* This task is running on a core other than xCoreID. */ + mtCOVERAGE_TEST_MARKER(); + } + + if( xTaskScheduled != pdFALSE ) + { + /* A task has been selected to run on this core. */ + break; + } + } + } + else + { + if( xDecrementTopPriority != pdFALSE ) + { + uxTopReadyPriority--; + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + { + xPriorityDropped = pdTRUE; + } + #endif + } + } + + /* There are configNUMBER_OF_CORES Idle tasks created when the scheduler is started.
+ * The scheduler should be able to select a task to run when uxCurrentPriority + * is tskIDLE_PRIORITY. uxCurrentPriority is never decreased to a value below + * tskIDLE_PRIORITY. */ + if( uxCurrentPriority > tskIDLE_PRIORITY ) + { + uxCurrentPriority--; + } + else + { + /* This can only happen when the idle tasks have not been created yet. + * Break the loop to prevent uxCurrentPriority from underflowing. */ + break; + } + } + + #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) + { + if( xTaskScheduled == pdTRUE ) + { + if( xPriorityDropped != pdFALSE ) + { + /* There may be several ready tasks that were being prevented from running because there was + * a higher priority task running. Now that the last of the higher priority tasks is no longer + * running, make sure all the other idle tasks yield. */ + BaseType_t x; + + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUMBER_OF_CORES; x++ ) + { + if( ( pxCurrentTCBs[ x ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0 ) + { + prvYieldCore( x ); + } + } + } + } + } + #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ + + #if ( configUSE_CORE_AFFINITY == 1 ) + { + if( xTaskScheduled == pdTRUE ) + { + if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) + { + /* A ready task was just evicted from this core. See if it can be + * scheduled on any other core. */ + UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask; + BaseType_t xLowestPriority = ( BaseType_t ) pxPreviousTCB->uxPriority; + BaseType_t xLowestPriorityCore = -1; + BaseType_t x; + + if( ( pxPreviousTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U ) + { + xLowestPriority = xLowestPriority - 1; + } + + if( ( uxCoreMap & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U ) + { + /* The ready task that was removed from this core is not excluded from it. + * Only look at the intersection of the cores the removed task is allowed to run + * on with the cores that the new task is excluded from. It is possible that the + * new task was only placed onto this core because it is excluded from another. + * Check to see if the previous task could run on one of those cores. */ + uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask ); + } + else + { + /* The ready task that was removed from this core is excluded from it.
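The mask arithmetic above is dense, so a worked example may help; the masks below are made-up values for an imagined 4-core build, and the function exists only to give the statements a compilable home.

```
#include "FreeRTOS.h"

/* Illustrative only: the evicted task may run on cores 0-2 ( 0b0111 ) and
 * the task that displaced it may run on cores 1-3 ( 0b1110 ). */
static void vAffinityMaskExample( void )
{
    UBaseType_t uxCoreMap = 0x07U;               /* Evicted task's affinity. */
    const UBaseType_t uxDisplacingMask = 0x0EU;  /* Displacing task's affinity. */

    /* Prefer the cores the displacing task could not have used anyway. */
    uxCoreMap &= ~uxDisplacingMask;              /* 0x01: only core 0 remains. */

    /* Never consider cores that do not exist on this build. */
    uxCoreMap &= ( ( 1U << 4 ) - 1U );           /* Still 0x01. */

    ( void ) uxCoreMap;                          /* Core 0 is checked first. */
}
```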
*/ + } + + uxCoreMap &= ( ( 1U << configNUMBER_OF_CORES ) - 1U ); + + for( x = ( ( BaseType_t ) configNUMBER_OF_CORES - 1 ); x >= ( BaseType_t ) 0; x-- ) + { + UBaseType_t uxCore = ( UBaseType_t ) x; + BaseType_t xTaskPriority; + + if( ( uxCoreMap & ( ( UBaseType_t ) 1U << uxCore ) ) != 0U ) + { + xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority; + + if( ( pxCurrentTCBs[ uxCore ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U ) + { + xTaskPriority = xTaskPriority - ( BaseType_t ) 1; + } + + uxCoreMap &= ~( ( UBaseType_t ) 1U << uxCore ); + + if( ( xTaskPriority < xLowestPriority ) && + ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ] ) != pdFALSE ) && + ( xYieldPendings[ uxCore ] == pdFALSE ) ) + { + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) + #endif + { + xLowestPriority = xTaskPriority; + xLowestPriorityCore = ( BaseType_t ) uxCore; + } + } + } + } + + if( xLowestPriorityCore >= 0 ) + { + prvYieldCore( xLowestPriorityCore ); + } + } + } + } + #endif /* #if ( configUSE_CORE_AFFINITY == 1 ) */ + } + +#endif /* ( configNUMBER_OF_CORES > 1 ) */ + +/*-----------------------------------------------------------*/ + +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + return xTaskCreateStaticAffinitySet( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, tskNO_AFFINITY ); + } + + TaskHandle_t xTaskCreateStaticAffinitySet( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer, + UBaseType_t uxCoreAffinityMask ) + #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ + { + TCB_t * pxNewTCB; + TaskHandle_t xReturn; + + configASSERT( puxStackBuffer != NULL ); + configASSERT( pxTaskBuffer != NULL ); + + #if ( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + * variable of type StaticTask_t equals the size of the real task + * structure. */ + volatile size_t xSize = sizeof( StaticTask_t ); + configASSERT( xSize == sizeof( TCB_t ) ); + ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */ + } + #endif /* configASSERT_DEFINED */ + + if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) ) + { + /* The memory used for the task's TCB and stack are passed into this + * function - use them. */ + pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ + ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); + pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer; + + #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. 
*/ + { + /* Tasks can be created statically or dynamically, so note this + * task was created statically in case the task is later deleted. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL ); + + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + /* Set the task's affinity before scheduling it. */ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } + #endif + + prvAddNewTaskToReadyList( pxNewTCB ); + } + else + { + xReturn = NULL; + } + + return xReturn; + } + +#endif /* SUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, + TaskHandle_t * pxCreatedTask ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + return xTaskCreateRestrictedStaticAffinitySet( pxTaskDefinition, tskNO_AFFINITY, pxCreatedTask ); + } + + BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * pxCreatedTask ) + #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ + { + TCB_t * pxNewTCB; + BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + + configASSERT( pxTaskDefinition->puxStackBuffer != NULL ); + configASSERT( pxTaskDefinition->pxTaskBuffer != NULL ); + + if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) ) + { + /* Allocate space for the TCB. Where the memory comes from depends + * on the implementation of the port malloc function and whether or + * not static allocation is being used. */ + pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer; + ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); + + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; + + #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + { + /* Tasks can be created statically or dynamically, so note this + * task was created statically in case the task is later deleted. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, + pxTaskDefinition->pcName, + ( uint32_t ) pxTaskDefinition->usStackDepth, + pxTaskDefinition->pvParameters, + pxTaskDefinition->uxPriority, + pxCreatedTask, pxNewTCB, + pxTaskDefinition->xRegions ); + + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + /* Set the task's affinity before scheduling it. 
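For reference, a usage sketch of the new static-allocation entry point added above; the task function, name, stack depth, and priority are placeholder values.

```
#include "FreeRTOS.h"
#include "task.h"

#define exampleSTACK_DEPTH    256U    /* Placeholder stack depth. */

static StackType_t uxExampleStack[ exampleSTACK_DEPTH ];
static StaticTask_t xExampleTCB;

/* Placeholder task function. */
static void vExampleTask( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        /* Application work here. */
    }
}

void vCreateTaskPinnedToCore0( void )
{
    /* Bit n of the affinity mask allows the task to run on core n, so
     * ( 1U << 0 ) pins the task to core 0. */
    ( void ) xTaskCreateStaticAffinitySet( vExampleTask,
                                           "Pinned",
                                           exampleSTACK_DEPTH,
                                           NULL,
                                           tskIDLE_PRIORITY + 1U,
                                           uxExampleStack,
                                           &xExampleTCB,
                                           ( 1U << 0 ) );
}
```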
*/ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } + #endif + + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + + return xReturn; + } + +#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, + TaskHandle_t * pxCreatedTask ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + return xTaskCreateRestrictedAffinitySet( pxTaskDefinition, tskNO_AFFINITY, pxCreatedTask ); + } + + BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * pxCreatedTask ) + #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ + { + TCB_t * pxNewTCB; + BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + + configASSERT( pxTaskDefinition->puxStackBuffer ); + + if( pxTaskDefinition->puxStackBuffer != NULL ) + { + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); + + if( pxNewTCB != NULL ) + { + ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); + + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; + + #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + { + /* Tasks can be created statically or dynamically, so note + * this task had a statically allocated stack in case it is + * later deleted. The TCB was allocated dynamically. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, + pxTaskDefinition->pcName, + ( uint32_t ) pxTaskDefinition->usStackDepth, + pxTaskDefinition->pvParameters, + pxTaskDefinition->uxPriority, + pxCreatedTask, pxNewTCB, + pxTaskDefinition->xRegions ); + + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + /* Set the task's affinity before scheduling it. */ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } + #endif + + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + } + + return xReturn; + } + +#endif /* portUSING_MPU_WRAPPERS */ +/*-----------------------------------------------------------*/ + +#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + BaseType_t xTaskCreate( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask ) + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + return xTaskCreateAffinitySet( pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, tskNO_AFFINITY, pxCreatedTask ); + } + + BaseType_t xTaskCreateAffinitySet( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * const pxCreatedTask ) + #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ + { + TCB_t * pxNewTCB; + BaseType_t xReturn; /* If the stack grows down then allocate the stack then the TCB so the stack * does not grow into the TCB. Likewise if the stack grows up then allocate @@ -744,7 +1393,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( pxNewTCB != NULL ) { - memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); + ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); /* Allocate space for the stack used by the task being created. * The base of the stack memory stored in the TCB so the task can @@ -773,7 +1422,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( pxNewTCB != NULL ) { - memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); + ( void ) memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); /* Store the stack location in the TCB. */ pxNewTCB->pxStack = pxStack; @@ -803,6 +1452,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL ); + + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + /* Set the task's affinity before scheduling it. */ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } + #endif + prvAddNewTaskToReadyList( pxNewTCB ); xReturn = pdPASS; } @@ -962,6 +1619,18 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY; + } + #endif + + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + { + pxNewTCB->xPreemptionDisable = 0; + } + #endif + /* Initialize the TCB stack to look as if the task was already running, * but had been interrupted by the scheduler. The return address is set * to the start of the task function. Once the stack has been initialised @@ -1014,39 +1683,105 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif /* portUSING_MPU_WRAPPERS */ - if( pxCreatedTask != NULL ) - { - /* Pass the handle out in an anonymous way. The handle can be used to - * change the created task's priority, delete the created task, etc.*/ - *pxCreatedTask = ( TaskHandle_t ) pxNewTCB; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } -} -/*-----------------------------------------------------------*/ + /* Initialize task state and task attributes. */ + #if ( configNUMBER_OF_CORES > 1 ) + { + pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING; + + /* Is this an idle task? */ + if( ( ( TaskFunction_t ) pxTaskCode == ( TaskFunction_t ) prvIdleTask ) || ( ( TaskFunction_t ) pxTaskCode == ( TaskFunction_t ) prvMinimalIdleTask ) ) + { + pxNewTCB->uxTaskAttributes |= taskATTRIBUTE_IS_IDLE; + } + } + #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ + + if( pxCreatedTask != NULL ) + { + /* Pass the handle out in an anonymous way. 
The handle can be used to + * change the created task's priority, delete the created task, etc.*/ + *pxCreatedTask = ( TaskHandle_t ) pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +#if ( configNUMBER_OF_CORES == 1 ) + + static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) + { + /* Ensure interrupts don't access the task lists while the lists are being + * updated. */ + taskENTER_CRITICAL(); + { + uxCurrentNumberOfTasks++; + + if( pxCurrentTCB == NULL ) + { + /* There are no other tasks, or all the other tasks are in + * the suspended state - make this the current task. */ + pxCurrentTCB = pxNewTCB; + + if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) + { + /* This is the first task to be created so do the preliminary + * initialisation required. We will not recover if this call + * fails, but we will report the failure. */ + prvInitialiseTaskLists(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* If the scheduler is not already running, make this task the + * current task if it is the highest priority task to be created + * so far. */ + if( xSchedulerRunning == pdFALSE ) + { + if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) + { + pxCurrentTCB = pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + uxTaskNumber++; -static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) -{ - /* Ensure interrupts don't access the task lists while the lists are being - * updated. */ - taskENTER_CRITICAL(); - { - uxCurrentNumberOfTasks++; + #if ( configUSE_TRACE_FACILITY == 1 ) + { + /* Add a counter into the TCB for tracing only. */ + pxNewTCB->uxTCBNumber = uxTaskNumber; + } + #endif /* configUSE_TRACE_FACILITY */ + traceTASK_CREATE( pxNewTCB ); - if( pxCurrentTCB == NULL ) - { - /* There are no other tasks, or all the other tasks are in - * the suspended state - make this the current task. */ - pxCurrentTCB = pxNewTCB; + prvAddTaskToReadyList( pxNewTCB ); + + portSETUP_TCB( pxNewTCB ); + } + taskEXIT_CRITICAL(); - if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) + if( xSchedulerRunning != pdFALSE ) + { + /* If the created task is of a higher priority than the current task + * then it should run now. */ + if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority ) { - /* This is the first task to be created so do the preliminary - * initialisation required. We will not recover if this call - * fails, but we will report the failure. */ - prvInitialiseTaskLists(); + taskYIELD_IF_USING_PREEMPTION(); } else { @@ -1055,60 +1790,91 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } else { - /* If the scheduler is not already running, make this task the - * current task if it is the highest priority task to be created - * so far. */ + mtCOVERAGE_TEST_MARKER(); + } + } + +#else /* #if ( configNUMBER_OF_CORES == 1 ) */ + + static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) + { + /* Ensure interrupts don't access the task lists while the lists are being + * updated. */ + taskENTER_CRITICAL(); + { + uxCurrentNumberOfTasks++; + if( xSchedulerRunning == pdFALSE ) { - if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) + if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) { - pxCurrentTCB = pxNewTCB; + /* This is the first task to be created so do the preliminary + * initialisation required. We will not recover if this call + * fails, but we will report the failure. 
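Note that prvInitialiseNewTask() above defaults uxCoreAffinityMask to tskNO_AFFINITY, so a task is only pinned when the application asks for it. A minimal sketch of the dynamic-allocation entry point follows; vNetworkTask, the name, the priority, and the mask are placeholder values.

```
#include "FreeRTOS.h"
#include "task.h"

/* Placeholder task function. */
extern void vNetworkTask( void * pvParameters );

void vCreateNetworkTask( void )
{
    TaskHandle_t xHandle = NULL;

    /* Allow the task to run on cores 0 and 1 only; passing tskNO_AFFINITY
     * instead would leave it free to run on any core. */
    if( xTaskCreateAffinitySet( vNetworkTask,
                                "Net",
                                configMINIMAL_STACK_SIZE,
                                NULL,
                                tskIDLE_PRIORITY + 2U,
                                ( ( 1U << 0 ) | ( 1U << 1 ) ),
                                &xHandle ) != pdPASS )
    {
        /* The TCB or stack could not be allocated. */
    }
}
```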
*/ + prvInitialiseTaskLists(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( pxNewTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U ) + { + BaseType_t xCoreID; + + /* Check if a core is free. */ + for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ ) + { + if( pxCurrentTCBs[ xCoreID ] == NULL ) + { + pxNewTCB->xTaskRunState = xCoreID; + pxCurrentTCBs[ xCoreID ] = pxNewTCB; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } } else { mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - uxTaskNumber++; + uxTaskNumber++; - #if ( configUSE_TRACE_FACILITY == 1 ) - { - /* Add a counter into the TCB for tracing only. */ - pxNewTCB->uxTCBNumber = uxTaskNumber; - } - #endif /* configUSE_TRACE_FACILITY */ - traceTASK_CREATE( pxNewTCB ); + #if ( configUSE_TRACE_FACILITY == 1 ) + { + /* Add a counter into the TCB for tracing only. */ + pxNewTCB->uxTCBNumber = uxTaskNumber; + } + #endif /* configUSE_TRACE_FACILITY */ + traceTASK_CREATE( pxNewTCB ); - prvAddTaskToReadyList( pxNewTCB ); + prvAddTaskToReadyList( pxNewTCB ); - portSETUP_TCB( pxNewTCB ); - } - taskEXIT_CRITICAL(); + portSETUP_TCB( pxNewTCB ); - if( xSchedulerRunning != pdFALSE ) - { - /* If the created task is of a higher priority than the current task - * then it should run now. */ - if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority ) - { - taskYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); + if( xSchedulerRunning != pdFALSE ) + { + /* If the created task is of a higher priority than another + * currently running task and preemption is on then it should + * run now. */ + #if ( configUSE_PREEMPTION == 1 ) + prvYieldForTask( pxNewTCB ); + #endif + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } + taskEXIT_CRITICAL(); } - else - { - mtCOVERAGE_TEST_MARKER(); - } -} + +#endif /* #if ( configNUMBER_OF_CORES == 1 ) */ /*-----------------------------------------------------------*/ #if ( INCLUDE_vTaskDelete == 1 ) @@ -1149,9 +1915,16 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * not return. */ uxTaskNumber++; - if( pxTCB == pxCurrentTCB ) + /* If the task is running (or yielding), we must add it to the + * termination list so that an idle task can delete it when it is + * no longer running. */ + #if ( configNUMBER_OF_CORES == 1 ) + if( pxTCB == pxCurrentTCB ) + #else + if( pxTCB->xTaskRunState != taskTASK_NOT_RUNNING ) + #endif { - /* A task is deleting itself. This cannot complete within the + /* A running task is being deleted. This cannot complete within the * task itself, as a context switch to another task is required. * Place the task in the termination list. The idle task will * check the termination list and free up any memory allocated by @@ -1172,7 +1945,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * after which it is not possible to yield away from this task - * hence xYieldPending is used to latch that a context switch is * required. */ - portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending ); + #if ( configNUMBER_OF_CORES == 1 ) + portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPendings[ 0 ] ); + #else + portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPendings[ pxTCB->xTaskRunState ] ); + #endif } else { @@ -1184,30 +1961,61 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvResetNextTaskUnblockTime(); } } - taskEXIT_CRITICAL(); - /* If the task is not deleting itself, call prvDeleteTCB from outside of - * critical section. 
If a task deletes itself, prvDeleteTCB is called - * from prvCheckTasksWaitingTermination which is called from Idle task. */ - if( pxTCB != pxCurrentTCB ) + #if ( configNUMBER_OF_CORES == 1 ) { - prvDeleteTCB( pxTCB ); - } + taskEXIT_CRITICAL(); - /* Force a reschedule if it is the currently running task that has just - * been deleted. */ - if( xSchedulerRunning != pdFALSE ) + /* If the task is not deleting itself, call prvDeleteTCB from outside of + * critical section. If a task deletes itself, prvDeleteTCB is called + * from prvCheckTasksWaitingTermination which is called from Idle task. */ + if( pxTCB != pxCurrentTCB ) + { + prvDeleteTCB( pxTCB ); + } + + /* Force a reschedule if it is the currently running task that has just + * been deleted. */ + if( xSchedulerRunning != pdFALSE ) + { + if( pxTCB == pxCurrentTCB ) + { + configASSERT( uxSchedulerSuspended == 0 ); + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - if( pxTCB == pxCurrentTCB ) + /* If a running task is not deleting itself, call prvDeleteTCB. If a running + * task deletes itself, prvDeleteTCB is called from prvCheckTasksWaitingTermination + * which is called from Idle task. */ + if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) { - configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U ); - portYIELD_WITHIN_API(); + prvDeleteTCB( pxTCB ); } - else + + /* Force a reschedule if the task that has just been deleted was running. */ + if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) ) { - mtCOVERAGE_TEST_MARKER(); + if( pxTCB->xTaskRunState == portGET_CORE_ID() ) + { + configASSERT( uxSchedulerSuspended == 0 ); + vTaskYieldWithinAPI(); + } + else + { + prvYieldCore( pxTCB->xTaskRunState ); + } } + + taskEXIT_CRITICAL(); } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } #endif /* INCLUDE_vTaskDelete */ @@ -1223,7 +2031,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( pxPreviousWakeTime ); configASSERT( ( xTimeIncrement > 0U ) ); - configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U ); vTaskSuspendAll(); { @@ -1231,6 +2038,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * block. */ const TickType_t xConstTickCount = xTickCount; + configASSERT( uxSchedulerSuspended == 1U ); + /* Generate the tick time at which the task wants to wake. */ xTimeToWake = *pxPreviousWakeTime + xTimeIncrement; @@ -1287,7 +2096,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * have put ourselves to sleep. */ if( xAlreadyYielded == pdFALSE ) { - portYIELD_WITHIN_API(); + #if ( configNUMBER_OF_CORES == 1 ) + portYIELD_WITHIN_API(); + #else + vTaskYieldWithinAPI(); + #endif } else { @@ -1309,9 +2122,10 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* A delay time of zero just forces a reschedule. */ if( xTicksToDelay > ( TickType_t ) 0U ) { - configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U ); vTaskSuspendAll(); { + configASSERT( uxSchedulerSuspended == 1U ); + traceTASK_DELAY(); /* A task that is removed from the event list while the @@ -1334,7 +2148,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * have put ourselves to sleep. 
*/ if( xAlreadyYielded == pdFALSE ) { - portYIELD_WITHIN_API(); + #if ( configNUMBER_OF_CORES == 1 ) + portYIELD_WITHIN_API(); + #else + vTaskYieldWithinAPI(); + #endif } else { @@ -1358,12 +2176,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( pxTCB ); - if( pxTCB == pxCurrentTCB ) - { - /* The task calling this function is querying its own state. */ - eReturn = eRunning; - } - else + #if ( configNUMBER_OF_CORES == 1 ) + if( pxTCB == pxCurrentTCB ) + { + /* The task calling this function is querying its own state. */ + eReturn = eRunning; + } + else + #endif { taskENTER_CRITICAL(); { @@ -1407,7 +2227,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * suspended. */ eReturn = eSuspended; - for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) { if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION ) { @@ -1441,9 +2261,27 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */ { - /* If the task is not in any other state, it must be in the - * Ready (including pending ready) state. */ - eReturn = eReady; + #if ( configNUMBER_OF_CORES == 1 ) + { + /* If the task is not in any other state, it must be in the + * Ready (including pending ready) state. */ + eReturn = eReady; + } + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ + { + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) + { + /* Is it actively running on a core? */ + eReturn = eRunning; + } + else + { + /* If the task is not in any other state, it must be in the + * Ready (including pending ready) state. */ + eReturn = eReady; + } + } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } } @@ -1481,7 +2319,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { TCB_t const * pxTCB; UBaseType_t uxReturn; - UBaseType_t uxSavedInterruptState; + UBaseType_t uxSavedInterruptStatus; /* RTOS ports that support interrupt nesting have the concept of a * maximum system call (or maximum API call) interrupt priority. @@ -1501,14 +2339,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { /* If null is passed in here then it is the priority of the calling * task that is being queried. */ pxTCB = prvGetTCBFromHandle( xTask ); uxReturn = pxTCB->uxPriority; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return uxReturn; } @@ -1525,6 +2363,10 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry; BaseType_t xYieldRequired = pdFALSE; + #if ( configNUMBER_OF_CORES > 1 ) + BaseType_t xYieldForTask = pdFALSE; + #endif + configASSERT( uxNewPriority < configMAX_PRIORITIES ); /* Ensure the new priority is valid. */ @@ -1558,36 +2400,51 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) if( uxCurrentBasePriority != uxNewPriority ) { /* The priority change may have readied a task of higher - * priority than the calling task. */ + * priority than a running task. 
*/ if( uxNewPriority > uxCurrentBasePriority ) { - if( pxTCB != pxCurrentTCB ) + #if ( configNUMBER_OF_CORES == 1 ) { - /* The priority of a task other than the currently - * running task is being raised. Is the priority being - * raised above that of the running task? */ - if( uxNewPriority > pxCurrentTCB->uxPriority ) + if( pxTCB != pxCurrentTCB ) { - xYieldRequired = pdTRUE; + /* The priority of a task other than the currently + * running task is being raised. Is the priority being + * raised above that of the running task? */ + if( uxNewPriority > pxCurrentTCB->uxPriority ) + { + xYieldRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { - mtCOVERAGE_TEST_MARKER(); + /* The priority of the running task is being raised, + * but the running task must already be the highest + * priority task able to run so no yield is required. */ } } - else + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - /* The priority of the running task is being raised, - * but the running task must already be the highest - * priority task able to run so no yield is required. */ + /* The priority of a task is being raised so + * perform a yield for this task later. */ + xYieldForTask = pdTRUE; } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } - else if( pxTCB == pxCurrentTCB ) + else if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) { - /* Setting the priority of the running task down means + /* Setting the priority of a running task down means * there may now be another task of higher priority that * is ready to execute. */ - xYieldRequired = pdTRUE; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxTCB->xPreemptionDisable == pdFALSE ) + #endif + { + xYieldRequired = pdTRUE; + } } else { @@ -1614,72 +2471,232 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } - /* The base priority gets set whatever. */ - pxTCB->uxBasePriority = uxNewPriority; - } - #else /* if ( configUSE_MUTEXES == 1 ) */ - { - pxTCB->uxPriority = uxNewPriority; - } - #endif /* if ( configUSE_MUTEXES == 1 ) */ + /* The base priority gets set whatever. */ + pxTCB->uxBasePriority = uxNewPriority; + } + #else /* if ( configUSE_MUTEXES == 1 ) */ + { + pxTCB->uxPriority = uxNewPriority; + } + #endif /* if ( configUSE_MUTEXES == 1 ) */ + + /* Only reset the event list item value if the value is not + * being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + { + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* If the task is in the blocked or suspended list we need do + * nothing more than change its priority variable. However, if + * the task is in a ready list it needs to be removed and placed + * in the list appropriate to its new priority. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) + { + /* The task is currently in its ready list - remove before + * adding it to its new ready list. As we are in a critical + * section we can do this even if the scheduler is suspended. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* It is known that the task is in its ready list so + * there is no need to check again and the port level + * reset macro can be called directly. 
*/ + portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + prvAddTaskToReadyList( pxTCB ); + } + else + { + #if ( configNUMBER_OF_CORES == 1 ) + { + mtCOVERAGE_TEST_MARKER(); + } + #else + { + /* It's possible that xYieldForTask was already set to pdTRUE because + * its priority is being raised. However, since it is not in a ready list + * we don't actually need to yield for it. */ + xYieldForTask = pdFALSE; + } + #endif + } + + #if ( configNUMBER_OF_CORES == 1 ) + { + if( xYieldRequired != pdFALSE ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ + { + #if ( configUSE_PREEMPTION == 1 ) + { + if( xYieldRequired != pdFALSE ) + { + prvYieldCore( pxTCB->xTaskRunState ); + } + else if( xYieldForTask != pdFALSE ) + { + prvYieldForTask( pxTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ + } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ + + /* Remove compiler warning about unused variables when the port + * optimised task selection is not being used. */ + ( void ) uxPriorityUsedOnEntry; + } + } + taskEXIT_CRITICAL(); + } + +#endif /* INCLUDE_vTaskPrioritySet */ +/*-----------------------------------------------------------*/ + +#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + void vTaskCoreAffinitySet( const TaskHandle_t xTask, + UBaseType_t uxCoreAffinityMask ) + { + TCB_t * pxTCB; + BaseType_t xCoreID; + UBaseType_t uxPrevCoreAffinityMask; + + #if ( configUSE_PREEMPTION == 1 ) + UBaseType_t uxPrevNotAllowedCores; + #endif + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + + uxPrevCoreAffinityMask = pxTCB->uxCoreAffinityMask; + pxTCB->uxCoreAffinityMask = uxCoreAffinityMask; - /* Only reset the event list item value if the value is not - * being used for anything else. */ - if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + if( xSchedulerRunning != pdFALSE ) + { + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) { - listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + + /* If the task can no longer run on the core it was running, + * request the core to yield. */ + if( ( uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) == 0U ) + { + prvYieldCore( xCoreID ); + } } else { - mtCOVERAGE_TEST_MARKER(); - } - - /* If the task is in the blocked or suspended list we need do - * nothing more than change its priority variable. However, if - * the task is in a ready list it needs to be removed and placed - * in the list appropriate to its new priority. */ - if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) - { - /* The task is currently in its ready list - remove before - * adding it to its new ready list. As we are in a critical - * section we can do this even if the scheduler is suspended. */ - if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + #if ( configUSE_PREEMPTION == 1 ) { - /* It is known that the task is in its ready list so - * there is no need to check again and the port level - * reset macro can be called directly. 
*/ - portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority ); + /* Calculate the cores on which this task was not allowed to + * run previously. */ + uxPrevNotAllowedCores = ( ~uxPrevCoreAffinityMask ) & ( ( 1U << configNUMBER_OF_CORES ) - 1U ); + + /* Does the new core mask enable this task to run on any of the + * previously not allowed cores? If yes, check if this task can be + * scheduled on any of those cores. */ + if( ( uxPrevNotAllowedCores & uxCoreAffinityMask ) != 0U ) + { + prvYieldForTask( pxTCB ); + } } - else + #else /* #if( configUSE_PREEMPTION == 1 ) */ { mtCOVERAGE_TEST_MARKER(); } - - prvAddTaskToReadyList( pxTCB ); - } - else - { - mtCOVERAGE_TEST_MARKER(); + #endif /* #if( configUSE_PREEMPTION == 1 ) */ } + } + } + taskEXIT_CRITICAL(); + } +#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ +/*-----------------------------------------------------------*/ - if( xYieldRequired != pdFALSE ) - { - taskYIELD_IF_USING_PREEMPTION(); - } - else +#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + UBaseType_t vTaskCoreAffinityGet( ConstTaskHandle_t xTask ) + { + const TCB_t * pxTCB; + UBaseType_t uxCoreAffinityMask; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; + } + taskEXIT_CRITICAL(); + + return uxCoreAffinityMask; + } +#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) */ + +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + + void vTaskPreemptionDisable( const TaskHandle_t xTask ) + { + TCB_t * pxTCB; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTCB->xPreemptionDisable = pdTRUE; + } + taskEXIT_CRITICAL(); + } + +#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + + void vTaskPreemptionEnable( const TaskHandle_t xTask ) + { + TCB_t * pxTCB; + BaseType_t xCoreID; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTCB->xPreemptionDisable = pdFALSE; + + if( xSchedulerRunning != pdFALSE ) + { + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) { - mtCOVERAGE_TEST_MARKER(); + xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + prvYieldCore( xCoreID ); } - - /* Remove compiler warning about unused variables when the port - * optimised task selection is not being used. */ - ( void ) uxPriorityUsedOnEntry; } } taskEXIT_CRITICAL(); } -#endif /* INCLUDE_vTaskPrioritySet */ +#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ /*-----------------------------------------------------------*/ #if ( INCLUDE_vTaskSuspend == 1 ) @@ -1688,6 +2705,10 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { TCB_t * pxTCB; + #if ( configNUMBER_OF_CORES > 1 ) + TaskRunning_t xTaskRunningOnCore; + #endif + taskENTER_CRITICAL(); { /* If null is passed in here then it is the running task that is @@ -1696,6 +2717,10 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) traceTASK_SUSPEND( pxTCB ); + #if ( configNUMBER_OF_CORES > 1 ) + xTaskRunningOnCore = pxTCB->xTaskRunState; + #endif + /* Remove task from the ready/delayed list and place in the * suspended list.
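Taken together, the new affinity and preemption-control APIs compose as in the usage sketch below. The handle, the mask, and the "timing sensitive work" are placeholders, and vTaskPreemptionDisable()/vTaskPreemptionEnable() require configUSE_TASK_PREEMPTION_DISABLE to be 1.

```
#include "FreeRTOS.h"
#include "task.h"

void vRestrictAndProtectTask( TaskHandle_t xHandle )
{
    UBaseType_t uxMask;

    /* Read the current affinity mask, then restrict the task to core 1. */
    uxMask = vTaskCoreAffinityGet( xHandle );
    ( void ) uxMask;
    vTaskCoreAffinitySet( xHandle, ( 1U << 1 ) );

    /* Keep the scheduler from preempting the task while it completes a
     * timing sensitive sequence. */
    vTaskPreemptionDisable( xHandle );
    /* ... timing sensitive work here ... */
    vTaskPreemptionEnable( xHandle );
}
```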
*/ if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) @@ -1723,7 +2748,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { BaseType_t x; - for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) { if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION ) { @@ -1735,54 +2760,104 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ } - taskEXIT_CRITICAL(); - if( xSchedulerRunning != pdFALSE ) + #if ( configNUMBER_OF_CORES == 1 ) { - /* Reset the next expected unblock time in case it referred to the - * task that is now in the Suspended state. */ - taskENTER_CRITICAL(); - { - prvResetNextTaskUnblockTime(); - } taskEXIT_CRITICAL(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - if( pxTCB == pxCurrentTCB ) - { if( xSchedulerRunning != pdFALSE ) { - /* The current task has just been suspended. */ - configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U ); - portYIELD_WITHIN_API(); + /* Reset the next expected unblock time in case it referred to the + * task that is now in the Suspended state. */ + taskENTER_CRITICAL(); + { + prvResetNextTaskUnblockTime(); + } + taskEXIT_CRITICAL(); } else { - /* The scheduler is not running, but the task that was pointed - * to by pxCurrentTCB has just been suspended and pxCurrentTCB - * must be adjusted to point to a different task. */ - if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */ + mtCOVERAGE_TEST_MARKER(); + } + + if( pxTCB == pxCurrentTCB ) + { + if( xSchedulerRunning != pdFALSE ) { - /* No other tasks are ready, so set pxCurrentTCB back to - * NULL so when the next task is created pxCurrentTCB will - * be set to point to it no matter what its relative priority - * is. */ - pxCurrentTCB = NULL; + /* The current task has just been suspended. */ + configASSERT( uxSchedulerSuspended == 0 ); + portYIELD_WITHIN_API(); } else { - vTaskSwitchContext(); + /* The scheduler is not running, but the task that was pointed + * to by pxCurrentTCB has just been suspended and pxCurrentTCB + * must be adjusted to point to a different task. */ + if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */ + { + /* No other tasks are ready, so set pxCurrentTCB back to + * NULL so when the next task is created pxCurrentTCB will + * be set to point to it no matter what its relative priority + * is. */ + pxCurrentTCB = NULL; + } + else + { + vTaskSwitchContext(); + } } } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - mtCOVERAGE_TEST_MARKER(); + if( xSchedulerRunning != pdFALSE ) + { + /* Reset the next expected unblock time in case it referred to the + * task that is now in the Suspended state. */ + prvResetNextTaskUnblockTime(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) + { + if( xSchedulerRunning != pdFALSE ) + { + if( xTaskRunningOnCore == portGET_CORE_ID() ) + { + /* The current task has just been suspended. */ + configASSERT( uxSchedulerSuspended == 0 ); + vTaskYieldWithinAPI(); + } + else + { + prvYieldCore( xTaskRunningOnCore ); + } + } + else + { + /* This code path is not possible because only Idle tasks are + * assigned a core before the scheduler is started ( i.e. 
+ * taskTASK_IS_RUNNING is only true for idle tasks before + * the scheduler is started ) and idle tasks cannot be + * suspended. */ + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskEXIT_CRITICAL(); } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } #endif /* INCLUDE_vTaskSuspend */ @@ -1843,9 +2918,20 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* It does not make sense to resume the calling task. */ configASSERT( xTaskToResume ); - /* The parameter cannot be NULL as it is impossible to resume the - * currently executing task. */ - if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) ) + #if ( configNUMBER_OF_CORES == 1 ) + + /* The parameter cannot be NULL as it is impossible to resume the + * currently executing task. */ + if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) ) + #else + + /* The parameter cannot be NULL as it is impossible to resume the + * currently executing task. It is also impossible to resume a task + * that is actively running on another core but it is not safe + * to check their run state here. Therefore, we get into a critical + * section and check if the task is actually suspended or not. */ + if( pxTCB != NULL ) + #endif { taskENTER_CRITICAL(); { @@ -1858,18 +2944,30 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); - /* A higher priority task may have just been resumed. */ - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + #if ( configNUMBER_OF_CORES == 1 ) { - /* This yield may not cause the task just resumed to run, - * but will leave the lists in the correct state for the - * next yield. */ - taskYIELD_IF_USING_PREEMPTION(); + /* A higher priority task may have just been resumed. */ + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* This yield may not cause the task just resumed to run, + * but will leave the lists in the correct state for the + * next yield. */ + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - mtCOVERAGE_TEST_MARKER(); + #if ( configUSE_PREEMPTION == 1 ) + { + prvYieldForTask( pxTCB ); + } + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } else { @@ -1916,97 +3014,248 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); - { - if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); + { + if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) + { + traceTASK_RESUME_FROM_ISR( pxTCB ); + + /* Check the ready lists can be accessed. */ + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) + { + #if ( configNUMBER_OF_CORES == 1 ) + { + /* Ready lists can be accessed so move the task from the + * suspended list to the ready list directly. */ + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + xYieldRequired = pdTRUE; + + /* Mark that a yield is pending in case the user is not + * using the return value to initiate a context switch + * from the ISR using portYIELD_FROM_ISR. 
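The return value mentioned in the comment above is consumed with the usual ISR-side pattern; a minimal sketch, assuming xSuspendedTaskHandle names a task suspended earlier by the application:

```
#include "FreeRTOS.h"
#include "task.h"

/* Placeholder: a task suspended earlier with vTaskSuspend(). */
extern TaskHandle_t xSuspendedTaskHandle;

void vExampleInterruptHandler( void )
{
    BaseType_t xYieldRequired;

    /* Resume the suspended task from the ISR... */
    xYieldRequired = xTaskResumeFromISR( xSuspendedTaskHandle );

    /* ...then request a context switch on this core if the resumed task
     * should run immediately. */
    portYIELD_FROM_ISR( xYieldRequired );
}
```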
*/ + xYieldPendings[ 0 ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ + + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed or ready lists cannot be accessed so the task + * is held in the pending ready list until the scheduler is + * unsuspended. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) + { + prvYieldForTask( pxTCB ); + + if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) + { + xYieldRequired = pdTRUE; + } + } + #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + + return xYieldRequired; + } + +#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */ +/*-----------------------------------------------------------*/ + +static BaseType_t prvCreateIdleTasks( void ) +{ + BaseType_t xReturn = pdPASS; + + #if ( configNUMBER_OF_CORES == 1 ) + { + /* Add the idle task at the lowest priority. */ + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + StaticTask_t * pxIdleTaskTCBBuffer = NULL; + StackType_t * pxIdleTaskStackBuffer = NULL; + uint32_t ulIdleTaskStackSize; + + /* The Idle task is created using user provided RAM - obtain the + * address of the RAM then create the idle task. */ + vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); + xIdleTaskHandles[ 0 ] = xTaskCreateStatic( prvIdleTask, + configIDLE_TASK_NAME, + ulIdleTaskStackSize, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + pxIdleTaskStackBuffer, + pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + + if( xIdleTaskHandles[ 0 ] != NULL ) + { + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + } + #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ + { + /* The Idle task is being created using dynamically allocated RAM. */ + xReturn = xTaskCreate( prvIdleTask, + configIDLE_TASK_NAME, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandles[ 0 ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + } + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ + { + BaseType_t xCoreID; + char cIdleName[ configMAX_TASK_NAME_LEN ]; + + /* Add each idle task at the lowest priority. */ + for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ ) + { + BaseType_t x; + + if( xReturn == pdFAIL ) + { + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configMAX_TASK_NAME_LEN; x++ ) { - traceTASK_RESUME_FROM_ISR( pxTCB ); + cIdleName[ x ] = configIDLE_TASK_NAME[ x ]; - /* Check the ready lists can be accessed. 
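When configSUPPORT_STATIC_ALLOCATION is 1, prvCreateIdleTasks() obtains the first idle task's memory from the application through the hook called above; the canonical implementation is small:

```
#include "FreeRTOS.h"
#include "task.h"

void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer,
                                    StackType_t ** ppxIdleTaskStackBuffer,
                                    uint32_t * pulIdleTaskStackSize )
{
    /* Static so the memory persists after this function returns. */
    static StaticTask_t xIdleTaskTCB;
    static StackType_t uxIdleTaskStack[ configMINIMAL_STACK_SIZE ];

    *ppxIdleTaskTCBBuffer = &xIdleTaskTCB;
    *ppxIdleTaskStackBuffer = uxIdleTaskStack;
    *pulIdleTaskStackSize = configMINIMAL_STACK_SIZE;
}
```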
*/ - if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) + /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than + * configMAX_TASK_NAME_LEN characters just in case the memory after the + * string is not accessible (extremely unlikely). */ + if( cIdleName[ x ] == ( char ) 0x00 ) { - /* Ready lists can be accessed so move the task from the - * suspended list to the ready list directly. */ - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) - { - xYieldRequired = pdTRUE; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } - /* Mark that a yield is pending in case the user is not - * using the return value to initiate a context switch - * from the ISR using portYIELD_FROM_ISR. */ - xYieldPending = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Append the idle task number to the end of the name if there is space. */ + if( x < ( BaseType_t ) configMAX_TASK_NAME_LEN ) + { + cIdleName[ x ] = ( char ) ( xCoreID + '0' ); + x++; - ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); - prvAddTaskToReadyList( pxTCB ); + /* And append a null character if there is space. */ + if( x < ( BaseType_t ) configMAX_TASK_NAME_LEN ) + { + cIdleName[ x ] = '\0'; } else { - /* The delayed or ready lists cannot be accessed so the task - * is held in the pending ready list until the scheduler is - * unsuspended. */ - vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } - } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); - return xYieldRequired; + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + if( xCoreID == 0 ) + { + StaticTask_t * pxIdleTaskTCBBuffer = NULL; + StackType_t * pxIdleTaskStackBuffer = NULL; + uint32_t ulIdleTaskStackSize; + + /* The Idle task is created using user provided RAM - obtain the + * address of the RAM then create the idle task. */ + vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); + xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( prvIdleTask, + cIdleName, + ulIdleTaskStackSize, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + pxIdleTaskStackBuffer, + pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + else + { + xIdleTaskHandles[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + xIdleTaskStackBuffers[ xCoreID - 1 ], + &xIdleTCBBuffers[ xCoreID - 1 ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + + if( xIdleTaskHandles[ xCoreID ] != NULL ) + { + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + } + #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ + { + if( xCoreID == 0 ) + { + /* The Idle task is being created using dynamically allocated RAM. */ + xReturn = xTaskCreate( prvIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. 
*/ + &xIdleTaskHandles[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + else + { + xReturn = xTaskCreate( prvMinimalIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandles[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + } } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ + + return xReturn; +} -#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */ /*-----------------------------------------------------------*/ void vTaskStartScheduler( void ) { BaseType_t xReturn; - /* Add the idle task at the lowest priority. */ - #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) - { - StaticTask_t * pxIdleTaskTCBBuffer = NULL; - StackType_t * pxIdleTaskStackBuffer = NULL; - uint32_t ulIdleTaskStackSize; - - /* The Idle task is created using user provided RAM - obtain the - * address of the RAM then create the idle task. */ - vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); - xIdleTaskHandle = xTaskCreateStatic( prvIdleTask, - configIDLE_TASK_NAME, - ulIdleTaskStackSize, - ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - pxIdleTaskStackBuffer, - pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - - if( xIdleTaskHandle != NULL ) - { - xReturn = pdPASS; - } - else - { - xReturn = pdFAIL; - } - } - #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ - { - /* The Idle task is being created using dynamically allocated RAM. */ - xReturn = xTaskCreate( prvIdleTask, - configIDLE_TASK_NAME, - configMINIMAL_STACK_SIZE, - ( void * ) NULL, - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - } - #endif /* configSUPPORT_STATIC_ALLOCATION */ + xReturn = prvCreateIdleTasks(); #if ( configUSE_TIMERS == 1 ) { @@ -2081,8 +3330,8 @@ void vTaskStartScheduler( void ) } /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0, - * meaning xIdleTaskHandle is not used anywhere else. */ - ( void ) xIdleTaskHandle; + * meaning xIdleTaskHandles are not used anywhere else. */ + ( void ) xIdleTaskHandles; /* OpenOCD makes use of uxTopUsedPriority for thread debugging. Prevent uxTopUsedPriority * from getting optimized out as it is no longer used by the kernel. */ @@ -2103,23 +3352,83 @@ void vTaskEndScheduler( void ) void vTaskSuspendAll( void ) { - /* A critical section is not required as the variable is of type - * BaseType_t. Please read Richard Barry's reply in the following link to a - * post in the FreeRTOS support forum before reporting this as a bug! - - * https://goo.gl/wu4acr */ - - /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that - * do not otherwise exhibit real time behaviour. */ - portSOFTWARE_BARRIER(); - - /* The scheduler is suspended if uxSchedulerSuspended is non-zero. 
An increment
- * is used to allow calls to vTaskSuspendAll() to nest. */
- ++uxSchedulerSuspended;
-
- /* Enforces ordering for ports and optimised compilers that may otherwise place
- * the above increment elsewhere. */
- portMEMORY_BARRIER();
+ #if ( configNUMBER_OF_CORES == 1 )
+ {
+ /* A critical section is not required as the variable is of type
+ * BaseType_t. Please read Richard Barry's reply in the following link to a
+ * post in the FreeRTOS support forum before reporting this as a bug! -
+ * https://goo.gl/wu4acr */
+
+ /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
+ * do not otherwise exhibit real time behaviour. */
+ portSOFTWARE_BARRIER();
+
+ /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment
+ * is used to allow calls to vTaskSuspendAll() to nest. */
+ ++uxSchedulerSuspended;
+
+ /* Enforces ordering for ports and optimised compilers that may otherwise place
+ * the above increment elsewhere. */
+ portMEMORY_BARRIER();
+ }
+ #else /* #if ( configNUMBER_OF_CORES == 1 ) */
+ {
+ UBaseType_t ulState;
+
+ /* This must only be called from within a task. */
+ portASSERT_IF_IN_ISR();
+
+ if( xSchedulerRunning != pdFALSE )
+ {
+ /* Writes to uxSchedulerSuspended must be protected by both the task AND ISR locks.
+ * We must disable interrupts before we grab the locks in the event that this task is
+ * interrupted and switches context before incrementing uxSchedulerSuspended.
+ * It is safe to re-enable interrupts after releasing the ISR lock and incrementing
+ * uxSchedulerSuspended since that will prevent context switches. */
+ ulState = portSET_INTERRUPT_MASK();
+
+ /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
+ * do not otherwise exhibit real time behaviour. */
+ portSOFTWARE_BARRIER();
+
+ portGET_TASK_LOCK();
+
+ /* uxSchedulerSuspended is increased after prvCheckForRunStateChange. The
+ * purpose is to prevent altering the variable when fromISR APIs are readying
+ * it. */
+ if( uxSchedulerSuspended == 0U )
+ {
+ if( portGET_CRITICAL_NESTING_COUNT() == 0U )
+ {
+ prvCheckForRunStateChange();
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+
+ portGET_ISR_LOCK();
+
+ /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment
+ * is used to allow calls to vTaskSuspendAll() to nest. */
+ ++uxSchedulerSuspended;
+ portRELEASE_ISR_LOCK();
+
+ portCLEAR_INTERRUPT_MASK( ulState );
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
 }
+ /*----------------------------------------------------------*/
 #if ( configUSE_TICKLESS_IDLE != 0 )
@@ -2190,108 +3499,139 @@ BaseType_t xTaskResumeAll( void )
 TCB_t * pxTCB = NULL;
 BaseType_t xAlreadyYielded = pdFALSE;
- /* If uxSchedulerSuspended is zero then this function does not match a
- * previous call to vTaskSuspendAll(). */
- configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
-
- /* It is possible that an ISR caused a task to be removed from an event
- * list while the scheduler was suspended. If this was the case then the
- * removed task will have been added to the xPendingReadyList. Once the
- * scheduler has been resumed it is safe to move all the pending ready
- * tasks from this list into their appropriate ready list.
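 *
 * ( Example only, added for illustration and not part of this change:
 * the usage pattern vTaskSuspendAll() supports; prvWalkTaskLists is a
 * hypothetical function. )
 *
 *    // Stop the scheduler swapping this task out while shared
 *    // structures are walked. Calls nest, so every vTaskSuspendAll()
 *    // must be balanced by exactly one xTaskResumeAll().
 *    vTaskSuspendAll();
 *    {
 *        prvWalkTaskLists();
 *    }
 *    ( void ) xTaskResumeAll();
 *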
*/ - taskENTER_CRITICAL(); + #if ( configNUMBER_OF_CORES > 1 ) + if( xSchedulerRunning != pdFALSE ) + #endif { - --uxSchedulerSuspended; - - if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) + /* It is possible that an ISR caused a task to be removed from an event + * list while the scheduler was suspended. If this was the case then the + * removed task will have been added to the xPendingReadyList. Once the + * scheduler has been resumed it is safe to move all the pending ready + * tasks from this list into their appropriate ready list. */ + taskENTER_CRITICAL(); { - if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U ) - { - /* Move any readied tasks from the pending list into the - * appropriate ready list. */ - while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE ) - { - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - listREMOVE_ITEM( &( pxTCB->xEventListItem ) ); - portMEMORY_BARRIER(); - listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); - prvAddTaskToReadyList( pxTCB ); + BaseType_t xCoreID; + xCoreID = portGET_CORE_ID(); - /* If the moved task has a priority higher than the current - * task then a yield must be performed. */ - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) - { - xYieldPending = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } + /* If uxSchedulerSuspended is zero then this function does not match a + * previous call to vTaskSuspendAll(). */ + configASSERT( uxSchedulerSuspended != 0U ); - if( pxTCB != NULL ) - { - /* A task was unblocked while the scheduler was suspended, - * which may have prevented the next unblock time from being - * re-calculated, in which case re-calculate it now. Mainly - * important for low power tickless implementations, where - * this can prevent an unnecessary exit from low power - * state. */ - prvResetNextTaskUnblockTime(); - } + --uxSchedulerSuspended; + portRELEASE_TASK_LOCK(); - /* If any ticks occurred while the scheduler was suspended then - * they should be processed now. This ensures the tick count does - * not slip, and that any delayed tasks are resumed at the correct - * time. */ + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) + { + if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U ) { - TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */ - - if( xPendedCounts > ( TickType_t ) 0U ) + /* Move any readied tasks from the pending list into the + * appropriate ready list. */ + while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE ) { - do + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + listREMOVE_ITEM( &( pxTCB->xEventListItem ) ); + portMEMORY_BARRIER(); + listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + #if ( configNUMBER_OF_CORES == 1 ) { - if( xTaskIncrementTick() != pdFALSE ) + /* If the moved task has a priority higher than the current + * task then a yield must be performed. */ + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { - xYieldPending = pdTRUE; + xYieldPendings[ xCoreID ] = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } + } + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ + { + /* All appropriate tasks yield at the moment a task is added to xPendingReadyList. 
+ * If the current core yielded then vTaskSwitchContext() has already been called + * which sets xYieldPendings for the current core to pdTRUE. */ + } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ + } - --xPendedCounts; - } while( xPendedCounts > ( TickType_t ) 0U ); - - xPendedTicks = 0; + if( pxTCB != NULL ) + { + /* A task was unblocked while the scheduler was suspended, + * which may have prevented the next unblock time from being + * re-calculated, in which case re-calculate it now. Mainly + * important for low power tickless implementations, where + * this can prevent an unnecessary exit from low power + * state. */ + prvResetNextTaskUnblockTime(); } - else + + /* If any ticks occurred while the scheduler was suspended then + * they should be processed now. This ensures the tick count does + * not slip, and that any delayed tasks are resumed at the correct + * time. + * + * It should be safe to call xTaskIncrementTick here from any core + * since we are in a critical section and xTaskIncrementTick itself + * protects itself within a critical section. Suspending the scheduler + * from any core causes xTaskIncrementTick to increment uxPendedCounts. */ { - mtCOVERAGE_TEST_MARKER(); + TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */ + + if( xPendedCounts > ( TickType_t ) 0U ) + { + do + { + if( xTaskIncrementTick() != pdFALSE ) + { + /* Other cores are interrupted from + * within xTaskIncrementTick(). */ + xYieldPendings[ xCoreID ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + --xPendedCounts; + } while( xPendedCounts > ( TickType_t ) 0U ); + + xPendedTicks = 0; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - } - if( xYieldPending != pdFALSE ) - { - #if ( configUSE_PREEMPTION != 0 ) + if( xYieldPendings[ xCoreID ] != pdFALSE ) + { + #if ( configUSE_PREEMPTION != 0 ) + { + xAlreadyYielded = pdTRUE; + } + #endif /* #if ( configUSE_PREEMPTION != 0 ) */ + + #if ( configNUMBER_OF_CORES == 1 ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ + } + else { - xAlreadyYielded = pdTRUE; + mtCOVERAGE_TEST_MARKER(); } - #endif - taskYIELD_IF_USING_PREEMPTION(); - } - else - { - mtCOVERAGE_TEST_MARKER(); } } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + taskEXIT_CRITICAL(); } - taskEXIT_CRITICAL(); return xAlreadyYielded; } @@ -2365,71 +3705,137 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char #if ( INCLUDE_xTaskGetHandle == 1 ) - static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList, - const char pcNameToQuery[] ) - { - TCB_t * pxNextTCB; - TCB_t * pxFirstTCB; - TCB_t * pxReturn = NULL; - UBaseType_t x; - char cNextChar; - BaseType_t xBreakLoop; - - /* This function is called with the scheduler suspended. */ - - if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + #if ( configNUMBER_OF_CORES == 1 ) + static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList, + const char pcNameToQuery[] ) { - listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + TCB_t * pxNextTCB; + TCB_t * pxFirstTCB; + TCB_t * pxReturn = NULL; + UBaseType_t x; + char cNextChar; + BaseType_t xBreakLoop; - do - { - listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. 
Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + /* This function is called with the scheduler suspended. */ - /* Check each character in the name looking for a match or - * mismatch. */ - xBreakLoop = pdFALSE; + if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + { + listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + do { - cNextChar = pxNextTCB->pcTaskName[ x ]; + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + /* Check each character in the name looking for a match or + * mismatch. */ + xBreakLoop = pdFALSE; - if( cNextChar != pcNameToQuery[ x ] ) + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) { - /* Characters didn't match. */ - xBreakLoop = pdTRUE; + cNextChar = pxNextTCB->pcTaskName[ x ]; + + if( cNextChar != pcNameToQuery[ x ] ) + { + /* Characters didn't match. */ + xBreakLoop = pdTRUE; + } + else if( cNextChar == ( char ) 0x00 ) + { + /* Both strings terminated, a match must have been + * found. */ + pxReturn = pxNextTCB; + xBreakLoop = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xBreakLoop != pdFALSE ) + { + break; + } } - else if( cNextChar == ( char ) 0x00 ) + + if( pxReturn != NULL ) { - /* Both strings terminated, a match must have been - * found. */ - pxReturn = pxNextTCB; - xBreakLoop = pdTRUE; + /* The handle has been found. */ + break; } - else + } while( pxNextTCB != pxFirstTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return pxReturn; + } + #else /* if ( configNUMBER_OF_CORES == 1 ) */ + static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList, + const char pcNameToQuery[] ) + { + TCB_t * pxReturn = NULL; + UBaseType_t x; + char cNextChar; + BaseType_t xBreakLoop; + const ListItem_t * pxEndMarker = listGET_END_MARKER( pxList ); + ListItem_t * pxIterator; + + /* This function is called with the scheduler suspended. */ + + if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + { + for( pxIterator = listGET_HEAD_ENTRY( pxList ); pxIterator != pxEndMarker; pxIterator = listGET_NEXT( pxIterator ) ) + { + TCB_t * pxTCB = listGET_LIST_ITEM_OWNER( pxIterator ); + + /* Check each character in the name looking for a match or + * mismatch. */ + xBreakLoop = pdFALSE; + + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) { - mtCOVERAGE_TEST_MARKER(); + cNextChar = pxTCB->pcTaskName[ x ]; + + if( cNextChar != pcNameToQuery[ x ] ) + { + /* Characters didn't match. */ + xBreakLoop = pdTRUE; + } + else if( cNextChar == ( char ) 0x00 ) + { + /* Both strings terminated, a match must have been + * found. */ + pxReturn = pxTCB; + xBreakLoop = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xBreakLoop != pdFALSE ) + { + break; + } } - if( xBreakLoop != pdFALSE ) + if( pxReturn != NULL ) { + /* The handle has been found. */ break; } } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - if( pxReturn != NULL ) - { - /* The handle has been found. 
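 *
 * ( Example only, added for illustration and not part of this change:
 * both versions of the search above back the public xTaskGetHandle()
 * API; "CheckTask" is a hypothetical task name. )
 *
 *    TaskHandle_t xHandle;
 *
 *    // Linear search performed with the scheduler suspended, so this
 *    // is slow and intended for debug use only.
 *    xHandle = xTaskGetHandle( "CheckTask" );
 *
 *    if( xHandle != NULL )
 *    {
 *        vTaskSuspend( xHandle );
 *    }
 *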
*/
- break;
- }
- } while( pxNextTCB != pxFirstTCB );
- }
- else
- {
- mtCOVERAGE_TEST_MARKER();
+ return pxReturn;
 }
-
- return pxReturn;
- }
+ #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
 #endif /* INCLUDE_xTaskGetHandle */
 /*-----------------------------------------------------------*/
@@ -2622,12 +4028,14 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
+/* SMP_TODO : This function returns only the idle task handle for core 0.
+ * Consider adding another function to return the idle task handles. */
 TaskHandle_t xTaskGetIdleTaskHandle( void )
 {
 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
- * started, then xIdleTaskHandle will be NULL. */
- configASSERT( ( xIdleTaskHandle != NULL ) );
- return xIdleTaskHandle;
+ * started, then xIdleTaskHandles will be NULL. */
+ configASSERT( ( xIdleTaskHandles[ 0 ] != NULL ) );
+ return xIdleTaskHandles[ 0 ];
 }
 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
@@ -2749,21 +4157,33 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
 * switch if preemption is turned off. */
 #if ( configUSE_PREEMPTION == 1 )
 {
- /* Preemption is on, but a context switch should only be
- * performed if the unblocked task has a priority that is
- * higher than the currently executing task. */
- if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
+ #if ( configNUMBER_OF_CORES == 1 )
 {
- /* Pend the yield to be performed when the scheduler
- * is unsuspended. */
- xYieldPending = pdTRUE;
+ /* Preemption is on, but a context switch should only be
+ * performed if the unblocked task has a priority that is
+ * higher than the currently executing task. */
+ if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
+ {
+ /* Pend the yield to be performed when the scheduler
+ * is unsuspended. */
+ xYieldPendings[ 0 ] = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
 }
- else
+ #else /* #if ( configNUMBER_OF_CORES == 1 ) */
 {
- mtCOVERAGE_TEST_MARKER();
+ taskENTER_CRITICAL();
+ {
+ prvYieldForTask( pxTCB );
+ }
+ taskEXIT_CRITICAL();
 }
+ #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
 }
- #endif /* configUSE_PREEMPTION */
+ #endif /* #if ( configUSE_PREEMPTION == 1 ) */
 }
 else
 {
@@ -2784,11 +4204,19 @@ BaseType_t xTaskIncrementTick( void )
 TickType_t xItemValue;
 BaseType_t xSwitchRequired = pdFALSE;
+ #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 )
+ BaseType_t xYieldRequiredForCore[ configNUMBER_OF_CORES ] = { pdFALSE };
+ #endif /* #if ( configUSE_PREEMPTION == 1 ) && ( configNUMBER_OF_CORES > 1 ) */
+
 /* Called by the portable layer each time a tick interrupt occurs.
 * Increments the tick then checks to see if the new tick value will cause any
 * tasks to be unblocked. */
 traceTASK_INCREMENT_TICK( xTickCount );
+ /* Tick increment should occur on every kernel timer event. Core 0 has the
+ * responsibility to increment the tick, or increment the pended ticks if the
+ * scheduler is suspended. If pended ticks is greater than zero, the core that
+ * calls xTaskResumeAll has the responsibility to increment the tick. */
 if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
 {
 /* Minor optimisation. The tick count cannot change in this
@@ -2872,24 +4300,32 @@ BaseType_t xTaskIncrementTick( void )
 * context switch if preemption is turned off. */
 #if ( configUSE_PREEMPTION == 1 )
 {
- /* Preemption is on, but a context switch should
- * only be performed if the unblocked task's
- * priority is higher than the currently executing
- * task.
- * The case of equal priority tasks sharing - * processing time (which happens when both - * preemption and time slicing are on) is - * handled below.*/ - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + #if ( configNUMBER_OF_CORES == 1 ) { - xSwitchRequired = pdTRUE; + /* Preemption is on, but a context switch should + * only be performed if the unblocked task's + * priority is higher than the currently executing + * task. + * The case of equal priority tasks sharing + * processing time (which happens when both + * preemption and time slicing are on) is + * handled below.*/ + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else + #else /* #if( configNUMBER_OF_CORES == 1 ) */ { - mtCOVERAGE_TEST_MARKER(); + prvYieldForTask( pxTCB ); } + #endif /* #if( configNUMBER_OF_CORES == 1 ) */ } - #endif /* configUSE_PREEMPTION */ + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } } } @@ -2899,16 +4335,36 @@ BaseType_t xTaskIncrementTick( void ) * writer has not explicitly turned time slicing off. */ #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + #if ( configNUMBER_OF_CORES == 1 ) { - xSwitchRequired = pdTRUE; + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - mtCOVERAGE_TEST_MARKER(); + BaseType_t xCoreID; + + for( xCoreID = 0; xCoreID < ( ( BaseType_t ) configNUMBER_OF_CORES ); xCoreID++ ) + { + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ xCoreID ]->uxPriority ] ) ) > 1 ) + { + xYieldRequiredForCore[ xCoreID ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } - #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ + #endif /* #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ #if ( configUSE_TICK_HOOK == 1 ) { @@ -2927,16 +4383,50 @@ BaseType_t xTaskIncrementTick( void ) #if ( configUSE_PREEMPTION == 1 ) { - if( xYieldPending != pdFALSE ) + #if ( configNUMBER_OF_CORES == 1 ) { - xSwitchRequired = pdTRUE; + /* For single core the core ID is always 0. */ + if( xYieldPendings[ 0 ] != pdFALSE ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - mtCOVERAGE_TEST_MARKER(); + BaseType_t xCoreID, xCurrentCoreID; + xCurrentCoreID = portGET_CORE_ID(); + + for( xCoreID = 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ ) + { + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE ) + #endif + { + if( ( xYieldRequiredForCore[ xCoreID ] != pdFALSE ) || ( xYieldPendings[ xCoreID ] != pdFALSE ) ) + { + if( xCoreID == xCurrentCoreID ) + { + xSwitchRequired = pdTRUE; + } + else + { + prvYieldCore( xCoreID ); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ } - #endif /* configUSE_PREEMPTION */ + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } else { @@ -3022,11 +4512,11 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. 
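 *
 * ( Example only, added for illustration and not part of this change:
 * typical application task tag usage; prvTaskHook and xTask are
 * hypothetical names. )
 *
 *    // From the task itself: register a hook function as its tag.
 *    vTaskSetApplicationTaskTag( NULL, prvTaskHook );
 *
 *    // Elsewhere, e.g. from a trace point: invoke the tagged hook.
 *    ( void ) xTaskCallApplicationTaskHook( xTask, NULL );
 *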
*/ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR(); { xReturn = pxTCB->pxTaskTag; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -3067,78 +4557,171 @@ BaseType_t xTaskIncrementTick( void ) #endif /* configUSE_APPLICATION_TASK_TAG */ /*-----------------------------------------------------------*/ -void vTaskSwitchContext( void ) -{ - if( uxSchedulerSuspended != ( UBaseType_t ) 0U ) +#if ( configNUMBER_OF_CORES == 1 ) + void vTaskSwitchContext( void ) { - /* The scheduler is currently suspended - do not allow a context - * switch. */ - xYieldPending = pdTRUE; + if( uxSchedulerSuspended != ( UBaseType_t ) 0U ) + { + /* The scheduler is currently suspended - do not allow a context + * switch. */ + xYieldPendings[ 0 ] = pdTRUE; + } + else + { + xYieldPendings[ 0 ] = pdFALSE; + traceTASK_SWITCHED_OUT(); + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime[ 0 ] ); + #else + ulTotalRunTime[ 0 ] = portGET_RUN_TIME_COUNTER_VALUE(); + #endif + + /* Add the amount of time the task has been running to the + * accumulated time so far. The time the task started running was + * stored in ulTaskSwitchedInTime. Note that there is no overflow + * protection here so count values are only valid until the timer + * overflows. The guard against negative values is to protect + * against suspect run time stat counter implementations - which + * are provided by the application, not the kernel. */ + if( ulTotalRunTime[ 0 ] > ulTaskSwitchedInTime[ 0 ] ) + { + pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime[ 0 ] - ulTaskSwitchedInTime[ 0 ] ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + ulTaskSwitchedInTime[ 0 ] = ulTotalRunTime[ 0 ]; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + /* Check for stack overflow, if configured. */ + taskCHECK_FOR_STACK_OVERFLOW(); + + /* Before the currently running task is switched out, save its errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + pxCurrentTCB->iTaskErrno = FreeRTOS_errno; + } + #endif + + /* Select a new task to run using either the generic C or port + * optimised asm code. */ + taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + traceTASK_SWITCHED_IN(); + + /* After the new task is switched in, update the global errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + FreeRTOS_errno = pxCurrentTCB->iTaskErrno; + } + #endif + + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) + { + /* Switch C-Runtime's TLS Block to point to the TLS + * Block specific to this task. */ + configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); + } + #endif + } } - else +#else /* if ( configNUMBER_OF_CORES == 1 ) */ + void vTaskSwitchContext( BaseType_t xCoreID ) { - xYieldPending = pdFALSE; - traceTASK_SWITCHED_OUT(); + /* Acquire both locks: + * - The ISR lock protects the ready list from simultaneous access by + * both other ISRs and tasks. + * - We also take the task lock to pause here in case another core has + * suspended the scheduler. We don't want to simply set xYieldPending + * and move on if another core suspended the scheduler. We should only + * do that if the current core has suspended the scheduler. 
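 *
 * ( Example only, added for illustration and not part of this change: in
 * an SMP port each core's context switch routine is expected to call
 * this function with its own core ID; vPortYieldCore is a hypothetical
 * port routine. )
 *
 *    void vPortYieldCore( void )
 *    {
 *        // Port specific: save the outgoing task's context, then...
 *        vTaskSwitchContext( portGET_CORE_ID() );
 *        // ...restore the context of the task now in pxCurrentTCBs[].
 *    }
 *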
*/ - #if ( configGENERATE_RUN_TIME_STATS == 1 ) + portGET_TASK_LOCK(); /* Must always acquire the task lock first. */ + portGET_ISR_LOCK(); { - #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE - portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); - #else - ulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE(); - #endif + /* vTaskSwitchContext() must never be called from within a critical section. + * This is not necessarily true for single core FreeRTOS, but it is for this + * SMP port. */ + configASSERT( portGET_CRITICAL_NESTING_COUNT() == 0 ); - /* Add the amount of time the task has been running to the - * accumulated time so far. The time the task started running was - * stored in ulTaskSwitchedInTime. Note that there is no overflow - * protection here so count values are only valid until the timer - * overflows. The guard against negative values is to protect - * against suspect run time stat counter implementations - which - * are provided by the application, not the kernel. */ - if( ulTotalRunTime > ulTaskSwitchedInTime ) + if( uxSchedulerSuspended != ( UBaseType_t ) 0U ) { - pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + /* The scheduler is currently suspended - do not allow a context + * switch. */ + xYieldPendings[ xCoreID ] = pdTRUE; } else { - mtCOVERAGE_TEST_MARKER(); - } + xYieldPendings[ xCoreID ] = pdFALSE; + traceTASK_SWITCHED_OUT(); - ulTaskSwitchedInTime = ulTotalRunTime; - } - #endif /* configGENERATE_RUN_TIME_STATS */ + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime[ xCoreID ] ); + #else + ulTotalRunTime[ xCoreID ] = portGET_RUN_TIME_COUNTER_VALUE(); + #endif - /* Check for stack overflow, if configured. */ - taskCHECK_FOR_STACK_OVERFLOW(); + /* Add the amount of time the task has been running to the + * accumulated time so far. The time the task started running was + * stored in ulTaskSwitchedInTime. Note that there is no overflow + * protection here so count values are only valid until the timer + * overflows. The guard against negative values is to protect + * against suspect run time stat counter implementations - which + * are provided by the application, not the kernel. */ + if( ulTotalRunTime[ xCoreID ] > ulTaskSwitchedInTime[ xCoreID ] ) + { + pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime[ xCoreID ] - ulTaskSwitchedInTime[ xCoreID ] ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - /* Before the currently running task is switched out, save its errno. */ - #if ( configUSE_POSIX_ERRNO == 1 ) - { - pxCurrentTCB->iTaskErrno = FreeRTOS_errno; - } - #endif + ulTaskSwitchedInTime[ xCoreID ] = ulTotalRunTime[ xCoreID ]; + } + #endif /* configGENERATE_RUN_TIME_STATS */ - /* Select a new task to run using either the generic C or port - * optimised asm code. */ - taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - traceTASK_SWITCHED_IN(); + /* Check for stack overflow, if configured. */ + taskCHECK_FOR_STACK_OVERFLOW(); - /* After the new task is switched in, update the global errno. */ - #if ( configUSE_POSIX_ERRNO == 1 ) - { - FreeRTOS_errno = pxCurrentTCB->iTaskErrno; - } - #endif + /* Before the currently running task is switched out, save its errno. 
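 *
 * ( Note, example only and not part of this change: with the setting
 * below in FreeRTOSConfig.h the kernel keeps a per task errno copy and
 * swaps the global FreeRTOS_errno on every context switch, so library
 * calls made by different tasks do not corrupt each other's errno. )
 *
 *    #define configUSE_POSIX_ERRNO    1
 *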
*/ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + pxCurrentTCB->iTaskErrno = FreeRTOS_errno; + } + #endif - #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) - { - /* Switch C-Runtime's TLS Block to point to the TLS - * Block specific to this task. */ - configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); + /* Select a new task to run. */ + prvSelectHighestPriorityTask( xCoreID ); + traceTASK_SWITCHED_IN(); + + /* After the new task is switched in, update the global errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + FreeRTOS_errno = pxCurrentTCB->iTaskErrno; + } + #endif + + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) + { + /* Switch C-Runtime's TLS Block to point to the TLS + * Block specific to this task. */ + configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); + } + #endif + } } - #endif + portRELEASE_ISR_LOCK(); + portRELEASE_TASK_LOCK(); } -} +#endif /* if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ void vTaskPlaceOnEventList( List_t * const pxEventList, @@ -3275,21 +4858,40 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) listINSERT_END( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) ); } - if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) + #if ( configNUMBER_OF_CORES == 1 ) { - /* Return true if the task removed from the event list has a higher - * priority than the calling task. This allows the calling task to know if - * it should force a context switch now. */ - xReturn = pdTRUE; + if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* Return true if the task removed from the event list has a higher + * priority than the calling task. This allows the calling task to know if + * it should force a context switch now. */ + xReturn = pdTRUE; - /* Mark that a yield is pending in case the user is not using the - * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ - xYieldPending = pdTRUE; + /* Mark that a yield is pending in case the user is not using the + * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ + xYieldPendings[ 0 ] = pdTRUE; + } + else + { + xReturn = pdFALSE; + } } - else + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { xReturn = pdFALSE; + + #if ( configUSE_PREEMPTION == 1 ) + { + prvYieldForTask( pxUnblockedTCB ); + + if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) + { + xReturn = pdTRUE; + } + } + #endif /* #if ( configUSE_PREEMPTION == 1 ) */ } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ return xReturn; } @@ -3333,14 +4935,30 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) ); prvAddTaskToReadyList( pxUnblockedTCB ); - if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) + #if ( configNUMBER_OF_CORES == 1 ) + { + if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + /* The unblocked task has a priority above that of the calling task, so + * a context switch is required. This function is called with the + * scheduler suspended so xYieldPending is set so the context switch + * occurs immediately that the scheduler is resumed (unsuspended). */ + xYieldPendings[ 0 ] = pdTRUE; + } + } + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { - /* The unblocked task has a priority above that of the calling task, so - * a context switch is required. This function is called with the - * scheduler suspended so xYieldPending is set so the context switch - * occurs immediately that the scheduler is resumed (unsuspended). 
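 *
 * ( Example only, added for illustration and not part of this change:
 * the pending yields recorded above are what keep the classic "woken"
 * idiom safe even if the flag is dropped; xSemaphore is hypothetical. )
 *
 *    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *    xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken );
 *
 *    // Switch now rather than waiting for the next tick interrupt.
 *    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *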
*/
- xYieldPending = pdTRUE;
+ #if ( configUSE_PREEMPTION == 1 )
+ {
+ taskENTER_CRITICAL();
+ {
+ prvYieldForTask( pxUnblockedTCB );
+ }
+ taskEXIT_CRITICAL();
+ }
+ #endif
 }
+ #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
 }
 /*-----------------------------------------------------------*/
@@ -3431,7 +5049,8 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
 void vTaskMissedYield( void )
 {
- xYieldPending = pdTRUE;
+ /* Must be called from within a critical section. */
+ xYieldPendings[ portGET_CORE_ID() ] = pdTRUE;
 }
 /*-----------------------------------------------------------*/
@@ -3473,6 +5092,81 @@ void vTaskMissedYield( void )
 }
 #endif /* configUSE_TRACE_FACILITY */
+/*-----------------------------------------------------------*/
+
+/*
+ * -----------------------------------------------------------
+ * The MinimalIdle task.
+ * ----------------------------------------------------------
+ *
+ * The minimal idle task is used for all the additional cores in an SMP
+ * system. There must be only one idle task and the rest are minimal idle
+ * tasks.
+ *
+ * The portTASK_FUNCTION() macro is used to allow port/compiler specific
+ * language extensions. The equivalent prototype for this function is:
+ *
+ * void prvMinimalIdleTask( void *pvParameters );
+ */
+
+#if ( configNUMBER_OF_CORES > 1 )
+ static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters )
+ {
+ ( void ) pvParameters;
+
+ taskYIELD();
+
+ while( INFINITE_LOOP() )
+ {
+ #if ( configUSE_PREEMPTION == 0 )
+ {
+ /* If we are not using preemption we keep forcing a task switch to
+ * see if any other task has become available. If we are using
+ * preemption we don't need to do this as any task becoming available
+ * will automatically get the processor anyway. */
+ taskYIELD();
+ }
+ #endif /* configUSE_PREEMPTION */
+
+ #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
+ {
+ /* When using preemption tasks of equal priority will be
+ * timesliced. If a task that is sharing the idle priority is ready
+ * to run then the idle task should yield before the end of the
+ * timeslice.
+ *
+ * A critical region is not required here as we are just reading from
+ * the list, and an occasional incorrect value will not matter. If
+ * the ready list at the idle priority contains one more task than the
+ * number of idle tasks, which is equal to the configured number of cores,
+ * then a task other than the idle task is ready to execute. */
+ if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUMBER_OF_CORES )
+ {
+ taskYIELD();
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */
+
+ #if ( configUSE_MINIMAL_IDLE_HOOK == 1 )
+ {
+ /* Call the user defined function from within the idle task. This
+ * allows the application designer to add background functionality
+ * without the overhead of a separate task.
+ *
+ * This hook is intended to manage core activity such as disabling cores that go idle.
+ *
+ * NOTE: vApplicationMinimalIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
+ * CALL A FUNCTION THAT MIGHT BLOCK. */
+ vApplicationMinimalIdleHook();
+ }
+ #endif /* configUSE_MINIMAL_IDLE_HOOK */
+ }
+ }
+#endif /* #if ( configNUMBER_OF_CORES > 1 ) */

/*
 * -----------------------------------------------------------
@@ -3499,7 +5193,15 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 * any.
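 *
 * ( Example only, added for illustration and not part of this change: a
 * typical application supplied vApplicationMinimalIdleHook(), required
 * when configUSE_MINIMAL_IDLE_HOOK is 1. )
 *
 *    void vApplicationMinimalIdleHook( void )
 *    {
 *        // Runs from every idle task on an SMP build. Must never call
 *        // a blocking API; a low power wait-for-interrupt style
 *        // instruction is typical here.
 *    }
 *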
*/
 portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );
- for( ; ; )
+ #if ( configNUMBER_OF_CORES > 1 )
+ {
+ /* In SMP, all cores start up in the idle task. This initial yield gets the application
+ * tasks started. */
+ taskYIELD();
+ }
+ #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
+
+ while( INFINITE_LOOP() )
 {
 /* See if any tasks have deleted themselves - if so then the idle task
 * is responsible for freeing the deleted task's TCB and stack. */
 prvCheckTasksWaitingTermination();
@@ -3524,9 +5226,10 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 *
 * A critical region is not required here as we are just reading from
 * the list, and an occasional incorrect value will not matter. If
- * the ready list at the idle priority contains more than one task
+ * the ready list at the idle priority contains one more task than the
+ * number of idle tasks, which is equal to the configured number of cores,
 * then a task other than the idle task is ready to execute. */
- if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
+ if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUMBER_OF_CORES )
 {
 taskYIELD();
 }
@@ -3593,6 +5296,20 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 }
 }
 #endif /* configUSE_TICKLESS_IDLE */
+
+ #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) )
+ {
+ /* Call the user defined function from within the idle task. This
+ * allows the application designer to add background functionality
+ * without the overhead of a separate task.
+ *
+ * This hook is intended to manage core activity such as disabling cores that go idle.
+ *
+ * NOTE: vApplicationMinimalIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
+ * CALL A FUNCTION THAT MIGHT BLOCK. */
+ vApplicationMinimalIdleHook();
+ }
+ #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_MINIMAL_IDLE_HOOK == 1 ) ) */
 }
 }
 /*-----------------------------------------------------------*/
@@ -3615,7 +5332,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 /* A task was made ready while the scheduler was suspended. */
 eReturn = eAbortSleep;
 }
- else if( xYieldPending != pdFALSE )
+ else if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE )
 {
 /* A yield was pended while the scheduler was suspended. */
 eReturn = eAbortSleep;
@@ -3657,7 +5374,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 TCB_t * pxTCB;
 if( ( xIndex >= 0 ) &&
- ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
+ ( xIndex < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
 {
 pxTCB = prvGetTCBFromHandle( xTaskToSet );
 configASSERT( pxTCB != NULL );
@@ -3677,7 +5394,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 TCB_t * pxTCB;
 if( ( xIndex >= 0 ) &&
- ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
+ ( xIndex < ( BaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
 {
 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
 pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
@@ -3696,7 +5413,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 #if ( portUSING_MPU_WRAPPERS == 1 )
 void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify,
- const MemoryRegion_t * const xRegions )
+ const MemoryRegion_t * const pxRegions )
 {
 TCB_t * pxTCB;
 /* If null is passed in here then we are modifying the MPU settings of
 * the calling task.
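 *
 * ( Example only, added for illustration and not part of this change:
 * documented usage of vTaskAllocateMPURegions(); ucBuffer is
 * hypothetical and the region parameter constants are port specific. )
 *
 *    static uint8_t ucBuffer[ 1024 ] __attribute__( ( aligned( 1024 ) ) );
 *
 *    static const MemoryRegion_t xRegions[ portNUM_CONFIGURABLE_REGIONS ] =
 *    {
 *        // Base address  Length  Parameters
 *        { ucBuffer,      1024,   portMPU_REGION_READ_WRITE },
 *        { 0,             0,      0 },
 *        { 0,             0,      0 }
 *    };
 *
 *    // NULL modifies the calling task's own regions.
 *    vTaskAllocateMPURegions( NULL, xRegions );
 *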
*/ pxTCB = prvGetTCBFromHandle( xTaskToModify ); - vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 ); + vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), pxRegions, NULL, 0 ); } #endif /* portUSING_MPU_WRAPPERS */ @@ -3754,16 +5471,58 @@ static void prvCheckTasksWaitingTermination( void ) * being called too often in the idle task. */ while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) { - taskENTER_CRITICAL(); + #if ( configNUMBER_OF_CORES == 1 ) { - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); - --uxCurrentNumberOfTasks; - --uxDeletedTasksWaitingCleanUp; + taskENTER_CRITICAL(); + { + { + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + --uxCurrentNumberOfTasks; + --uxDeletedTasksWaitingCleanUp; + } + } + taskEXIT_CRITICAL(); + + prvDeleteTCB( pxTCB ); } - taskEXIT_CRITICAL(); + #else /* #if( configNUMBER_OF_CORES == 1 ) */ + { + pxTCB = NULL; + + taskENTER_CRITICAL(); + { + /* For SMP, multiple idles can be running simultaneously + * and we need to check that other idles did not cleanup while we were + * waiting to enter the critical section. */ + if( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) + { + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + --uxCurrentNumberOfTasks; + --uxDeletedTasksWaitingCleanUp; + } + else + { + /* The TCB to be deleted still has not yet been switched out + * by the scheduler, so we will just exit this loop early and + * try again next time. */ + taskEXIT_CRITICAL(); + break; + } + } + } + taskEXIT_CRITICAL(); - prvDeleteTCB( pxTCB ); + if( pxTCB != NULL ) + { + prvDeleteTCB( pxTCB ); + } + } + #endif /* #if( configNUMBER_OF_CORES == 1 ) */ } } #endif /* INCLUDE_vTaskDelete */ @@ -3792,6 +5551,12 @@ static void prvCheckTasksWaitingTermination( void ) #endif pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber; + #if ( ( configUSE_CORE_AFFINITY == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + { + pxTaskStatus->uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; + } + #endif + #if ( configUSE_MUTEXES == 1 ) { pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority; @@ -3817,7 +5582,7 @@ static void prvCheckTasksWaitingTermination( void ) * state is just set to whatever is passed in. */ if( eState != eInvalid ) { - if( pxTCB == pxCurrentTCB ) + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) { pxTaskStatus->eCurrentState = eRunning; } @@ -4021,7 +5786,7 @@ static void prvCheckTasksWaitingTermination( void ) #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) { /* Free up the memory allocated for the task's TLS Block. 
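 *
 * ( Example only, added for illustration and not part of this change:
 * querying the fields populated by vTaskGetInfo() above, including the
 * new uxCoreAffinityMask; xTask is a hypothetical handle. )
 *
 *    TaskStatus_t xTaskDetails;
 *
 *    // pdTRUE also fills in the stack high water mark; eInvalid asks
 *    // vTaskGetInfo() to work out the task state itself.
 *    vTaskGetInfo( xTask, &xTaskDetails, pdTRUE, eInvalid );
 *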
*/ - configDEINIT_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); + configDEINIT_TLS_BLOCK( pxTCB->xTLSBlock ); } #endif @@ -4085,19 +5850,47 @@ static void prvResetNextTaskUnblockTime( void ) } /*-----------------------------------------------------------*/ -#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) || ( configNUMBER_OF_CORES > 1 ) - TaskHandle_t xTaskGetCurrentTaskHandle( void ) - { - TaskHandle_t xReturn; + #if ( configNUMBER_OF_CORES == 1 ) + TaskHandle_t xTaskGetCurrentTaskHandle( void ) + { + TaskHandle_t xReturn; + + /* A critical section is not required as this is not called from + * an interrupt and the current TCB will always be the same for any + * individual execution thread. */ + xReturn = pxCurrentTCB; + + return xReturn; + } + #else /* #if ( configNUMBER_OF_CORES == 1 ) */ + TaskHandle_t xTaskGetCurrentTaskHandle( void ) + { + TaskHandle_t xReturn; + UBaseType_t uxSavedInterruptStatus; - /* A critical section is not required as this is not called from - * an interrupt and the current TCB will always be the same for any - * individual execution thread. */ - xReturn = pxCurrentTCB; + uxSavedInterruptStatus = portSET_INTERRUPT_MASK(); + { + xReturn = pxCurrentTCBs[ portGET_CORE_ID() ]; + } + portCLEAR_INTERRUPT_MASK( uxSavedInterruptStatus ); + + return xReturn; + } + + TaskHandle_t xTaskGetCurrentTaskHandleCPU( BaseType_t xCoreID ) + { + TaskHandle_t xReturn = NULL; + + if( taskVALID_CORE_ID( xCoreID ) != pdFALSE ) + { + xReturn = pxCurrentTCBs[ xCoreID ]; + } - return xReturn; - } + return xReturn; + } + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ #endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ /*-----------------------------------------------------------*/ @@ -4114,14 +5907,22 @@ static void prvResetNextTaskUnblockTime( void ) } else { - if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) - { - xReturn = taskSCHEDULER_RUNNING; - } - else + #if ( configNUMBER_OF_CORES > 1 ) + taskENTER_CRITICAL(); + #endif { - xReturn = taskSCHEDULER_SUSPENDED; + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) + { + xReturn = taskSCHEDULER_RUNNING; + } + else + { + xReturn = taskSCHEDULER_SUSPENDED; + } } + #if ( configNUMBER_OF_CORES > 1 ) + taskEXIT_CRITICAL(); + #endif } return xReturn; @@ -4178,6 +5979,16 @@ static void prvResetNextTaskUnblockTime( void ) /* Inherit the priority before being moved into the new list. */ pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; prvAddTaskToReadyList( pxMutexHolderTCB ); + #if ( configNUMBER_OF_CORES > 1 ) + { + /* The priority of the task is raised. Yield for this task + * if it is not running. */ + if( taskTASK_IS_RUNNING( pxMutexHolderTCB ) != pdTRUE ) + { + prvYieldForTask( pxMutexHolderTCB ); + } + } + #endif /* if ( configNUMBER_OF_CORES > 1 ) */ } else { @@ -4268,6 +6079,16 @@ static void prvResetNextTaskUnblockTime( void ) * running to give back the mutex. */ listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ prvAddTaskToReadyList( pxTCB ); + #if ( configNUMBER_OF_CORES > 1 ) + { + /* The priority of the task is dropped. Yield the core on + * which the task is running. 
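 *
 * ( Example only, added for illustration and not part of this change:
 * the scheduler state query above is commonly used to guard calls that
 * must not block while the scheduler is suspended. )
 *
 *    if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING )
 *    {
 *        vTaskDelay( pdMS_TO_TICKS( 10 ) );
 *    }
 *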
*/ + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) + { + prvYieldCore( pxTCB->xTaskRunState ); + } + } + #endif /* if ( configNUMBER_OF_CORES > 1 ) */ /* Return true to indicate that a context switch is required. * This is only actually required in the corner case whereby @@ -4381,6 +6202,16 @@ static void prvResetNextTaskUnblockTime( void ) } prvAddTaskToReadyList( pxTCB ); + #if ( configNUMBER_OF_CORES > 1 ) + { + /* The priority of the task is dropped. Yield the core on + * which the task is running. */ + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) + { + prvYieldCore( pxTCB->xTaskRunState ); + } + } + #endif /* if ( configNUMBER_OF_CORES > 1 ) */ } else { @@ -4406,7 +6237,28 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* configUSE_MUTEXES */ /*-----------------------------------------------------------*/ -#if ( portCRITICAL_NESTING_IN_TCB == 1 ) +#if ( configNUMBER_OF_CORES > 1 ) + +/* If not in a critical section then yield immediately. + * Otherwise set xYieldPendings to true to wait to + * yield until exiting the critical section. + */ + void vTaskYieldWithinAPI( void ) + { + if( portGET_CRITICAL_NESTING_COUNT() == 0U ) + { + portYIELD(); + } + else + { + xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; + } + } +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ + +/*-----------------------------------------------------------*/ + +#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) void vTaskEnterCritical( void ) { @@ -4433,15 +6285,97 @@ static void prvResetNextTaskUnblockTime( void ) } } -#endif /* portCRITICAL_NESTING_IN_TCB */ +#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUMBER_OF_CORES > 1 ) + + void vTaskEnterCritical( void ) + { + portDISABLE_INTERRUPTS(); + + if( xSchedulerRunning != pdFALSE ) + { + if( portGET_CRITICAL_NESTING_COUNT() == 0U ) + { + portGET_TASK_LOCK(); + portGET_ISR_LOCK(); + } + + portINCREMENT_CRITICAL_NESTING_COUNT(); + + /* This is not the interrupt safe version of the enter critical + * function so assert() if it is being called from an interrupt + * context. Only API functions that end in "FromISR" can be used in an + * interrupt. Only assert if the critical nesting count is 1 to + * protect against recursive calls if the assert function also uses a + * critical section. */ + if( portGET_CRITICAL_NESTING_COUNT() == 1U ) + { + portASSERT_IF_IN_ISR(); + + if( uxSchedulerSuspended == 0U ) + { + /* The only time there would be a problem is if this is called + * before a context switch and vTaskExitCritical() is called + * after pxCurrentTCB changes. Therefore this should not be + * used within vTaskSwitchContext(). 
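 *
 * ( Example only, added for illustration and not part of this change:
 * task level usage of the critical section implemented above;
 * ulSharedValue is hypothetical. )
 *
 *    taskENTER_CRITICAL();
 *    {
 *        // Keep this short: interrupts are masked here, and on SMP
 *        // builds both kernel locks are held by this core.
 *        ulSharedValue++;
 *    }
 *    taskEXIT_CRITICAL();
 *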
*/ + prvCheckForRunStateChange(); + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ + +/*-----------------------------------------------------------*/ + +#if ( configNUMBER_OF_CORES > 1 ) + + UBaseType_t vTaskEnterCriticalFromISR( void ) + { + UBaseType_t uxSavedInterruptStatus = 0; + + if( xSchedulerRunning != pdFALSE ) + { + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + + if( portGET_CRITICAL_NESTING_COUNT() == 0U ) + { + portGET_ISR_LOCK(); + } + + portINCREMENT_CRITICAL_NESTING_COUNT(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return uxSavedInterruptStatus; + } + +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( portCRITICAL_NESTING_IN_TCB == 1 ) +#if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) void vTaskExitCritical( void ) { if( xSchedulerRunning != pdFALSE ) { + /* If pxCurrentTCB->uxCriticalNesting is zero then this function + * does not match a previous call to vTaskEnterCritical(). */ + configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); + + /* This function should not be called in ISR. Use vTaskExitCriticalFromISR + * to exit critical section from ISR. */ + portASSERT_IF_IN_ISR(); + if( pxCurrentTCB->uxCriticalNesting > 0U ) { ( pxCurrentTCB->uxCriticalNesting )--; @@ -4466,7 +6400,102 @@ static void prvResetNextTaskUnblockTime( void ) } } -#endif /* portCRITICAL_NESTING_IN_TCB */ +#endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUMBER_OF_CORES > 1 ) + + void vTaskExitCritical( void ) + { + if( xSchedulerRunning != pdFALSE ) + { + /* If critical nesting count is zero then this function + * does not match a previous call to vTaskEnterCritical(). */ + configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U ); + + /* This function should not be called in ISR. Use vTaskExitCriticalFromISR + * to exit critical section from ISR. */ + portASSERT_IF_IN_ISR(); + + if( portGET_CRITICAL_NESTING_COUNT() > 0U ) + { + portDECREMENT_CRITICAL_NESTING_COUNT(); + + if( portGET_CRITICAL_NESTING_COUNT() == 0U ) + { + BaseType_t xYieldCurrentTask; + + /* Get the xYieldPending stats inside the critical section. */ + xYieldCurrentTask = xYieldPendings[ portGET_CORE_ID() ]; + + portRELEASE_ISR_LOCK(); + portRELEASE_TASK_LOCK(); + portENABLE_INTERRUPTS(); + + /* When a task yields in a critical section it just sets + * xYieldPending to true. So now that we have exited the + * critical section check if xYieldPending is true, and + * if so yield. */ + if( xYieldCurrentTask != pdFALSE ) + { + portYIELD(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUMBER_OF_CORES > 1 ) + + void vTaskExitCriticalFromISR( UBaseType_t uxSavedInterruptStatus ) + { + if( xSchedulerRunning != pdFALSE ) + { + /* If critical nesting count is zero then this function + * does not match a previous call to vTaskEnterCritical(). 
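 *
 * ( Example only, added for illustration and not part of this change:
 * the matching ISR safe pair added above. )
 *
 *    UBaseType_t uxSavedInterruptStatus;
 *
 *    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
 *    {
 *        // Touch data shared between this ISR and tasks.
 *    }
 *    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
 *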
 #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 )
@@ -4477,11 +6506,11 @@ static void prvResetNextTaskUnblockTime( void )
             size_t x;
 
             /* Start by copying the entire string. */
-            strcpy( pcBuffer, pcTaskName );
+            ( void ) strcpy( pcBuffer, pcTaskName );
 
             /* Pad the end of the string with spaces to ensure columns line up when
              * printed out. */
-            for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ )
+            for( x = strlen( pcBuffer ); x < ( size_t ) ( ( size_t ) configMAX_TASK_NAME_LEN - 1U ); x++ )
             {
                 pcBuffer[ x ] = ' ';
             }
@@ -4741,14 +6770,18 @@ TickType_t uxTaskResetEventItemValue( void )
 
     TaskHandle_t pvTaskIncrementMutexHeldCount( void )
     {
+        TCB_t * pxTCB;
+
+        pxTCB = pxCurrentTCB;
+
         /* If xSemaphoreCreateMutex() is called before any tasks have been created
          * then pxCurrentTCB will be NULL. */
-        if( pxCurrentTCB != NULL )
+        if( pxTCB != NULL )
        {
-            ( pxCurrentTCB->uxMutexesHeld )++;
+            ( pxTCB->uxMutexesHeld )++;
        }
 
-        return pxCurrentTCB;
+        return pxTCB;
     }
 
 #endif /* configUSE_MUTEXES */
@@ -4756,32 +6789,40 @@ TickType_t uxTaskResetEventItemValue( void )
 
 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
 
-    uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWait,
+    uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
                                       BaseType_t xClearCountOnExit,
                                       TickType_t xTicksToWait )
     {
         uint32_t ulReturn;
 
-        configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );
+        configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );
 
         taskENTER_CRITICAL();
         {
             /* Only block if the notification count is not already non-zero. */
-            if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] == 0UL )
+            if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] == 0UL )
             {
                 /* Mark this task as waiting for a notification. */
-                pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION;
+                pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;
 
                 if( xTicksToWait > ( TickType_t ) 0 )
                 {
                     prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
-                    traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWait );
+                    traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWaitOn );
 
                     /* All ports are written to allow a yield in a critical
                      * section (some will yield immediately, others wait until the
                      * critical section exits) - but it is not something that
                      * application code should ever do. */
-                    portYIELD_WITHIN_API();
+                    #if ( configNUMBER_OF_CORES == 1 )
+                    {
+                        portYIELD_WITHIN_API();
+                    }
+                    #else
+                    {
+                        vTaskYieldWithinAPI();
+                    }
+                    #endif
                 }
                 else
                 {
@@ -4797,18 +6838,18 @@ TickType_t uxTaskResetEventItemValue( void )
 
         taskENTER_CRITICAL();
         {
-            traceTASK_NOTIFY_TAKE( uxIndexToWait );
-            ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ];
+            traceTASK_NOTIFY_TAKE( uxIndexToWaitOn );
+            ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ];
 
             if( ulReturn != 0UL )
             {
                 if( xClearCountOnExit != pdFALSE )
                 {
-                    pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] = 0UL;
+                    pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] = 0UL;
                 }
                 else
                 {
-                    pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] = ulReturn - ( uint32_t ) 1;
+                    pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] = ulReturn - ( uint32_t ) 1;
                 }
             }
             else
@@ -4816,7 +6857,7 @@ TickType_t uxTaskResetEventItemValue( void )
                 mtCOVERAGE_TEST_MARKER();
             }
 
-            pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
+            pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION;
         }
         taskEXIT_CRITICAL();
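The parameter rename and the yield selection above are internal; application code keeps using the `ulTaskNotifyTake()` wrapper, which calls `ulTaskGenericNotifyTake()` with notification index 0. A minimal sketch of the task side of the usual deferred-interrupt pattern, assuming some ISR signals the task with `vTaskNotifyGiveFromISR()`; the task name is illustrative only:

```c
#include "FreeRTOS.h"
#include "task.h"

void vHandlerTask( void * pvParameters )
{
    uint32_t ulEventCount;

    ( void ) pvParameters;

    for( ; ; )
    {
        /* Block until notified. pdTRUE clears the whole count on exit, so
         * each wake processes every event signalled so far as a batch. */
        ulEventCount = ulTaskNotifyTake( pdTRUE, portMAX_DELAY );

        while( ulEventCount > 0UL )
        {
            /* Process one pending event here. */
            ulEventCount--;
        }
    }
}
```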
@@ -4828,7 +6869,7 @@ TickType_t uxTaskResetEventItemValue( void )
 
 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
 
-    BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWait,
+    BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
                                        uint32_t ulBitsToClearOnEntry,
                                        uint32_t ulBitsToClearOnExit,
                                        uint32_t * pulNotificationValue,
@@ -4836,31 +6877,39 @@ TickType_t uxTaskResetEventItemValue( void )
     {
         BaseType_t xReturn;
 
-        configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );
+        configASSERT( uxIndexToWaitOn < configTASK_NOTIFICATION_ARRAY_ENTRIES );
 
         taskENTER_CRITICAL();
         {
             /* Only block if a notification is not already pending. */
-            if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED )
+            if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
             {
                 /* Clear bits in the task's notification value as bits may get
                  * set by the notifying task or interrupt. This can be used to
                  * clear the value to zero. */
-                pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnEntry;
+                pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnEntry;
 
                 /* Mark this task as waiting for a notification. */
-                pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION;
+                pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskWAITING_NOTIFICATION;
 
                 if( xTicksToWait > ( TickType_t ) 0 )
                 {
                     prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
-                    traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWait );
+                    traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWaitOn );
 
                     /* All ports are written to allow a yield in a critical
                      * section (some will yield immediately, others wait until the
                      * critical section exits) - but it is not something that
                      * application code should ever do. */
-                    portYIELD_WITHIN_API();
+                    #if ( configNUMBER_OF_CORES == 1 )
+                    {
+                        portYIELD_WITHIN_API();
+                    }
+                    #else
+                    {
+                        vTaskYieldWithinAPI();
+                    }
+                    #endif
                 }
                 else
                 {
@@ -4876,20 +6925,20 @@ TickType_t uxTaskResetEventItemValue( void )
 
         taskENTER_CRITICAL();
         {
-            traceTASK_NOTIFY_WAIT( uxIndexToWait );
+            traceTASK_NOTIFY_WAIT( uxIndexToWaitOn );
 
             if( pulNotificationValue != NULL )
             {
                 /* Output the current notification value, which may or may not
                  * have changed. */
-                *pulNotificationValue = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ];
+                *pulNotificationValue = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ];
             }
 
             /* If ucNotifyValue is set then either the task never entered the
              * blocked state (because a notification was already pending) or the
              * task unblocked because of a notification. Otherwise the task
              * unblocked because of a timeout. */
-            if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED )
+            if( pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] != taskNOTIFICATION_RECEIVED )
             {
                 /* A notification was not received. */
                 xReturn = pdFALSE;
@@ -4898,11 +6947,11 @@ TickType_t uxTaskResetEventItemValue( void )
             {
                 /* A notification was already pending or a notification was
                  * received while the task was waiting. */
-                pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnExit;
+                pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ] &= ~ulBitsToClearOnExit;
                 xReturn = pdTRUE;
             }
 
-            pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
+            pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION;
         }
         taskEXIT_CRITICAL();
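Application code reaches this function through the `xTaskNotifyWait()` wrapper, again with notification index 0. A minimal sketch of using it as a lightweight event flag group; the event bit definitions and task name are illustrative only:

```c
#include "FreeRTOS.h"
#include "task.h"

/* Hypothetical event bits set by a notifying task or ISR. */
#define EVT_RX_COMPLETE    ( 1UL << 0 )
#define EVT_TX_COMPLETE    ( 1UL << 1 )

void vEventTask( void * pvParameters )
{
    uint32_t ulNotifiedValue;

    ( void ) pvParameters;

    for( ; ; )
    {
        /* Clear no bits on entry, clear all bits on exit, and receive the
         * notification value at the time the notification arrived. */
        if( xTaskNotifyWait( 0UL, 0xFFFFFFFFUL, &ulNotifiedValue, portMAX_DELAY ) == pdTRUE )
        {
            if( ( ulNotifiedValue & EVT_RX_COMPLETE ) != 0UL )
            {
                /* Handle reception here. */
            }

            if( ( ulNotifiedValue & EVT_TX_COMPLETE ) != 0UL )
            {
                /* Handle transmission here. */
            }
        }
    }
}
```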
@@ -5011,16 +7060,28 @@ TickType_t uxTaskResetEventItemValue( void )
             }
             #endif
 
-            if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
+            #if ( configNUMBER_OF_CORES == 1 )
             {
-                /* The notified task has a priority above the currently
-                 * executing task so a yield is required. */
-                taskYIELD_IF_USING_PREEMPTION();
+                if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
+                {
+                    /* The notified task has a priority above the currently
+                     * executing task so a yield is required. */
+                    taskYIELD_IF_USING_PREEMPTION();
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
             }
-            else
+            #else /* #if ( configNUMBER_OF_CORES == 1 ) */
             {
-                mtCOVERAGE_TEST_MARKER();
+                #if ( configUSE_PREEMPTION == 1 )
+                {
+                    prvYieldForTask( pxTCB );
+                }
+                #endif
             }
+            #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
         }
         else
         {
@@ -5072,7 +7133,7 @@ TickType_t uxTaskResetEventItemValue( void )
 
         pxTCB = xTaskToNotify;
 
-        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
         {
             if( pulPreviousNotificationValue != NULL )
             {
@@ -5146,27 +7207,47 @@ TickType_t uxTaskResetEventItemValue( void )
                     listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                 }
 
-                if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
+                #if ( configNUMBER_OF_CORES == 1 )
                 {
-                    /* The notified task has a priority above the currently
-                     * executing task so a yield is required. */
-                    if( pxHigherPriorityTaskWoken != NULL )
+                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                     {
-                        *pxHigherPriorityTaskWoken = pdTRUE;
-                    }
+                        /* The notified task has a priority above the currently
+                         * executing task so a yield is required. */
+                        if( pxHigherPriorityTaskWoken != NULL )
+                        {
+                            *pxHigherPriorityTaskWoken = pdTRUE;
+                        }
 
-                    /* Mark that a yield is pending in case the user is not
-                     * using the "xHigherPriorityTaskWoken" parameter to an ISR
-                     * safe FreeRTOS function. */
-                    xYieldPending = pdTRUE;
+                        /* Mark that a yield is pending in case the user is not
+                         * using the "xHigherPriorityTaskWoken" parameter to an ISR
+                         * safe FreeRTOS function. */
+                        xYieldPendings[ 0 ] = pdTRUE;
+                    }
+                    else
+                    {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
                 }
-                else
+                #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                 {
-                    mtCOVERAGE_TEST_MARKER();
+                    #if ( configUSE_PREEMPTION == 1 )
+                    {
+                        prvYieldForTask( pxTCB );
+
+                        if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE )
+                        {
+                            if( pxHigherPriorityTaskWoken != NULL )
+                            {
+                                *pxHigherPriorityTaskWoken = pdTRUE;
+                            }
+                        }
+                    }
+                    #endif /* if ( configUSE_PREEMPTION == 1 ) */
                }
+                #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
             }
         }
-        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
 
         return xReturn;
     }
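From the application's point of view the ISR-side API is unchanged: the handler still passes `xHigherPriorityTaskWoken` and requests the context switch on exit. On multi-core builds, `prvYieldForTask()` above may additionally pend a yield via `xYieldPendings` even when the parameter is NULL. A minimal sketch; the task handle, event bit, and handler name are illustrative only:

```c
#include "FreeRTOS.h"
#include "task.h"

#define EVT_DATA_READY    ( 1UL << 0 )

/* Hypothetical handle of the task waiting for the event. */
extern TaskHandle_t xTargetTask;

void vExampleDataReadyISR( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Set the event bit in the target task's notification value. */
    ( void ) xTaskNotifyFromISR( xTargetTask,
                                 EVT_DATA_READY,
                                 eSetBits,
                                 &xHigherPriorityTaskWoken );

    /* Request a context switch on exit if a higher priority task was
     * readied by the notification. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
```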
@@ -5207,7 +7288,7 @@ TickType_t uxTaskResetEventItemValue( void )
 
         pxTCB = xTaskToNotify;
 
-        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+        uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
         {
             ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
             pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;
@@ -5237,27 +7318,47 @@ TickType_t uxTaskResetEventItemValue( void )
                     listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                 }
 
-                if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
+                #if ( configNUMBER_OF_CORES == 1 )
                 {
-                    /* The notified task has a priority above the currently
-                     * executing task so a yield is required. */
-                    if( pxHigherPriorityTaskWoken != NULL )
+                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                     {
-                        *pxHigherPriorityTaskWoken = pdTRUE;
-                    }
+                        /* The notified task has a priority above the currently
+                         * executing task so a yield is required. */
+                        if( pxHigherPriorityTaskWoken != NULL )
+                        {
+                            *pxHigherPriorityTaskWoken = pdTRUE;
+                        }
 
-                    /* Mark that a yield is pending in case the user is not
-                     * using the "xHigherPriorityTaskWoken" parameter in an ISR
-                     * safe FreeRTOS function. */
-                    xYieldPending = pdTRUE;
+                        /* Mark that a yield is pending in case the user is not
+                         * using the "xHigherPriorityTaskWoken" parameter in an ISR
+                         * safe FreeRTOS function. */
+                        xYieldPendings[ 0 ] = pdTRUE;
+                    }
+                    else
+                    {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
                 }
-                else
+                #else /* #if ( configNUMBER_OF_CORES == 1 ) */
                 {
-                    mtCOVERAGE_TEST_MARKER();
+                    #if ( configUSE_PREEMPTION == 1 )
+                    {
+                        prvYieldForTask( pxTCB );
+
+                        if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE )
+                        {
+                            if( pxHigherPriorityTaskWoken != NULL )
+                            {
+                                *pxHigherPriorityTaskWoken = pdTRUE;
+                            }
+                        }
+                    }
+                    #endif /* #if ( configUSE_PREEMPTION == 1 ) */
                }
+                #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
            }
        }
-        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+        taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
     }
 
 #endif /* configUSE_TASK_NOTIFICATIONS */
@@ -5362,24 +7463,56 @@ TickType_t uxTaskResetEventItemValue( void )
 
 #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
 /*-----------------------------------------------------------*/
 
-#if ( configGENERATE_RUN_TIME_STATS == 1 )
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
 
     configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void )
     {
-        return ulTaskGetRunTimeCounter( xIdleTaskHandle );
+        configRUN_TIME_COUNTER_TYPE ulReturn = 0;
+        BaseType_t i;
+
+        for( i = 0; i < ( BaseType_t ) configNUMBER_OF_CORES; i++ )
+        {
+            ulReturn += xIdleTaskHandles[ i ]->ulRunTimeCounter;
+        }
+
+        return ulReturn;
     }
 
-#endif
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
 /*-----------------------------------------------------------*/
 
-#if ( configGENERATE_RUN_TIME_STATS == 1 )
+#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
 
     configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void )
     {
-        return ulTaskGetRunTimePercent( xIdleTaskHandle );
+        configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn;
+        configRUN_TIME_COUNTER_TYPE ulRunTimeCounter = 0;
+        BaseType_t i;
+
+        ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE() * configNUMBER_OF_CORES;
+
+        /* For percentage calculations. */
+        ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100;
+
+        /* Avoid divide by zero errors. */
+        if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 )
+        {
+            for( i = 0; i < ( BaseType_t ) configNUMBER_OF_CORES; i++ )
+            {
+                ulRunTimeCounter += xIdleTaskHandles[ i ]->ulRunTimeCounter;
+            }
+
+            ulReturn = ulRunTimeCounter / ulTotalTime;
+        }
+        else
+        {
+            ulReturn = 0;
+        }
+
+        return ulReturn;
     }
 
-#endif
+#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
 /*-----------------------------------------------------------*/
 
 static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
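With one idle task per core, the total time base is the run-time counter scaled by the core count, and the idle percentage is the summed idle counters divided by one hundredth of that total. A worked example of the calculation in ulTaskGetIdleRunTimePercent(), assuming configNUMBER_OF_CORES == 2 and illustrative counter values:

```c
#include <stdint.h>

uint32_t ulExampleIdlePercent( void )
{
    uint32_t ulTotalTime = 1000000UL * 2UL;    /* Counter value scaled by the core count. */
    uint32_t ulIdleTime = 600000UL + 400000UL; /* Sum of both idle tasks' counters. */

    ulTotalTime /= 100UL;                      /* 20000 counts per percentage point. */

    return ulIdleTime / ulTotalTime;           /* 1000000 / 20000 = 50 ( percent ). */
}
```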
diff --git a/timers.c b/timers.c
index 457ab8f8fa..0028c33485 100644
--- a/timers.c
+++ b/timers.c
@@ -72,17 +72,17 @@
 #define tmrSTATUS_IS_AUTORELOAD                  ( ( uint8_t ) 0x04 )
 
 /* The definition of the timers themselves. */
-    typedef struct tmrTimerControl                  /* The old naming convention is used to prevent breaking kernel aware debuggers. */
+    typedef struct tmrTimerControl                                               /* The old naming convention is used to prevent breaking kernel aware debuggers. */
     {
-        const char * pcTimerName;                   /**< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
-        ListItem_t xTimerListItem;                  /**< Standard linked list item as used by all kernel features for event management. */
-        TickType_t xTimerPeriodInTicks;             /**< How quickly and often the timer expires. */
-        void * pvTimerID;                           /**< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */
-        TimerCallbackFunction_t pxCallbackFunction; /**< The function that will be called when the timer expires. */
+        const char * pcTimerName;                                                /**< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+        ListItem_t xTimerListItem;                                               /**< Standard linked list item as used by all kernel features for event management. */
+        TickType_t xTimerPeriodInTicks;                                          /**< How quickly and often the timer expires. */
+        void * pvTimerID;                                                        /**< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */
+        portTIMER_CALLBACK_ATTRIBUTE TimerCallbackFunction_t pxCallbackFunction; /**< The function that will be called when the timer expires. */
     #if ( configUSE_TRACE_FACILITY == 1 )
-        UBaseType_t uxTimerNumber;                  /**< An ID assigned by trace tools such as FreeRTOS+Trace */
+        UBaseType_t uxTimerNumber;                                               /**< An ID assigned by trace tools such as FreeRTOS+Trace */
     #endif
-        uint8_t ucStatus;                           /**< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */
+        uint8_t ucStatus;                                                        /**< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */
     } xTIMER;
 
 /* The old xTIMER name is maintained above then typedefed to the new Timer_t
@@ -103,6 +103,7 @@
 
     typedef struct tmrCallbackParameters
     {
+        portTIMER_CALLBACK_ATTRIBUTE
         PendedFunction_t pxCallbackFunction; /* << The callback function to execute. */
         void * pvParameter1;                 /* << The value that will be used as the callback functions first parameter. */
         uint32_t ulParameter2;               /* << The value that will be used as the callback functions second parameter. */
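The new portTIMER_CALLBACK_ATTRIBUTE decorates the timer and pended-function callback members so a port whose compiler needs to group or annotate function pointers can supply a compiler-specific attribute. A hypothetical sketch of the port-layer arrangement; this assumes the macro falls back to an empty definition when the port does not provide one, and any attribute a real port supplies would be compiler specific:

```c
/* In the kernel headers (assumed): an empty default for ports that need
 * no decoration. */
#ifndef portTIMER_CALLBACK_ATTRIBUTE
    #define portTIMER_CALLBACK_ATTRIBUTE
#endif

/* A port's portmacro.h could override it with whatever attribute its
 * toolchain requires for function pointer members; the exact syntax is
 * port and compiler specific. */
```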
@@ -383,15 +384,17 @@
     }
 /*-----------------------------------------------------------*/
 
-    BaseType_t xTimerGenericCommand( TimerHandle_t xTimer,
-                                     const BaseType_t xCommandID,
-                                     const TickType_t xOptionalValue,
-                                     BaseType_t * const pxHigherPriorityTaskWoken,
-                                     const TickType_t xTicksToWait )
+    BaseType_t xTimerGenericCommandFromTask( TimerHandle_t xTimer,
+                                             const BaseType_t xCommandID,
+                                             const TickType_t xOptionalValue,
+                                             BaseType_t * const pxHigherPriorityTaskWoken,
+                                             const TickType_t xTicksToWait )
     {
         BaseType_t xReturn = pdFAIL;
         DaemonTaskMessage_t xMessage;
 
+        ( void ) pxHigherPriorityTaskWoken;
+
         configASSERT( xTimer );
 
         /* Send a message to the timer service task to perform a particular action
@@ -403,6 +406,8 @@
             xMessage.u.xTimerParameters.xMessageValue = xOptionalValue;
             xMessage.u.xTimerParameters.pxTimer = xTimer;
 
+            configASSERT( xCommandID < tmrFIRST_FROM_ISR_COMMAND );
+
             if( xCommandID < tmrFIRST_FROM_ISR_COMMAND )
             {
                 if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING )
@@ -414,7 +419,43 @@
                     xReturn = xQueueSendToBack( xTimerQueue, &xMessage, tmrNO_DELAY );
                 }
             }
-            else
+
+            traceTIMER_COMMAND_SEND( xTimer, xCommandID, xOptionalValue, xReturn );
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        return xReturn;
+    }
+/*-----------------------------------------------------------*/
+
+    BaseType_t xTimerGenericCommandFromISR( TimerHandle_t xTimer,
+                                            const BaseType_t xCommandID,
+                                            const TickType_t xOptionalValue,
+                                            BaseType_t * const pxHigherPriorityTaskWoken,
+                                            const TickType_t xTicksToWait )
+    {
+        BaseType_t xReturn = pdFAIL;
+        DaemonTaskMessage_t xMessage;
+
+        ( void ) xTicksToWait;
+
+        configASSERT( xTimer );
+
+        /* Send a message to the timer service task to perform a particular action
+         * on a particular timer definition. */
+        if( xTimerQueue != NULL )
+        {
+            /* Send a command to the timer service task to start the xTimer timer. */
+            xMessage.xMessageID = xCommandID;
+            xMessage.u.xTimerParameters.xMessageValue = xOptionalValue;
+            xMessage.u.xTimerParameters.pxTimer = xTimer;
+
+            configASSERT( xCommandID >= tmrFIRST_FROM_ISR_COMMAND );
+
+            if( xCommandID >= tmrFIRST_FROM_ISR_COMMAND )
             {
                 xReturn = xQueueSendToBackFromISR( xTimerQueue, &xMessage, pxHigherPriorityTaskWoken );
             }
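The command path is now split by calling context, and the new configASSERT() calls on the command ID enforce that task-level commands only ever reach xTimerGenericCommandFromTask() and ISR commands only ever reach xTimerGenericCommandFromISR(). A usage sketch, assuming the timers.h macros (not part of this diff) route xTimerStart() to the FromTask variant and xTimerStartFromISR() to the FromISR variant; xMyTimer and the function names are illustrative only:

```c
#include "FreeRTOS.h"
#include "timers.h"

/* Hypothetical timer created elsewhere with xTimerCreate(). */
extern TimerHandle_t xMyTimer;

void vStartTimerFromTask( void )
{
    /* Queues tmrCOMMAND_START to the daemon task; may block for up to
     * 10 ms worth of ticks if the timer command queue is full. */
    ( void ) xTimerStart( xMyTimer, pdMS_TO_TICKS( 10 ) );
}

void vStartTimerFromISR( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Queues tmrCOMMAND_START_FROM_ISR without blocking. */
    ( void ) xTimerStartFromISR( xMyTimer, &xHigherPriorityTaskWoken );

    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
```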
@@ -669,7 +710,15 @@
                  * block time to expire. If a command arrived between the
                  * critical section being exited and this yield then the yield
                  * will not cause the task to block. */
-                portYIELD_WITHIN_API();
+                #if ( configNUMBER_OF_CORES == 1 )
+                {
+                    portYIELD_WITHIN_API();
+                }
+                #else /* #if ( configNUMBER_OF_CORES == 1 ) */
+                {
+                    vTaskYieldWithinAPI();
+                }
+                #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
             }
             else
             {