diff --git a/queue.c b/queue.c index df48e24aac..98b46a6779 100644 --- a/queue.c +++ b/queue.c @@ -185,9 +185,9 @@ typedef xQUEUE Queue_t; * to indicate that a task may require unblocking. When the queue in unlocked * these lock counts are inspected, and the appropriate action taken. */ -#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( portUSING_GRANULAR_LOCKS == 0 ) static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +#endif /* #if ( portUSING_GRANULAR_LOCKS == 0 ) */ /* * Suspends a queue. Prevents other tasks from accessing the queue but allows @@ -268,7 +268,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * Macro to mark a queue as locked. Locking a queue prevents an ISR from * accessing the queue event lists. */ -#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( portUSING_GRANULAR_LOCKS == 0 ) #define prvLockQueue( pxQueue ) \ taskLOCK_DATA_GROUP( &( pxQueue->xTaskSpinlock ), &( pxQueue->xISRSpinlock ) ); \ { \ @@ -282,7 +282,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, } \ } \ taskUNLOCK_DATA_GROUP( &( pxQueue->xTaskSpinlock ), &( pxQueue->xISRSpinlock ) ) -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +#endif /* #if ( portUSING_GRANULAR_LOCKS == 0 ) */ /* * Macro to increment cTxLock member of the queue data structure. 
It is @@ -2184,7 +2184,7 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) traceENTER_uxQueueMessagesWaiting( xQueue ); - configASSERT( xQueue ); + configASSERT( pxQueue ); taskLOCK_DATA_GROUP( &( pxQueue->xTaskSpinlock ), &( pxQueue->xISRSpinlock ) ); { @@ -2448,7 +2448,7 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, } /*-----------------------------------------------------------*/ -#if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) +#if ( portUSING_GRANULAR_LOCKS == 0 ) static void prvUnlockQueue( Queue_t * const pxQueue ) { /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ @@ -2567,7 +2567,7 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, } taskUNLOCK_DATA_GROUP( &( pxQueue->xTaskSpinlock ), &( pxQueue->xISRSpinlock ) ); } -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 0 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +#endif /* #if ( portUSING_GRANULAR_LOCKS == 0 ) */ /*-----------------------------------------------------------*/ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) diff --git a/stream_buffer.c b/stream_buffer.c index 7fa35970a3..51a38b79ec 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -72,7 +72,7 @@ #define sbRESUME( pxStreamBuffer ) prvResumeStreamBuffer( pxStreamBuffer ) #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #define sbSUSPEND( pxStreamBuffer ) vTaskSuspendAll() - #define sbRESUME( pxStreamBuffer ) xTaskResumeAll() + #define sbRESUME( pxStreamBuffer ) ( void ) xTaskResumeAll() #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* If the user has not provided application specific Rx notification macros, diff --git a/tasks.c b/tasks.c index 254e12d000..fb1e8edbfc 100644 --- a/tasks.c +++ b/tasks.c @@ -527,6 +527,66 @@ PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime[ conf PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = 
portINIT_KERNEL_ISR_SPINLOCK_STATIC; #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +/* Helper macros to get and release the kernel locks. */ +#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #define taskGET_KERNEL_TASK_LOCK() \ + portDISABLE_INTERRUPTS(); \ + portGET_SPINLOCK( &xTaskSpinlock ); \ + prvCheckForRunStateChange(); \ + portENABLE_INTERRUPTS(); + + #define taskRELEASE_KERNEL_TASK_LOCK() portRELEASE_SPINLOCK( &xTaskSpinlock ); + + #define taskGET_KERNEL_ISR_LOCK() \ + portDISABLE_INTERRUPTS(); \ + portGET_SPINLOCK( &xISRSpinlock ); \ + portENABLE_INTERRUPTS(); + + #define taskRELEASE_KERNEL_ISR_LOCK() portRELEASE_SPINLOCK( &xISRSpinlock ); + + #define taskGET_KERNEL_LOCKS() \ + taskGET_KERNEL_TASK_LOCK(); \ + taskGET_KERNEL_ISR_LOCK(); + + #define taskRELEASE_KERNEL_LOCKS() \ + taskRELEASE_KERNEL_ISR_LOCK(); \ + taskRELEASE_KERNEL_TASK_LOCK(); +#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + +/* Helper macros to lock (critical section) the kernel data group . 
*/ +#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + #define taskLOCK_KERNEL_DATA_GROUP() \ + portDISABLE_INTERRUPTS(); \ + portGET_SPINLOCK( &xTaskSpinlock ); \ + if( portGET_CRITICAL_NESTING_COUNT() == 0U ) \ + { \ + prvCheckForRunStateChange(); \ + } \ + portINCREMENT_CRITICAL_NESTING_COUNT(); \ + portGET_SPINLOCK( &xISRSpinlock ); \ + portINCREMENT_CRITICAL_NESTING_COUNT(); + + #define taskUNLOCK_KERNEL_DATA_GROUP() \ + BaseType_t xYieldCurrentTask; \ + configASSERT( portGET_CRITICAL_NESTING_COUNT() > 0U ); \ + xYieldCurrentTask = xTaskUnlockCanYield(); \ + portRELEASE_SPINLOCK( &xISRSpinlock ); \ + portDECREMENT_CRITICAL_NESTING_COUNT(); \ + portRELEASE_SPINLOCK( &xTaskSpinlock ); \ + portDECREMENT_CRITICAL_NESTING_COUNT(); \ + if( portGET_CRITICAL_NESTING_COUNT() == 0U ) \ + { \ + portENABLE_INTERRUPTS(); \ + if( xYieldCurrentTask != pdFALSE ) \ + { \ + taskYIELD(); \ + } \ + else \ + { \ + mtCOVERAGE_TEST_MARKER(); \ + } \ + } +#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ /* File private functions. --------------------------------*/ @@ -819,12 +879,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; * so this is safe. */ pxThisTCB = pxCurrentTCBs[ portGET_CORE_ID() ]; - while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD ) + while( pxThisTCB && ( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD ) ) { #if ( portUSING_GRANULAR_LOCKS == 1 ) { /* We are only here if we just suspended the scheduler, - * and another task has requested that we yield. 
*/ portRELEASE_SPINLOCK( &xTaskSpinlock ); portMEMORY_BARRIER(); configASSERT( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD ); @@ -2128,7 +2188,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { /* Ensure interrupts don't access the task lists while the lists are being * updated. */ - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { uxCurrentNumberOfTasks++; @@ -2177,7 +2241,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ @@ -2226,7 +2294,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskDelete( xTaskToDelete ); - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { /* If null is passed in here then it is the calling task that is * being deleted. 
*/ @@ -2333,7 +2405,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, prvResetNextTaskUnblockTime(); } } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* If the task is not deleting itself, call prvDeleteTCB from outside of * critical section. If a task deletes itself, prvDeleteTCB is called @@ -2534,14 +2610,22 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, else #endif { - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) ); pxEventList = listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ); pxDelayedList = pxDelayedTaskList; pxOverflowedDelayedList = pxOverflowDelayedTaskList; } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ if( pxEventList == &xPendingReadyList ) { @@ -2651,14 +2735,22 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_uxTaskPriorityGet( xTask ); - taskLOCK_DATA_GROUP( 
&xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { /* If null is passed in here then it is the priority of the task * that called uxTaskPriorityGet() that is being queried. */ pxTCB = prvGetTCBFromHandle( xTask ); uxReturn = pxTCB->uxPriority; } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_uxTaskPriorityGet( uxReturn ); @@ -2725,14 +2817,22 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_uxTaskBasePriorityGet( xTask ); - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { /* If null is passed in here then it is the base priority of the task * that called uxTaskBasePriorityGet() that is being queried. 
*/ pxTCB = prvGetTCBFromHandle( xTask ); uxReturn = pxTCB->uxBasePriority; } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_uxTaskBasePriorityGet( uxReturn ); @@ -2817,7 +2917,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { /* If null is passed in here then it is the priority of the calling * task that is being changed. 
*/ @@ -2996,7 +3100,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, ( void ) uxPriorityUsedOnEntry; } } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_vTaskPrioritySet(); } @@ -3018,7 +3126,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskCoreAffinitySet( xTask, uxCoreAffinityMask ); - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { pxTCB = prvGetTCBFromHandle( xTask ); @@ -3062,7 +3174,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } } } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_vTaskCoreAffinitySet(); } @@ -3077,12 +3193,20 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskCoreAffinityGet( xTask ); - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( 
portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { pxTCB = prvGetTCBFromHandle( xTask ); uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_vTaskCoreAffinityGet( uxCoreAffinityMask ); @@ -3100,13 +3224,21 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskPreemptionDisable( xTask ); - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { pxTCB = prvGetTCBFromHandle( xTask ); pxTCB->xPreemptionDisable++; } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_vTaskPreemptionDisable(); } @@ -3123,7 +3255,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskPreemptionEnable( xTask ); - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + 
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB->xPreemptionDisable > 0U ); @@ -3139,7 +3275,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } } } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_vTaskPreemptionEnable(); } @@ -3155,7 +3295,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskSuspend( xTaskToSuspend ); - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { /* If null is passed in here then it is the running task that is * being suspended. 
*/ @@ -3242,7 +3386,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #if ( configNUMBER_OF_CORES == 1 ) { @@ -3252,11 +3400,19 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { /* Reset the next expected unblock time in case it referred to the * task that is now in the Suspended state. */ - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { prvResetNextTaskUnblockTime(); } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } else { @@ -3405,7 +3561,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, if( pxTCB != NULL ) #endif { - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + 
taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { @@ -3426,7 +3586,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } else { @@ -4151,11 +4315,11 @@ BaseType_t xTaskResumeAll( void ) } } - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( ( xYieldPendings[ xCoreID ] != pdFALSE ) && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) ) - #else - if( xYieldPendings[ xCoreID ] != pdFALSE ) - #endif + if( ( xYieldPendings[ xCoreID ] != pdFALSE ) + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == 0U ) + #endif + ) { #if ( configUSE_PREEMPTION != 0 ) { @@ -4608,11 +4772,19 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) configASSERT( xTicksToJump != ( TickType_t ) 0 ); /* Prevent the tick interrupt modifying xPendedTicks simultaneously. 
*/ - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { xPendedTicks++; } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ xTicksToJump--; } else @@ -5037,11 +5209,19 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. 
*/ - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { xTCB->pxTaskTag = pxHookFunction; } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_vTaskSetApplicationTaskTag(); } @@ -5063,11 +5243,19 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. 
*/ - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { xReturn = pxTCB->pxTaskTag; } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_xTaskGetApplicationTaskTag( xReturn ); @@ -5237,12 +5425,12 @@ BaseType_t xTaskIncrementTick( void ) traceENTER_vTaskSwitchContext(); /* Acquire both locks: - * - The ISR lock protects the ready list from simultaneous access by - * both other ISRs and tasks. - * - We also take the task lock to pause here in case another core has - * suspended the scheduler. We don't want to simply set xYieldPending - * and move on if another core suspended the scheduler. We should only - * do that if the current core has suspended the scheduler. */ + * - The ISR lock protects the ready list from simultaneous access by + * both other ISRs and tasks. + * - We also take the task lock to pause here in case another core has + * suspended the scheduler. We don't want to simply set xYieldPending + * and move on if another core suspended the scheduler. We should only + * do that if the current core has suspended the scheduler. 
*/ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) ) portGET_SPINLOCK( &xTaskSpinlock ); @@ -5260,10 +5448,15 @@ BaseType_t xTaskIncrementTick( void ) configASSERT( portGET_CRITICAL_NESTING_COUNT() == 0 ); #endif /* #if ( !( portUSING_GRANULAR_LOCKS == 1 ) ) */ - if( uxSchedulerSuspended != ( UBaseType_t ) 0U ) + if( uxSchedulerSuspended != ( UBaseType_t ) 0U + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + || ( ( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ] ) ) && ( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable > 0U ) ) + #endif + ) { - /* The scheduler is currently suspended - do not allow a context - * switch. */ + /* The scheduler is currently suspended or the task + * has requested to not be preempted - do not allow + * a context switch. */ xYieldPendings[ xCoreID ] = pdTRUE; } else @@ -5355,8 +5548,8 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, configASSERT( pxEventList ); #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - /* Suspend the kernel data group as we are about to access its members */ - vTaskSuspendAll(); + /* Take kernel data group locks as we are about to access its members */ + taskGET_KERNEL_LOCKS(); #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* THIS FUNCTION MUST BE CALLED WITH THE @@ -5380,8 +5573,8 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - /* We are done accessing the kernel data group. Resume it. */ - ( void ) xTaskResumeAll(); + /* We are done accessing the kernel data group. Release the locks. 
*/ + taskRELEASE_KERNEL_LOCKS(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_vTaskPlaceOnEventList(); @@ -5398,8 +5591,8 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - /* Suspend the kernel data group as we are about to access its members */ - vTaskSuspendAll(); + /* Take kernel data group locks as we are about to access its members */ + taskGET_KERNEL_LOCKS(); #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by @@ -5422,8 +5615,8 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - /* We are done accessing the kernel data group. Resume it. */ - ( void ) xTaskResumeAll(); + /* We are done accessing the kernel data group. Release the locks. 
*/ + taskRELEASE_KERNEL_LOCKS(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_vTaskPlaceOnUnorderedEventList(); @@ -5441,8 +5634,8 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, configASSERT( pxEventList ); #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - /* Suspend the kernel data group as we are about to access its members */ - vTaskSuspendAll(); + /* Take kernel data group locks as we are about to access its members */ + taskGET_KERNEL_LOCKS(); #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* This function should not be called by application code hence the @@ -5470,8 +5663,8 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely ); #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - /* We are done accessing the kernel data group. Resume it. */ - ( void ) xTaskResumeAll(); + /* We are done accessing the kernel data group. Release the locks. */ + taskRELEASE_KERNEL_LOCKS(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_vTaskPlaceOnEventListRestricted(); @@ -5487,11 +5680,10 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) traceENTER_xTaskRemoveFromEventList( pxEventList ); - #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) + #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be * called from a critical section within an ISR. */ - #else /* #if ( ! 
( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ /* Lock the kernel data group as we are about to access its members */ UBaseType_t uxSavedInterruptStatus; @@ -5503,14 +5695,18 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) else { uxSavedInterruptStatus = 0; - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } /* Before taking the kernel lock, another task/ISR could have already * emptied the pxEventList. So we insert a check here to see if * pxEventList is empty before attempting to remove an item from it. */ - if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE ) - { + if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE ) + { #endif /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ /* The event list is sorted in priority order, so the first in the list can @@ -5592,23 +5788,27 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - } - else - { - /* The pxEventList was emptied before we entered the critical - * section, Nothing to do except return pdFALSE. */ - xReturn = pdFALSE; - } +} +else +{ + /* The pxEventList was emptied before we entered the critical + * section, Nothing to do except return pdFALSE. */ + xReturn = pdFALSE; +} - /* We are done accessing the kernel data group. Unlock it. 
*/ - if( portCHECK_IF_IN_ISR() == pdTRUE ) - { - taskUNLOCK_DATA_GROUP_FROM_ISR( uxSavedInterruptStatus, &xISRSpinlock ); - } - else - { - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); - } +/* We are done accessing the kernel data group. Unlock it. */ +if( portCHECK_IF_IN_ISR() == pdTRUE ) +{ + taskUNLOCK_DATA_GROUP_FROM_ISR( uxSavedInterruptStatus, &xISRSpinlock ); +} +else +{ + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ +} #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_xTaskRemoveFromEventList( xReturn ); @@ -5674,13 +5874,21 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, { #if ( configUSE_PREEMPTION == 1 ) { - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { prvYieldForTask( pxUnblockedTCB ); } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } - #endif + #endif /* if ( configUSE_PREEMPTION == 1 ) */ } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ @@ -5693,12 +5901,20 @@ void 
vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) traceENTER_vTaskSetTimeOutState( pxTimeOut ); configASSERT( pxTimeOut ); - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { pxTimeOut->xOverflowCount = xNumOfOverflows; pxTimeOut->xTimeOnEntering = xTickCount; } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_vTaskSetTimeOutState(); } @@ -5710,7 +5926,7 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) /* Lock the kernel data group as we are about to access its members */ - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + taskLOCK_KERNEL_DATA_GROUP(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* For internal use only as it does not use a critical section. */ @@ -5719,7 +5935,7 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) /* We are done accessing the kernel data group. Unlock it. 
*/ - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + taskUNLOCK_KERNEL_DATA_GROUP(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_vTaskInternalSetTimeOutState(); @@ -5736,7 +5952,11 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, configASSERT( pxTimeOut ); configASSERT( pxTicksToWait ); - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { /* Minor optimisation. The tick count cannot change in this block. */ const TickType_t xConstTickCount = xTickCount; @@ -5787,7 +6007,11 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, xReturn = pdTRUE; } } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_xTaskCheckForTimeOut( xReturn ); @@ -6089,7 +6313,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) /* Lock the kernel data group as we are about to access its members */ - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + taskLOCK_KERNEL_DATA_GROUP(); #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* This function must be called from a critical section. 
*/ #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ @@ -6128,7 +6352,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) /* We are done accessing the kernel data group. Unlock it. */ - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + taskUNLOCK_KERNEL_DATA_GROUP(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_eTaskConfirmSleepModeStatus( eReturn ); @@ -6279,7 +6503,11 @@ static void prvCheckTasksWaitingTermination( void ) { pxTCB = NULL; - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { /* For SMP, multiple idles can be running simultaneously * and we need to check that other idles did not cleanup while we were @@ -6307,7 +6535,11 @@ static void prvCheckTasksWaitingTermination( void ) } } } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ if( pxTCB != NULL ) { @@ -6428,14 +6660,22 @@ static void prvCheckTasksWaitingTermination( void ) /* Tasks can be in pending ready list and other state list at the * same time. These tasks are in ready state no matter what state * list the task is in. 
*/ - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) != pdFALSE ) { pxTaskStatus->eCurrentState = eReady; } } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } } else @@ -6755,7 +6995,11 @@ static void prvResetNextTaskUnblockTime( void ) else { #if ( configNUMBER_OF_CORES > 1 ) - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( !( portUSING_GRANULAR_LOCKS == 1 ) ) + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #else /* #if ( ! ( portUSING_GRANULAR_LOCKS == 1 ) ) */ + taskLOCK_KERNEL_DATA_GROUP(); + #endif /* #if ( ! ( portUSING_GRANULAR_LOCKS == 1 ) */ #endif { if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) @@ -6768,7 +7012,11 @@ static void prvResetNextTaskUnblockTime( void ) } } #if ( configNUMBER_OF_CORES > 1 ) - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( !( portUSING_GRANULAR_LOCKS == 1 ) ) + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #else /* #if ( ! ( portUSING_GRANULAR_LOCKS == 1 ) ) */ + taskUNLOCK_KERNEL_DATA_GROUP(); + #endif /* #if ( ! 
( portUSING_GRANULAR_LOCKS == 1 ) */ #endif } @@ -6789,7 +7037,11 @@ static void prvResetNextTaskUnblockTime( void ) traceENTER_xTaskPriorityInherit( pxMutexHolder ); - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { /* If the mutex is taken by an interrupt, the mutex holder is NULL. Priority * inheritance is not applied in this scenario. */ @@ -6877,7 +7129,11 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_xTaskPriorityInherit( xReturn ); @@ -6898,7 +7154,7 @@ static void prvResetNextTaskUnblockTime( void ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) /* Lock the kernel data group as we are about to access its members */ - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + taskLOCK_KERNEL_DATA_GROUP(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ if( pxMutexHolder != NULL ) @@ -6980,7 +7236,7 @@ static void prvResetNextTaskUnblockTime( void ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) /* We are done accessing the kernel data group. Unlock it. 
*/ - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + taskUNLOCK_KERNEL_DATA_GROUP(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_xTaskPriorityDisinherit( xReturn ); @@ -7003,7 +7259,11 @@ static void prvResetNextTaskUnblockTime( void ) traceENTER_vTaskPriorityDisinheritAfterTimeout( pxMutexHolder, pxEventList ); - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { /* If a task waiting for a mutex causes the mutex holder to inherit a * priority, but the waiting task times out, then the holder should @@ -7121,7 +7381,11 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_vTaskPriorityDisinheritAfterTimeout(); } @@ -7945,7 +8209,7 @@ TickType_t uxTaskResetEventItemValue( void ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) /* Lock the kernel data group as we are about to access its members */ - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + taskLOCK_KERNEL_DATA_GROUP(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ pxTCB = pxCurrentTCB; @@ -7959,7 +8223,7 @@ TickType_t uxTaskResetEventItemValue( void ) #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && 
( configNUMBER_OF_CORES > 1 ) ) /* We are done accessing the kernel data group. Unlock it. */ - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + taskUNLOCK_KERNEL_DATA_GROUP(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_pvTaskIncrementMutexHeldCount( pxTCB ); @@ -8040,7 +8304,11 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { traceTASK_NOTIFY_TAKE( uxIndexToWaitOn ); ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWaitOn ]; @@ -8063,7 +8331,11 @@ TickType_t uxTaskResetEventItemValue( void ) pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION; } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_ulTaskGenericNotifyTake( ulReturn ); @@ -8148,7 +8420,11 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( 
configNUMBER_OF_CORES > 1 ) ) */ { traceTASK_NOTIFY_WAIT( uxIndexToWaitOn ); @@ -8178,7 +8454,11 @@ TickType_t uxTaskResetEventItemValue( void ) pxCurrentTCB->ucNotifyState[ uxIndexToWaitOn ] = taskNOT_WAITING_NOTIFICATION; } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_xTaskGenericNotifyWait( xReturn ); @@ -8206,7 +8486,11 @@ TickType_t uxTaskResetEventItemValue( void ) configASSERT( xTaskToNotify ); pxTCB = xTaskToNotify; - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { if( pulPreviousNotificationValue != NULL ) { @@ -8298,7 +8582,11 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_xTaskGenericNotify( xReturn ); @@ -8606,7 +8894,11 @@ TickType_t uxTaskResetEventItemValue( void ) * its notification state cleared. 
*/ pxTCB = prvGetTCBFromHandle( xTask ); - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED ) { @@ -8618,7 +8910,11 @@ TickType_t uxTaskResetEventItemValue( void ) xReturn = pdFAIL; } } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_xTaskGenericNotifyStateClear( xReturn ); @@ -8645,14 +8941,22 @@ TickType_t uxTaskResetEventItemValue( void ) * its notification state cleared. */ pxTCB = prvGetTCBFromHandle( xTask ); - taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ { /* Return the notification as it was before the bits were cleared, * then clear the bit mask. 
*/ ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ]; pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear; } - taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + taskUNLOCK_KERNEL_DATA_GROUP(); + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + taskUNLOCK_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ); + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ traceRETURN_ulTaskGenericNotifyValueClear( ulReturn );