Skip to content

Commit c4eea80

Browse files
committed
Merge branch 'change/freertos_local_crit_section_macro' into 'master'
change(freertos/idf): Refactor thread safety convenience macros. Closes IDF-8161. See merge request espressif/esp-idf!26805.
2 parents 5d02e71 + 52a9295 commit c4eea80

File tree

5 files changed

+144
-198
lines changed

5 files changed

+144
-198
lines changed

components/freertos/FreeRTOS-Kernel/event_groups.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -534,11 +534,11 @@ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
534534
EventGroup_t const * const pxEventBits = xEventGroup;
535535
EventBits_t uxReturn;
536536

537-
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
537+
prvENTER_CRITICAL_OR_MASK_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ), uxSavedInterruptStatus );
538538
{
539539
uxReturn = pxEventBits->uxEventBits;
540540
}
541-
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
541+
prvEXIT_CRITICAL_OR_UNMASK_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ), uxSavedInterruptStatus );
542542

543543
return uxReturn;
544544
} /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */

components/freertos/FreeRTOS-Kernel/queue.c

+10-60
Original file line numberDiff line numberDiff line change
@@ -54,56 +54,6 @@
5454
* correct privileged Vs unprivileged linkage and placement. */
5555
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
5656

57-
/* Some code sections require extra critical sections when building for SMP
58-
* ( configNUMBER_OF_CORES > 1 ). */
59-
#if ( configNUMBER_OF_CORES > 1 )
60-
/* Macros that Enter/exit a critical section only when building for SMP */
61-
#define taskENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock )
62-
#define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock )
63-
#define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock )
64-
#define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock )
65-
66-
static inline __attribute__( ( always_inline ) )
67-
void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
68-
{
69-
if( portCHECK_IF_IN_ISR() == pdFALSE )
70-
{
71-
taskENTER_CRITICAL( pxLock );
72-
}
73-
else
74-
{
75-
#ifdef __clang_analyzer__
76-
/* Teach clang-tidy that ISR version macro can be different */
77-
configASSERT( 1 );
78-
#endif
79-
taskENTER_CRITICAL_ISR( pxLock );
80-
}
81-
}
82-
83-
static inline __attribute__( ( always_inline ) )
84-
void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
85-
{
86-
if( portCHECK_IF_IN_ISR() == pdFALSE )
87-
{
88-
taskEXIT_CRITICAL( pxLock );
89-
}
90-
else
91-
{
92-
#ifdef __clang_analyzer__
93-
/* Teach clang-tidy that ISR version macro can be different */
94-
configASSERT( 1 );
95-
#endif
96-
taskEXIT_CRITICAL_ISR( pxLock );
97-
}
98-
}
99-
#else /* configNUMBER_OF_CORES > 1 */
100-
/* Macros that Enter/exit a critical section only when building for SMP */
101-
#define taskENTER_CRITICAL_SMP_ONLY( pxLock )
102-
#define taskEXIT_CRITICAL_SMP_ONLY( pxLock )
103-
#define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
104-
#define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
105-
#endif /* configNUMBER_OF_CORES > 1 */
106-
10757
/* Single core FreeRTOS uses queue locks to ensure that vTaskPlaceOnEventList()
10858
* calls are deterministic (as queue locks use scheduler suspension instead of
10959
* critical sections). However, the SMP implementation is non-deterministic
@@ -3109,7 +3059,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
31093059

31103060
/* For SMP, we need to take the queue registry lock in case another
31113061
* core updates the register simultaneously. */
3112-
taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
3062+
prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
31133063
{
31143064
if( pcQueueName != NULL )
31153065
{
@@ -3145,7 +3095,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
31453095
}
31463096
}
31473097
/* Release the previously taken queue registry lock. */
3148-
taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
3098+
prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
31493099
}
31503100

31513101
#endif /* configQUEUE_REGISTRY_SIZE */
@@ -3162,7 +3112,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
31623112

31633113
/* For SMP, we need to take the queue registry lock in case another
31643114
* core updates the register simultaneously. */
3165-
taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
3115+
prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
31663116
{
31673117
/* Note there is nothing here to protect against another task adding or
31683118
* removing entries from the registry while it is being searched. */
@@ -3181,7 +3131,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
31813131
}
31823132
}
31833133
/* Release the previously taken queue registry lock. */
3184-
taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
3134+
prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
31853135

31863136
return pcReturn;
31873137
} /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */
@@ -3199,7 +3149,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
31993149

32003150
/* For SMP, we need to take the queue registry lock in case another
32013151
* core updates the register simultaneously. */
3202-
taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
3152+
prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
32033153
{
32043154
/* See if the handle of the queue being unregistered in actually in the
32053155
* registry. */
@@ -3223,7 +3173,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
32233173
}
32243174
}
32253175
/* Release the previously taken queue registry lock. */
3226-
taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
3176+
prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
32273177
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
32283178

32293179
#endif /* configQUEUE_REGISTRY_SIZE */
@@ -3247,7 +3197,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
32473197

32483198
/* For SMP, we need to take the queue's xQueueLock as we are about to
32493199
* access the queue. */
3250-
taskENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
3200+
prvENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
32513201
{
32523202
#if ( queueUSE_LOCKS == 1 )
32533203
{
@@ -3278,7 +3228,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
32783228
#endif /* queueUSE_LOCKS == 1 */
32793229
}
32803230
/* Release the previously taken xQueueLock. */
3281-
taskEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
3231+
prvEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
32823232
}
32833233

32843234
#endif /* configUSE_TIMERS */
@@ -3413,7 +3363,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
34133363

34143364
/* In SMP, queue sets have their own xQueueLock. Thus we need to also
34153365
* acquire the queue set's xQueueLock before accessing it. */
3416-
taskENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
3366+
prvENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
34173367
{
34183368
if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
34193369
{
@@ -3463,7 +3413,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
34633413
}
34643414
}
34653415
/* Release the previously acquired queue set's xQueueLock. */
3466-
taskEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
3416+
prvEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
34673417

34683418
return xReturn;
34693419
}

0 commit comments

Comments (0)