From ce4a5c5b5ca02014d256db740c216228a300d4c3 Mon Sep 17 00:00:00 2001 From: Markus Klein Date: Wed, 9 Aug 2023 16:56:05 +0200 Subject: [PATCH] * Top-level compiler headers will exist in the core folder (no separate compiler folder is needed). ** The cmsis_compiler.h header will continue to figure out which compiler toolchain is being used. ** Compiler headers specific to each architecture profiles can reside within the new architecture profile folders. * Introduce compiler abstraction for cortex-R --- .../Core/Include/a-profile/cmsis_armclang_a.h | 538 +----- CMSIS/Core/Include/a-profile/cmsis_clang_a.h | 161 ++ CMSIS/Core/Include/a-profile/cmsis_gcc_a.h | 716 +------- CMSIS/Core/Include/a-profile/cmsis_iccarm_a.h | 325 +--- CMSIS/Core/Include/cmsis_armclang.h | 846 +++++++++ CMSIS/Core/Include/cmsis_clang.h | 847 +++++++++ CMSIS/Core/Include/cmsis_compiler.h | 50 +- CMSIS/Core/Include/cmsis_gcc.h | 1019 +++++++++++ CMSIS/Core/Include/cmsis_iccarm.h | 920 ++++++++++ CMSIS/Core/Include/cmsis_tiarmclang.h | 833 +++++++++ .../Core/Include/m-profile/cmsis_armclang_m.h | 1258 +++----------- CMSIS/Core/Include/m-profile/cmsis_clang_m.h | 1247 +++----------- CMSIS/Core/Include/m-profile/cmsis_gcc_m.h | 1528 ++++------------- CMSIS/Core/Include/m-profile/cmsis_iccarm_m.h | 619 +------ .../Include/m-profile/cmsis_tiarmclang_m.h | 997 ++--------- .../Core/Include/r-profile/cmsis_armclang_r.h | 161 ++ CMSIS/Core/Include/r-profile/cmsis_clang_r.h | 161 ++ CMSIS/Core/Include/r-profile/cmsis_gcc_r.h | 163 ++ CMSIS/Core/Include/r-profile/cmsis_iccarm_r.h | 39 + 19 files changed, 6078 insertions(+), 6350 deletions(-) create mode 100644 CMSIS/Core/Include/a-profile/cmsis_clang_a.h create mode 100644 CMSIS/Core/Include/cmsis_armclang.h create mode 100644 CMSIS/Core/Include/cmsis_clang.h create mode 100644 CMSIS/Core/Include/cmsis_gcc.h create mode 100644 CMSIS/Core/Include/cmsis_iccarm.h create mode 100644 CMSIS/Core/Include/cmsis_tiarmclang.h create mode 100644 CMSIS/Core/Include/r-profile/cmsis_armclang_r.h create mode 100644 CMSIS/Core/Include/r-profile/cmsis_clang_r.h create mode 100644 CMSIS/Core/Include/r-profile/cmsis_gcc_r.h create mode 100644 CMSIS/Core/Include/r-profile/cmsis_iccarm_r.h diff --git a/CMSIS/Core/Include/a-profile/cmsis_armclang_a.h b/CMSIS/Core/Include/a-profile/cmsis_armclang_a.h index c63a1b69b..13fc46bcc 100644 --- a/CMSIS/Core/Include/a-profile/cmsis_armclang_a.h +++ b/CMSIS/Core/Include/a-profile/cmsis_armclang_a.h @@ -1,7 +1,7 @@ /**************************************************************************//** * @file cmsis_armclang_a.h * @brief CMSIS compiler armclang (Arm Compiler 6) header file - * @version V1.2.3 + * @version V6.0.0 * @date 11. 
October 2023 ******************************************************************************/ /* @@ -27,539 +27,16 @@ #pragma clang system_header /* treat file as system include file */ -/* CMSIS compiler specific defines */ -#ifndef __ASM - #define __ASM __asm +#ifndef __CMSIS_ARMCLANG_H + #error "This file must not be included directly" #endif -#ifndef __INLINE - #define __INLINE __inline -#endif -#ifndef __FORCEINLINE - #define __FORCEINLINE __attribute__((always_inline)) -#endif -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static __inline -#endif -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline -#endif -#ifndef __NO_RETURN - #define __NO_RETURN __attribute__((__noreturn__)) -#endif -#ifndef CMSIS_DEPRECATED - #define CMSIS_DEPRECATED __attribute__((deprecated)) -#endif -#ifndef __USED - #define __USED __attribute__((used)) -#endif -#ifndef __WEAK - #define __WEAK __attribute__((weak)) -#endif -#ifndef __PACKED - #define __PACKED __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_STRUCT - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) -#endif -#ifndef __UNALIGNED_UINT16_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" -/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */ - __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT16_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" -/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */ - __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) -#endif -#ifndef __UNALIGNED_UINT32_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" -/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */ - __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT32_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) -#endif -#ifndef __ALIGNED - #define __ALIGNED(x) __attribute__((aligned(x))) -#endif -#ifndef __PACKED - #define __PACKED __attribute__((packed)) -#endif -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif - -/* ########################## Core Instruction Access ######################### */ -/** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. - */ -#define __NOP __builtin_arm_nop - -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI __builtin_arm_wfi - - -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. 
- */ -#define __WFE __builtin_arm_wfe - - -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. - */ -#define __SEV __builtin_arm_sev - - -/** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. - */ -#define __ISB() __builtin_arm_isb(0xF) - -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. - */ -#define __DSB() __builtin_arm_dsb(0xF) - - -/** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. - */ -#define __DMB() __builtin_arm_dmb(0xF) - - -/** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV(value) __builtin_bswap32(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV16(value) __ROR(__REV(value), 16) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REVSH(value) (int16_t)__builtin_bswap16(value) - - -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) -{ - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); -} - - -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. - */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) - - -/** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value - */ -#define __RBIT __builtin_arm_rbit - -/** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value - */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) -{ - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. - This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". 
- ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a - single CLZ instruction. - */ - if (value == 0U) - { - return 32U; - } - return __builtin_clz(value); -} - -/** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDREXB (uint8_t)__builtin_arm_ldrex - - -/** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDREXH (uint16_t)__builtin_arm_ldrex - - -/** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDREXW (uint32_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXB (uint32_t)__builtin_arm_strex - - -/** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXH (uint32_t)__builtin_arm_strex - - -/** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXW (uint32_t)__builtin_arm_strex - - -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. - */ -#define __CLREX __builtin_arm_clrex - -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT __builtin_arm_ssat - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT __builtin_arm_usat - -/** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) -{ - uint32_t result; - __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); - return (result); -} - - -/** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. 
- \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return (result); -} - - -/** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("strbt %1, %0, #0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("strht %1, %0, #0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("strt %1, %0, #0" : "=Q" (*ptr) : "r" (value) ); -} - -/* ################### Compiler specific Intrinsics ########################### */ - -#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) - -#define __SADD8 __builtin_arm_sadd8 -#define __SADD16 __builtin_arm_sadd16 -#define __QADD8 __builtin_arm_qadd8 -#define __QSUB8 __builtin_arm_qsub8 -#define __QADD16 __builtin_arm_qadd16 -#define __SHADD16 __builtin_arm_shadd16 -#define __QSUB16 __builtin_arm_qsub16 -#define __SHSUB16 __builtin_arm_shsub16 -#define __QASX __builtin_arm_qasx -#define __SHASX __builtin_arm_shasx -#define __QSAX __builtin_arm_qsax -#define __SHSAX __builtin_arm_shsax -#define __SXTB16 __builtin_arm_sxtb16 -#define __SMUAD __builtin_arm_smuad -#define __SMUADX __builtin_arm_smuadx -#define __SMLAD __builtin_arm_smlad -#define __SMLADX __builtin_arm_smladx -#define __SMLALD __builtin_arm_smlald -#define __SMLALDX __builtin_arm_smlaldx -#define __SMUSD __builtin_arm_smusd -#define __SMUSDX __builtin_arm_smusdx -#define __SMLSDX __builtin_arm_smlsdx -#define __USAT16 __builtin_arm_usat16 -#define __SSUB8 __builtin_arm_ssub8 -#define __SXTB16 __builtin_arm_sxtb16 -#define __SXTAB16 __builtin_arm_sxtab16 - - -__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2) -{ - int32_t result; - - __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) -{ - int32_t result; - - __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); - return(result); -} - -#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ - ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) - -#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ - ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) - -__STATIC_FORCEINLINE int32_t __SMMLA (int32_t 
op1, int32_t op2, int32_t op3) -{ - int32_t result; - - __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); - return(result); -} - -#endif /* (__ARM_FEATURE_DSP == 1) */ /* ########################### Core Function Access ########################### */ - -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} - -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) -{ - __ASM volatile ("cpsie f" : : : "memory"); -} - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); -} - - -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) -{ - #if (__ARM_FP >= 1) - return __builtin_arm_get_fpscr(); - #else - return(0U); - #endif -} - - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set - */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) -{ - #if (__ARM_FP >= 1) - __builtin_arm_set_fpscr(fpscr); - #else - (void)fpscr; - #endif -} - /** \brief Get CPSR Register \return CPSR Register value @@ -669,6 +146,9 @@ __STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) #endif } +/** @} end of CMSIS_Core_RegAccFunctions */ + + /* * Include common core functions to access Coprocessor 15 registers */ diff --git a/CMSIS/Core/Include/a-profile/cmsis_clang_a.h b/CMSIS/Core/Include/a-profile/cmsis_clang_a.h new file mode 100644 index 000000000..eb0a8ba15 --- /dev/null +++ b/CMSIS/Core/Include/a-profile/cmsis_clang_a.h @@ -0,0 +1,161 @@ +/**************************************************************************//** + * @file cmsis_clang_a.h + * @brief CMSIS compiler armclang (Arm Compiler 6) header file + * @version V5.5.0 + * @date 04. December 2022 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CMSIS_CLANG_COREA_H +#define __CMSIS_CLANG_COREA_H + +#pragma clang system_header /* treat file as system include file */ + +#ifndef __CMSIS_CLANG_H + #error "This file must not be included directly" +#endif + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** \brief Get CPSR Register + \return CPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CPSR(void) +{ + uint32_t result; + __ASM volatile("MRS %0, cpsr" : "=r" (result) ); + return(result); +} + +/** \brief Set CPSR Register + \param [in] cpsr CPSR value to set + */ +__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr) +{ + __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory"); +} + +/** \brief Get Mode + \return Processor Mode + */ +__STATIC_FORCEINLINE uint32_t __get_mode(void) +{ + return (__get_CPSR() & 0x1FU); +} + +/** \brief Set Mode + \param [in] mode Mode value to set + */ +__STATIC_FORCEINLINE void __set_mode(uint32_t mode) +{ + __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory"); +} + +/** \brief Get Stack Pointer + \return Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP(void) +{ + uint32_t result; + __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory"); + return result; +} + +/** \brief Set Stack Pointer + \param [in] stack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP(uint32_t stack) +{ + __ASM volatile("MOV sp, %0" : : "r" (stack) : "memory"); +} + +/** \brief Get USR/SYS Stack Pointer + \return USR/SYS Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP_usr(void) +{ + uint32_t cpsr; + uint32_t result; + __ASM volatile( + "MRS %0, cpsr \n" + "CPS #0x1F \n" // no effect in USR mode + "MOV %1, sp \n" + "MSR cpsr_c, %0 \n" // no effect in USR mode + "ISB" : "=r"(cpsr), "=r"(result) : : "memory" + ); + return result; +} + +/** \brief Set USR/SYS Stack Pointer + \param [in] topOfProcStack USR/SYS Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack) +{ + uint32_t cpsr; + __ASM volatile( + "MRS %0, cpsr \n" + "CPS #0x1F \n" // no effect in USR mode + "MOV sp, %1 \n" + "MSR cpsr_c, %0 \n" // no effect in USR mode + "ISB" : "=r"(cpsr) : "r" (topOfProcStack) : "memory" + ); +} + +/** \brief Get FPEXC + \return Floating Point Exception Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPEXC(void) +{ +#if (__FPU_PRESENT == 1) + uint32_t result; + __ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory"); + return(result); +#else + return(0); +#endif +} + +/** \brief Set FPEXC + \param [in] fpexc Floating Point Exception Control value to set + */ +__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) +{ +#if (__FPU_PRESENT == 1) + __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory"); +#endif +} + +/** @} end of CMSIS_Core_RegAccFunctions */ + + +/* + * Include common core functions to access Coprocessor 15 registers + */ + +#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" ) +#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" ) +#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # 
op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" ) +#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) + +#endif /* __CMSIS_CLANG_COREA_H */ diff --git a/CMSIS/Core/Include/a-profile/cmsis_gcc_a.h b/CMSIS/Core/Include/a-profile/cmsis_gcc_a.h index d25bdedca..e1db122ed 100644 --- a/CMSIS/Core/Include/a-profile/cmsis_gcc_a.h +++ b/CMSIS/Core/Include/a-profile/cmsis_gcc_a.h @@ -1,7 +1,7 @@ /**************************************************************************//** * @file cmsis_gcc_a.h * @brief CMSIS compiler GCC header file - * @version V1.3.4 + * @version V6.0.0 * @date 11. October 2023 ******************************************************************************/ /* @@ -25,722 +25,16 @@ #ifndef __CMSIS_GCC_A_H #define __CMSIS_GCC_A_H +#ifndef __CMSIS_GCC_H + #error "This file must not be included directly" +#endif + /* ignore some GCC warnings */ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wsign-conversion" #pragma GCC diagnostic ignored "-Wconversion" #pragma GCC diagnostic ignored "-Wunused-parameter" -/* Fallback for __has_builtin */ -#ifndef __has_builtin - #define __has_builtin(x) (0) -#endif - -/* CMSIS compiler specific defines */ -#ifndef __ASM - #define __ASM __asm -#endif -#ifndef __INLINE - #define __INLINE inline -#endif -#ifndef __FORCEINLINE - #define __FORCEINLINE __attribute__((always_inline)) -#endif -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static inline -#endif -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline -#endif -#ifndef __NO_RETURN - #define __NO_RETURN __attribute__((__noreturn__)) -#endif -#ifndef CMSIS_DEPRECATED - #define CMSIS_DEPRECATED __attribute__((deprecated)) -#endif -#ifndef __USED - #define __USED __attribute__((used)) -#endif -#ifndef __WEAK - #define __WEAK __attribute__((weak)) -#endif -#ifndef __PACKED - #define __PACKED __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_STRUCT - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) -#endif -#ifndef __UNALIGNED_UINT16_WRITE - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT16_READ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) -#endif -#ifndef __UNALIGNED_UINT32_WRITE - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT32_READ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) -#endif -#ifndef __ALIGNED - #define __ALIGNED(x) __attribute__((aligned(x))) 
-#endif -#ifndef __RESTRICT - #define __RESTRICT __restrict -#endif -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif - - -/* ########################## Core Instruction Access ######################### */ -/** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. - */ -#define __NOP() __ASM volatile ("nop") - -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI() __ASM volatile ("wfi":::"memory") - - -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. - */ -#define __WFE() __ASM volatile ("wfe":::"memory") - - -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. - */ -#define __SEV() __ASM volatile ("sev") - - -/** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. - */ -__STATIC_FORCEINLINE void __ISB(void) -{ - __ASM volatile ("isb 0xF":::"memory"); -} - - -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. - */ -__STATIC_FORCEINLINE void __DSB(void) -{ - __ASM volatile ("dsb 0xF":::"memory"); -} - - -/** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. - */ -__STATIC_FORCEINLINE void __DMB(void) -{ - __ASM volatile ("dmb 0xF":::"memory"); -} - - -/** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) -{ -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) - return __builtin_bswap32(value); -#else - uint32_t result; - - __ASM ("rev %0, %1" : "=r" (result) : "r" (value) ); - return result; -#endif -} - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) -{ - uint32_t result; - __ASM ("rev16 %0, %1" : "=r" (result) : "r" (value)); - return result; -} - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) -{ -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - return (int16_t)__builtin_bswap16(value); -#else - int16_t result; - - __ASM ("revsh %0, %1" : "=r" (result) : "r" (value) ); - return result; -#endif -} - - -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. 
- \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) -{ - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); -} - - -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. - */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) - - -/** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) -{ - uint32_t result; - __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); - return result; -} - - -/** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value - */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) -{ - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. - This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". - ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a - single CLZ instruction. - */ - if (value == 0U) - { - return 32U; - } - return __builtin_clz(value); -} - -/** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) -{ - uint32_t result; - -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); -#endif - return ((uint8_t) result); /* Add explicit type cast here */ -} - - -/** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) -{ - uint32_t result; - -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); -#endif - return ((uint16_t) result); /* Add explicit type cast here */ -} - - -/** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. 
- \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) -{ - uint32_t result; - - __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); - return(result); -} - - -/** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) -{ - uint32_t result; - - __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); -} - - -/** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) -{ - uint32_t result; - - __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); -} - - -/** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) -{ - uint32_t result; - - __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); - return(result); -} - - -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. - */ -__STATIC_FORCEINLINE void __CLREX(void) -{ - __ASM volatile ("clrex" ::: "memory"); -} - -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT(ARG1, ARG2) \ -__extension__ \ -({ \ - int32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT(ARG1, ARG2) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) - -/** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) -{ - uint32_t result; - - __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); - return (result); -} - - -/** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. 
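
/* Illustrative use of the exclusive-access pair defined above (not part of this
   patch): a lock-free 32-bit increment that retries while the store-exclusive
   reports failure. Function name is an assumption. */
static inline uint32_t atomic_inc_u32(volatile uint32_t *cnt)
{
  uint32_t val;
  do {
    val = __LDREXW(cnt) + 1U;          /* load-exclusive and compute the new value */
  } while (__STREXW(val, cnt) != 0U);  /* store-exclusive; 0 means it succeeded    */
  return val;
}
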
- \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return (result); -} - - -/** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); -} - -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions - @{ - */ - -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} - - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} - -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) -{ - __ASM volatile ("cpsie f" : : : "memory"); -} - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); -} - -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. 
- \return Floating Point Status/Control register value - */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) -{ - #if (__ARM_FP >= 1) - return __builtin_arm_get_fpscr(); - #else - return(0U); - #endif -} - - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set - */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) -{ - #if (__ARM_FP >= 1) - __builtin_arm_set_fpscr(fpscr); - #else - (void)fpscr; - #endif -} - - -/*@} end of CMSIS_Core_RegAccFunctions */ - - -/* ################### Compiler specific Intrinsics ########################### */ - -#if (__ARM_FEATURE_DSP == 1) -#define __SADD8 __builtin_arm_sadd8 -#define __QADD8 __builtin_arm_qadd8 -#define __SHADD8 __builtin_arm_shadd8 -#define __UADD8 __builtin_arm_uadd8 -#define __UQADD8 __builtin_arm_uqadd8 -#define __UHADD8 __builtin_arm_uhadd8 -#define __SSUB8 __builtin_arm_ssub8 -#define __QSUB8 __builtin_arm_qsub8 -#define __SHSUB8 __builtin_arm_shsub8 -#define __USUB8 __builtin_arm_usub8 -#define __UQSUB8 __builtin_arm_uqsub8 -#define __UHSUB8 __builtin_arm_uhsub8 -#define __SADD16 __builtin_arm_sadd16 -#define __QADD16 __builtin_arm_qadd16 -#define __SHADD16 __builtin_arm_shadd16 -#define __UADD16 __builtin_arm_uadd16 -#define __UQADD16 __builtin_arm_uqadd16 -#define __UHADD16 __builtin_arm_uhadd16 -#define __SSUB16 __builtin_arm_ssub16 -#define __QSUB16 __builtin_arm_qsub16 -#define __SHSUB16 __builtin_arm_shsub16 -#define __USUB16 __builtin_arm_usub16 -#define __UQSUB16 __builtin_arm_uqsub16 -#define __UHSUB16 __builtin_arm_uhsub16 -#define __SASX __builtin_arm_sasx -#define __QASX __builtin_arm_qasx -#define __SHASX __builtin_arm_shasx -#define __UASX __builtin_arm_uasx -#define __UQASX __builtin_arm_uqasx -#define __UHASX __builtin_arm_uhasx -#define __SSAX __builtin_arm_ssax -#define __QSAX __builtin_arm_qsax -#define __SHSAX __builtin_arm_shsax -#define __USAX __builtin_arm_usax -#define __UQSAX __builtin_arm_uqsax -#define __UHSAX __builtin_arm_uhsax -#define __USAD8 __builtin_arm_usad8 -#define __USADA8 __builtin_arm_usada8 -#define __SSAT16 __builtin_arm_ssat16 -#define __USAT16 __builtin_arm_usat16 -#define __UXTB16 __builtin_arm_uxtb16 -#define __UXTAB16 __builtin_arm_uxtab16 -#define __SXTB16 __builtin_arm_sxtb16 -#define __SXTAB16 __builtin_arm_sxtab16 -#define __SMUAD __builtin_arm_smuad -#define __SMUADX __builtin_arm_smuadx -#define __SMLAD __builtin_arm_smlad -#define __SMLADX __builtin_arm_smladx -#define __SMLALD __builtin_arm_smlald -#define __SMLALDX __builtin_arm_smlaldx -#define __SMUSD __builtin_arm_smusd -#define __SMUSDX __builtin_arm_smusdx -#define __SMLSD __builtin_arm_smlsd -#define __SMLSDX __builtin_arm_smlsdx -#define __SMLSLD __builtin_arm_smlsld -#define __SMLSLDX __builtin_arm_smlsldx -#define __SEL __builtin_arm_sel -#define __QADD __builtin_arm_qadd -#define __QSUB __builtin_arm_qsub - -#define __PKHBT(ARG1,ARG2,ARG3) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - -#define __PKHTB(ARG1,ARG2,ARG3) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - if (ARG3 == 0) \ - __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ - else \ - __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - -__STATIC_FORCEINLINE uint32_t 
__SXTB16_RORn(uint32_t op1, uint32_t rotate) -{ - uint32_t result; - if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) - { - __ASM volatile("sxtb16 %0, %1, ROR %2" : "=r"(result) : "r"(op1), "i"(rotate)); - } - else - { - result = __SXTB16(__ROR(op1, rotate)); - } - return result; -} - -__STATIC_FORCEINLINE uint32_t __SXTAB16_RORn(uint32_t op1, uint32_t op2, uint32_t rotate) -{ - uint32_t result; - if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) - { - __ASM volatile("sxtab16 %0, %1, %2, ROR %3" : "=r"(result) : "r"(op1), "r"(op2), "i"(rotate)); - } - else - { - result = __SXTAB16(op1, __ROR(op2, rotate)); - } - return result; -} - -__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) -{ - int32_t result; - - __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); - return (result); -} - -#endif /* (__ARM_FEATURE_DSP == 1) */ -/** @} end of group CMSIS_SIMD_intrinsics */ /** \defgroup CMSIS_Core_intrinsics CMSIS Core Intrinsics Access to dedicated SIMD instructions diff --git a/CMSIS/Core/Include/a-profile/cmsis_iccarm_a.h b/CMSIS/Core/Include/a-profile/cmsis_iccarm_a.h index e8bed5281..fe558d191 100644 --- a/CMSIS/Core/Include/a-profile/cmsis_iccarm_a.h +++ b/CMSIS/Core/Include/a-profile/cmsis_iccarm_a.h @@ -1,14 +1,14 @@ /**************************************************************************//** * @file cmsis_iccarm_a.h * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file - * @version V5.0.8 - * @date 13. November 2022 + * @version V6.0.0 + * @date 4. August 2023 ******************************************************************************/ //------------------------------------------------------------------------------ // // Copyright (c) 2017-2018 IAR Systems -// Copyright (c) 2018-2019 Arm Limited +// Copyright (c) 2018-2023 Arm Limited. All rights reserved. // // SPDX-License-Identifier: Apache-2.0 // @@ -29,221 +29,24 @@ #ifndef __CMSIS_ICCARM_A_H__ #define __CMSIS_ICCARM_A_H__ -#ifndef __ICCARM__ - #error This file should only be compiled by ICCARM -#endif - -#pragma system_include - -#define __IAR_FT _Pragma("inline=forced") __intrinsic - -#if (__VER__ >= 8000000) - #define __ICCARM_V8 1 -#else - #define __ICCARM_V8 0 -#endif - -#pragma language=extended - -#ifndef __ALIGNED - #if __ICCARM_V8 - #define __ALIGNED(x) __attribute__((aligned(x))) - #elif (__VER__ >= 7080000) - /* Needs IAR language extensions */ - #define __ALIGNED(x) __attribute__((aligned(x))) - #else - #warning No compiler specific solution for __ALIGNED.__ALIGNED is ignored. - #define __ALIGNED(x) - #endif -#endif - - -/* Define compiler macros for CPU architecture, used in CMSIS 5. 
- */ -#if __ARM_ARCH_7A__ -/* Macro already defined */ -#else - #if defined(__ARM7A__) - #define __ARM_ARCH_7A__ 1 - #endif -#endif - -#ifndef __ASM - #define __ASM __asm -#endif - -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif - -#ifndef __INLINE - #define __INLINE inline -#endif - -#ifndef __NO_RETURN - #if __ICCARM_V8 - #define __NO_RETURN __attribute__((__noreturn__)) - #else - #define __NO_RETURN _Pragma("object_attribute=__noreturn") - #endif +#ifndef __CMSIS_ICCARM_H__ + #error "This file must not be included directly" #endif -#ifndef __PACKED - #if __ICCARM_V8 - #define __PACKED __attribute__((packed, aligned(1))) - #else - /* Needs IAR language extensions */ - #define __PACKED __packed - #endif -#endif - -#ifndef __PACKED_STRUCT - #if __ICCARM_V8 - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) - #else - /* Needs IAR language extensions */ - #define __PACKED_STRUCT __packed struct - #endif -#endif - -#ifndef __PACKED_UNION - #if __ICCARM_V8 - #define __PACKED_UNION union __attribute__((packed, aligned(1))) - #else - /* Needs IAR language extensions */ - #define __PACKED_UNION __packed union - #endif -#endif - -#ifndef __RESTRICT - #if __ICCARM_V8 - #define __RESTRICT __restrict - #else - /* Needs IAR language extensions */ - #define __RESTRICT restrict - #endif -#endif - -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static inline -#endif - -#ifndef __FORCEINLINE - #define __FORCEINLINE _Pragma("inline=forced") -#endif - -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE -#endif - -#ifndef CMSIS_DEPRECATED - #define CMSIS_DEPRECATED __attribute__((deprecated)) -#endif - -#ifndef __UNALIGNED_UINT16_READ - #pragma language=save - #pragma language=extended - __IAR_FT uint16_t __iar_uint16_read(void const *ptr) - { - return *(__packed uint16_t*)(ptr); - } - #pragma language=restore - #define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR) -#endif - - -#ifndef __UNALIGNED_UINT16_WRITE - #pragma language=save - #pragma language=extended - __IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val) - { - *(__packed uint16_t*)(ptr) = val;; - } - #pragma language=restore - #define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL) -#endif - -#ifndef __UNALIGNED_UINT32_READ - #pragma language=save - #pragma language=extended - __IAR_FT uint32_t __iar_uint32_read(void const *ptr) - { - return *(__packed uint32_t*)(ptr); - } - #pragma language=restore - #define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR) -#endif - -#ifndef __UNALIGNED_UINT32_WRITE - #pragma language=save - #pragma language=extended - __IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val) - { - *(__packed uint32_t*)(ptr) = val;; - } - #pragma language=restore - #define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL) -#endif - -#if 0 -#ifndef __UNALIGNED_UINT32 /* deprecated */ - #pragma language=save - #pragma language=extended - __packed struct __iar_u32 { uint32_t v; }; - #pragma language=restore - #define __UNALIGNED_UINT32(PTR) (((struct __iar_u32 *)(PTR))->v) -#endif -#endif - -#ifndef __USED - #if __ICCARM_V8 - #define __USED __attribute__((used)) - #else - #define __USED _Pragma("__root") - #endif -#endif - -#ifndef __WEAK - #if __ICCARM_V8 - #define __WEAK __attribute__((weak)) - #else - #define __WEAK _Pragma("__weak") - #endif -#endif - - -#ifndef __ICCARM_INTRINSICS_VERSION__ - #define __ICCARM_INTRINSICS_VERSION__ 0 +#ifndef __ICCARM__ + #error This file 
should only be compiled by ICCARM #endif #if __ICCARM_INTRINSICS_VERSION__ == 2 - - #if defined(__CLZ) - #undef __CLZ - #endif - #if defined(__REVSH) - #undef __REVSH - #endif - #if defined(__RBIT) - #undef __RBIT - #endif - #if defined(__SSAT) - #undef __SSAT - #endif - #if defined(__USAT) - #undef __USAT - #endif - - #include "iccarm_builtin.h" - - #define __disable_fault_irq __iar_builtin_disable_fiq + #define __disable_fault_irq __iar_builtin_disable_fiq #define __disable_irq __iar_builtin_disable_interrupt - #define __enable_fault_irq __iar_builtin_enable_fiq + #define __enable_fault_irq __iar_builtin_enable_fiq #define __enable_irq __iar_builtin_enable_interrupt #define __arm_rsr __iar_builtin_rsr #define __arm_wsr __iar_builtin_wsr - #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U))) + #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) #define __get_FPSCR() (__arm_rsr("FPSCR")) #define __set_FPSCR(VALUE) (__arm_wsr("FPSCR", (VALUE))) #else @@ -273,110 +76,8 @@ #define __set_CP64(cp, op1, Rt, CRm) \ __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) - #include "cmsis_cp15.h" - - #define __NOP __iar_builtin_no_operation - - #define __CLZ __iar_builtin_CLZ - #define __CLREX __iar_builtin_CLREX - - #define __DMB __iar_builtin_DMB - #define __DSB __iar_builtin_DSB - #define __ISB __iar_builtin_ISB + #include "cmsis_cp15.h" - #define __LDREXB __iar_builtin_LDREXB - #define __LDREXH __iar_builtin_LDREXH - #define __LDREXW __iar_builtin_LDREX - - #define __RBIT __iar_builtin_RBIT - #define __REV __iar_builtin_REV - #define __REV16 __iar_builtin_REV16 - - __IAR_FT int16_t __REVSH(int16_t val) - { - return (int16_t) __iar_builtin_REVSH(val); - } - - #define __ROR __iar_builtin_ROR - #define __RRX __iar_builtin_RRX - - #define __SEV __iar_builtin_SEV - - #define __SSAT __iar_builtin_SSAT - - #define __STREXB __iar_builtin_STREXB - #define __STREXH __iar_builtin_STREXH - #define __STREXW __iar_builtin_STREX - - #define __USAT __iar_builtin_USAT - - #define __WFE __iar_builtin_WFE - #define __WFI __iar_builtin_WFI - - #define __SADD8 __iar_builtin_SADD8 - #define __QADD8 __iar_builtin_QADD8 - #define __SHADD8 __iar_builtin_SHADD8 - #define __UADD8 __iar_builtin_UADD8 - #define __UQADD8 __iar_builtin_UQADD8 - #define __UHADD8 __iar_builtin_UHADD8 - #define __SSUB8 __iar_builtin_SSUB8 - #define __QSUB8 __iar_builtin_QSUB8 - #define __SHSUB8 __iar_builtin_SHSUB8 - #define __USUB8 __iar_builtin_USUB8 - #define __UQSUB8 __iar_builtin_UQSUB8 - #define __UHSUB8 __iar_builtin_UHSUB8 - #define __SADD16 __iar_builtin_SADD16 - #define __QADD16 __iar_builtin_QADD16 - #define __SHADD16 __iar_builtin_SHADD16 - #define __UADD16 __iar_builtin_UADD16 - #define __UQADD16 __iar_builtin_UQADD16 - #define __UHADD16 __iar_builtin_UHADD16 - #define __SSUB16 __iar_builtin_SSUB16 - #define __QSUB16 __iar_builtin_QSUB16 - #define __SHSUB16 __iar_builtin_SHSUB16 - #define __USUB16 __iar_builtin_USUB16 - #define __UQSUB16 __iar_builtin_UQSUB16 - #define __UHSUB16 __iar_builtin_UHSUB16 - #define __SASX __iar_builtin_SASX - #define __QASX __iar_builtin_QASX - #define __SHASX __iar_builtin_SHASX - #define __UASX __iar_builtin_UASX - #define __UQASX __iar_builtin_UQASX - #define __UHASX __iar_builtin_UHASX - #define __SSAX __iar_builtin_SSAX - #define __QSAX __iar_builtin_QSAX - #define __SHSAX __iar_builtin_SHSAX - #define __USAX __iar_builtin_USAX - #define __UQSAX __iar_builtin_UQSAX - #define __UHSAX __iar_builtin_UHSAX - 
#define __USAD8 __iar_builtin_USAD8 - #define __USADA8 __iar_builtin_USADA8 - #define __SSAT16 __iar_builtin_SSAT16 - #define __USAT16 __iar_builtin_USAT16 - #define __UXTB16 __iar_builtin_UXTB16 - #define __UXTAB16 __iar_builtin_UXTAB16 - #define __SXTB16 __iar_builtin_SXTB16 - #define __SXTAB16 __iar_builtin_SXTAB16 - #define __SMUAD __iar_builtin_SMUAD - #define __SMUADX __iar_builtin_SMUADX - #define __SMMLA __iar_builtin_SMMLA - #define __SMLAD __iar_builtin_SMLAD - #define __SMLADX __iar_builtin_SMLADX - #define __SMLALD __iar_builtin_SMLALD - #define __SMLALDX __iar_builtin_SMLALDX - #define __SMUSD __iar_builtin_SMUSD - #define __SMUSDX __iar_builtin_SMUSDX - #define __SMLSD __iar_builtin_SMLSD - #define __SMLSDX __iar_builtin_SMLSDX - #define __SMLSLD __iar_builtin_SMLSLD - #define __SMLSLDX __iar_builtin_SMLSLDX - #define __SEL __iar_builtin_SEL - #define __QADD __iar_builtin_QADD - #define __QSUB __iar_builtin_QSUB - #define __PKHBT __iar_builtin_PKHBT - #define __PKHTB __iar_builtin_PKHTB - -#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */ #if !((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U))) #define __get_FPSCR __cmsis_iar_get_FPSR_not_active @@ -464,8 +165,6 @@ #endif /* __ICCARM_INTRINSICS_VERSION__ == 2 */ -#define __BKPT(value) __asm volatile ("BKPT %0" : : "i"(value)) - __IAR_FT uint32_t __get_SP_usr(void) { diff --git a/CMSIS/Core/Include/cmsis_armclang.h b/CMSIS/Core/Include/cmsis_armclang.h new file mode 100644 index 000000000..a6ae6cb89 --- /dev/null +++ b/CMSIS/Core/Include/cmsis_armclang.h @@ -0,0 +1,846 @@ +/**************************************************************************//** + * @file cmsis_armclang.h + * @brief CMSIS compiler armclang (Arm Compiler 6) header file + * @version V6.0.0 + * @date 27. July 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CMSIS_ARMCLANG_H +#define __CMSIS_ARMCLANG_H + +#pragma clang system_header /* treat file as system include file */ + +#if (__ARM_ACLE >= 200) + #include +#else + #error Compiler must support ACLE V2.0 +#endif /* (__ARM_ACLE >= 200) */ + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE __inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef CMSIS_DEPRECATED + #define CMSIS_DEPRECATED __attribute__((deprecated)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif +#ifndef __NO_INIT + #define __NO_INIT __attribute__ ((section (".bss.noinit"))) +#endif +#ifndef __ALIAS + #define __ALIAS(x) __attribute__ ((alias(x))) +#endif + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. 
+ * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP() __nop() + + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI() __wfi() + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE() __wfe() + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV() __sev() + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __isb(0xF) + + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +#define __DSB() __dsb(0xF) + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +#define __DMB() __dmb(0xF) + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV(value) __rev(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV16(value) __rev16(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REVSH(value) __revsh(value) + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +#define __ROR(op1, op2) __ror(op1, op2) + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. 
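+             For example, 0x00000001 becomes 0x80000000.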
+ \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT(value) __rbit(value) + + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +#define __CLZ(value) __clz(value) + + +#if ((__ARM_FEATURE_SAT >= 1) && \ + (__ARM_ARCH_ISA_THUMB >= 2) ) +/* __ARM_FEATURE_SAT is wrong for Armv8-M Baseline devices */ +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT(value, sat) __ssat(value, sat) + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT(value, sat) __usat(value, sat) + +#else /* (__ARM_FEATURE_SAT >= 1) */ +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return (max); + } + else if (val < min) + { + return (min); + } + } + return (val); +} + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return (max); + } + else if (val < 0) + { + return (0U); + } + } + return ((uint32_t)val); +} +#endif /* (__ARM_FEATURE_SAT >= 1) */ + + +#if (__ARM_FEATURE_LDREX >= 1) +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __builtin_arm_clrex + + +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDREXB (uint8_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXB (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 1) */ + + +#if (__ARM_FEATURE_LDREX >= 2) +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDREXH (uint16_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXH (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 2) */ + + +#if (__ARM_FEATURE_LDREX >= 4) +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. 
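+             Typically paired with __STREXW in a retry loop to build an atomic read-modify-write;
+             a minimal sketch of that pattern, assuming a hypothetical 'counter' variable:
+ \code
+ volatile uint32_t counter;
+ uint32_t old;
+ do {
+   old = __LDREXW(&counter);                    /* load value and set the exclusive monitor */
+ } while (__STREXW(old + 1U, &counter) != 0U);  /* store succeeds only while the monitor is still held */
+ \endcode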
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDREXW (uint32_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXW (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 4) */ + + +#if (__ARM_ARCH_ISA_THUMB >= 2) +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); + return (result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t)result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t)result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return (result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0, #0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0, #0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0, #0" : "=Q" (*ptr) : "r" (value) ); +} +#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ + + +#if (__ARM_ARCH >= 8) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. 
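+             The load has acquire semantics: it is observed before any loads and stores that follow it in program order.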
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t)result); /* Add explicit type cast here */ +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t)result); /* Add explicit type cast here */ +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return (result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDAEXB (uint8_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDAEXH (uint16_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXB (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXH (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. 
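+             The store has release semantics and succeeds only while the exclusive monitor set by a matching load-exclusive is still held.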
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEX (uint32_t)__builtin_arm_stlex + +#endif /* (__ARM_ARCH >= 8) */ + +/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} +#endif + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} +#endif + +#if (__ARM_ARCH_ISA_THUMB >= 2) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} +#endif + + + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) +{ +#if (__ARM_FP >= 1) + return (__builtin_arm_get_fpscr()); +#else + return (0U); +#endif +} + + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
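+             On builds without a floating point unit (__ARM_FP below 1), the value is silently discarded.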
+ \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +{ +#if (__ARM_FP >= 1) + __builtin_arm_set_fpscr(fpscr); +#else + (void)fpscr; +#endif +} + + +/** @} end of CMSIS_Core_RegAccFunctions */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (__ARM_FEATURE_DSP == 1) +#define __SADD8 __sadd8 +#define __QADD8 __qadd8 +#define __SHADD8 __shadd8 +#define __UADD8 __uadd8 +#define __UQADD8 __uqadd8 +#define __UHADD8 __uhadd8 +#define __SSUB8 __ssub8 +#define __QSUB8 __qsub8 +#define __SHSUB8 __shsub8 +#define __USUB8 __usub8 +#define __UQSUB8 __uqsub8 +#define __UHSUB8 __uhsub8 +#define __SADD16 __sadd16 +#define __QADD16 __qadd16 +#define __SHADD16 __shadd16 +#define __UADD16 __uadd16 +#define __UQADD16 __uqadd16 +#define __UHADD16 __uhadd16 +#define __SSUB16 __ssub16 +#define __QSUB16 __qsub16 +#define __SHSUB16 __shsub16 +#define __USUB16 __usub16 +#define __UQSUB16 __uqsub16 +#define __UHSUB16 __uhsub16 +#define __SASX __sasx +#define __QASX __qasx +#define __SHASX __shasx +#define __UASX __uasx +#define __UQASX __uqasx +#define __UHASX __uhasx +#define __SSAX __ssax +#define __QSAX __qsax +#define __SHSAX __shsax +#define __USAX __usax +#define __UQSAX __uqsax +#define __UHSAX __uhsax +#define __USAD8 __usad8 +#define __USADA8 __usada8 +#define __SSAT16 __ssat16 +#define __USAT16 __usat16 +#define __UXTB16 __uxtb16 +#define __UXTAB16 __uxtab16 +#define __SXTB16 __sxtb16 +#define __SXTAB16 __sxtab16 +#define __SMUAD __smuad +#define __SMUADX __smuadx +#define __SMLAD __smlad +#define __SMLADX __smladx +#define __SMLALD __smlald +#define __SMLALDX __smlaldx +#define __SMUSD __smusd +#define __SMUSDX __smusdx +#define __SMLSD __smlsd +#define __SMLSDX __smlsdx +#define __SMLSLD __smlsld +#define __SMLSLDX __smlsldx +#define __SEL __sel +#define __QADD __qadd +#define __QSUB __qsub + +#define __PKHBT(ARG1,ARG2,ARG3) \ +__extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + +#define __PKHTB(ARG1,ARG2,ARG3) \ +__extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + if (ARG3 == 0) \ + __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ + else \ + __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + +__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) +{ + int32_t result; + + __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return (result); +} + +#endif /* (__ARM_FEATURE_DSP == 1) */ +/** @} end of group CMSIS_SIMD_intrinsics */ + +// Include the profile specific settings: +#if __ARM_ARCH_PROFILE == 'A' + #include "./a-profile/cmsis_armclang_a.h" +#elif __ARM_ARCH_PROFILE == 'R' + #include "./r-profile/cmsis_armclang_r.h" +#elif __ARM_ARCH_PROFILE == 'M' + #include "./m-profile/cmsis_armclang_m.h" +#else + #error "Unknown Arm architecture profile" +#endif + +#endif /* __CMSIS_ARMCLANG_H */ diff --git a/CMSIS/Core/Include/cmsis_clang.h b/CMSIS/Core/Include/cmsis_clang.h new file mode 100644 index 000000000..bd22f7915 --- /dev/null +++ 
b/CMSIS/Core/Include/cmsis_clang.h @@ -0,0 +1,847 @@ +/**************************************************************************//** + * @file cmsis_clang.h + * @brief CMSIS compiler LLVM/Clang header file + * @version V6.0.0 + * @date 27. July 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CMSIS_CLANG_H +#define __CMSIS_CLANG_H + +#pragma clang system_header /* treat file as system include file */ + +#if (__ARM_ACLE >= 200) + #include +#else + #error Compiler must support ACLE V2.0 +#endif /* (__ARM_ACLE >= 200) */ + +/* Fallback for __has_builtin */ +#ifndef __has_builtin + #define __has_builtin(x) (0) +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef CMSIS_DEPRECATED + #define CMSIS_DEPRECATED __attribute__((deprecated)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef 
__RESTRICT + #define __RESTRICT __restrict +#endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif +#ifndef __NO_INIT + #define __NO_INIT __attribute__ ((section (".bss.noinit"))) +#endif +#ifndef __ALIAS + #define __ALIAS(x) __attribute__ ((alias(x))) +#endif + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP() __nop() + + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI() __wfi() + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE() __wfe() + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV() __sev() + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __isb(0xF) + + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +#define __DSB() __dsb(0xF) + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +#define __DMB() __dmb(0xF) + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV(value) __rev(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV16(value) __rev16(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REVSH(value) __revsh(value) + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. 
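+             For example, __ROR(0x000000F0U, 4U) yields 0x0000000FU.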
+ \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +#define __ROR(op1, op2) __ror(op1, op2) + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT(value) __rbit(value) + + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +#define __CLZ(value) __clz(value) + + +#if ((__ARM_FEATURE_SAT >= 1) && \ + (__ARM_ARCH_ISA_THUMB >= 2) ) +/* __ARM_FEATURE_SAT is wrong for Armv8-M Baseline devices */ +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT(value, sat) __ssat(value, sat) + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT(value, sat) __usat(value, sat) + +#else /* (__ARM_FEATURE_SAT >= 1) */ +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return (max); + } + else if (val < min) + { + return (min); + } + } + return (val); +} + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return (max); + } + else if (val < 0) + { + return (0U); + } + } + return ((uint32_t)val); +} +#endif /* (__ARM_FEATURE_SAT >= 1) */ + + +#if (__ARM_FEATURE_LDREX >= 1) +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __builtin_arm_clrex + + +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDREXB (uint8_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXB (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 1) */ + + +#if (__ARM_FEATURE_LDREX >= 2) +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. 
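+             Sets the exclusive monitor; a following __STREXH to the same address succeeds only while the monitor is still held.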
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDREXH (uint16_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXH (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 2) */ + + +#if (__ARM_FEATURE_LDREX >= 4) +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDREXW (uint32_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXW (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 4) */ + + +#if (__ARM_ARCH_ISA_THUMB >= 2) +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); + return (result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t)result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t)result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return (result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. 
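+             The store is performed with unprivileged (user) access permissions, which is useful when privileged handler code writes on behalf of an unprivileged caller.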
+ \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} +#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ + + +#if (__ARM_ARCH >= 8) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t)result); /* Add explicit type cast here */ +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t)result); /* Add explicit type cast here */ +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return (result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDAEXB (uint8_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDAEXH (uint16_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXB (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXH (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEX (uint32_t)__builtin_arm_stlex + +#endif /* (__ARM_ARCH >= 8) */ + +/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} + +#if (__ARM_ARCH_ISA_THUMB >= 2) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} +#endif + + + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) +{ +#if (__ARM_FP >= 1) + return (__builtin_arm_get_fpscr()); +#else + return (0U); +#endif +} + + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
+ \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +{ +#if (__ARM_FP >= 1) + __builtin_arm_set_fpscr(fpscr); +#else + (void)fpscr; +#endif +} + + +/** @} end of CMSIS_Core_RegAccFunctions */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (__ARM_FEATURE_DSP == 1) +#define __SADD8 __sadd8 +#define __QADD8 __qadd8 +#define __SHADD8 __shadd8 +#define __UADD8 __uadd8 +#define __UQADD8 __uqadd8 +#define __UHADD8 __uhadd8 +#define __SSUB8 __ssub8 +#define __QSUB8 __qsub8 +#define __SHSUB8 __shsub8 +#define __USUB8 __usub8 +#define __UQSUB8 __uqsub8 +#define __UHSUB8 __uhsub8 +#define __SADD16 __sadd16 +#define __QADD16 __qadd16 +#define __SHADD16 __shadd16 +#define __UADD16 __uadd16 +#define __UQADD16 __uqadd16 +#define __UHADD16 __uhadd16 +#define __SSUB16 __ssub16 +#define __QSUB16 __qsub16 +#define __SHSUB16 __shsub16 +#define __USUB16 __usub16 +#define __UQSUB16 __uqsub16 +#define __UHSUB16 __uhsub16 +#define __SASX __sasx +#define __QASX __qasx +#define __SHASX __shasx +#define __UASX __uasx +#define __UQASX __uqasx +#define __UHASX __uhasx +#define __SSAX __ssax +#define __QSAX __qsax +#define __SHSAX __shsax +#define __USAX __usax +#define __UQSAX __uqsax +#define __UHSAX __uhsax +#define __USAD8 __usad8 +#define __USADA8 __usada8 +#define __SSAT16 __ssat16 +#define __USAT16 __usat16 +#define __UXTB16 __uxtb16 +#define __UXTAB16 __uxtab16 +#define __SXTB16 __sxtb16 +#define __SXTAB16 __sxtab16 +#define __SMUAD __smuad +#define __SMUADX __smuadx +#define __SMLAD __smlad +#define __SMLADX __smladx +#define __SMLALD __smlald +#define __SMLALDX __smlaldx +#define __SMUSD __smusd +#define __SMUSDX __smusdx +#define __SMLSD __smlsd +#define __SMLSDX __smlsdx +#define __SMLSLD __smlsld +#define __SMLSLDX __smlsldx +#define __SEL __sel +#define __QADD __qadd +#define __QSUB __qsub + +#define __PKHBT(ARG1,ARG2,ARG3) \ +__extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + +#define __PKHTB(ARG1,ARG2,ARG3) \ +__extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + if (ARG3 == 0) \ + __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ + else \ + __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + +__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) +{ + int32_t result; + + __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return (result); +} + +#endif /* (__ARM_FEATURE_DSP == 1) */ +/** @} end of group CMSIS_SIMD_intrinsics */ + +// Include the profile specific settings: +#if __ARM_ARCH_PROFILE == 'A' + #include "a-profile/cmsis_clang_a.h" +#elif __ARM_ARCH_PROFILE == 'R' + #include "r-profile/cmsis_clang_r.h" +#elif __ARM_ARCH_PROFILE == 'M' + #include "m-profile/cmsis_clang_m.h" +#else + #error "Unknown Arm architecture profile" +#endif + +#endif /* __CMSIS_CLANG_H */ diff --git a/CMSIS/Core/Include/cmsis_compiler.h b/CMSIS/Core/Include/cmsis_compiler.h index 82c90b32e..e69310c94 100644 --- a/CMSIS/Core/Include/cmsis_compiler.h 
+++ b/CMSIS/Core/Include/cmsis_compiler.h @@ -31,74 +31,34 @@ * Arm Compiler above 6.10.1 (armclang) */ #if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6100100) - #if __ARM_ARCH_PROFILE == 'A' - #include "./a-profile/cmsis_armclang_a.h" - #elif __ARM_ARCH_PROFILE == 'R' - #include "./r-profile/cmsis_armclang_r.h" - #elif __ARM_ARCH_PROFILE == 'M' - #include "./m-profile/cmsis_armclang_m.h" - #else - #error "Unknown Arm architecture profile" - #endif + #include "cmsis_armclang.h" /* * TI Arm Clang Compiler (tiarmclang) */ #elif defined (__ti__) - #if __ARM_ARCH_PROFILE == 'A' - #error "Core-A is not supported for this compiler" - #elif __ARM_ARCH_PROFILE == 'R' - #error "Core-R is not supported for this compiler" - #elif __ARM_ARCH_PROFILE == 'M' - #include "m-profile/cmsis_tiarmclang_m.h" - #else - #error "Unknown Arm architecture profile" - #endif + #include "cmsis_tiarmclang.h" /* * LLVM/Clang Compiler */ #elif defined ( __clang__ ) - #if __ARM_ARCH_PROFILE == 'A' - #include "a-profile/cmsis_clang_a.h" - #elif __ARM_ARCH_PROFILE == 'R' - #include "r-profile/cmsis_clang_r.h" - #elif __ARM_ARCH_PROFILE == 'M' - #include "m-profile/cmsis_clang_m.h" - #else - #error "Unknown Arm architecture profile" - #endif + #include "cmsis_clang.h" /* * GNU Compiler */ #elif defined ( __GNUC__ ) - #if __ARM_ARCH_PROFILE == 'A' - #include "a-profile/cmsis_gcc_a.h" - #elif __ARM_ARCH_PROFILE == 'R' - #include "r-profile/cmsis_gcc_r.h" - #elif __ARM_ARCH_PROFILE == 'M' - #include "m-profile/cmsis_gcc_m.h" - #else - #error "Unknown Arm architecture profile" - #endif + #include "cmsis_gcc.h" /* * IAR Compiler */ #elif defined ( __ICCARM__ ) - #if __ARM_ARCH_PROFILE == 'A' - #include "a-profile/cmsis_iccarm_a.h" - #elif __ARM_ARCH_PROFILE == 'R' - #include "r-profile/cmsis_iccarm_r.h" - #elif __ARM_ARCH_PROFILE == 'M' - #include "m-profile/cmsis_iccarm_m.h" - #else - #error "Unknown Arm architecture profile" - #endif + #include "cmsis_iccarm.h" /* diff --git a/CMSIS/Core/Include/cmsis_gcc.h b/CMSIS/Core/Include/cmsis_gcc.h new file mode 100644 index 000000000..127dfefad --- /dev/null +++ b/CMSIS/Core/Include/cmsis_gcc.h @@ -0,0 +1,1019 @@ +/**************************************************************************//** + * @file cmsis_gcc.h + * @brief CMSIS compiler GCC header file + * @version V6.0.0 + * @date 27. July 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CMSIS_GCC_H +#define __CMSIS_GCC_H + +#pragma clang system_header /* treat file as system include file */ + +/* ignore some GCC warnings */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wsign-conversion" +#pragma GCC diagnostic ignored "-Wconversion" +#pragma GCC diagnostic ignored "-Wunused-parameter" + +//#if (__ARM_ACLE >= 200) + #include +//#else +// #error Compiler must support ACLE V2.0 +//#endif /* (__ARM_ACLE >= 200) */ + +/* Fallback for __has_builtin */ +#ifndef __has_builtin + #define __has_builtin(x) (0) +#endif + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef CMSIS_DEPRECATED + #define CMSIS_DEPRECATED __attribute__((deprecated)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpacked" + #pragma GCC diagnostic ignored "-Wattributes" + __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma GCC diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif +#ifndef __NO_INIT + #define __NO_INIT __attribute__ ((section (".bss.noinit"))) +#endif +#ifndef __ALIAS + #define __ALIAS(x) __attribute__ ((alias(x))) +#endif + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. 
+ * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP() __ASM volatile ("nop") + + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI() __ASM volatile ("wfi":::"memory") + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE() __ASM volatile ("wfe":::"memory") + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV() __ASM volatile ("sev") + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +__STATIC_FORCEINLINE void __ISB(void) +{ + __ASM volatile ("isb 0xF":::"memory"); +} + + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +__STATIC_FORCEINLINE void __DSB(void) +{ + __ASM volatile ("dsb 0xF":::"memory"); +} + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +__STATIC_FORCEINLINE void __DMB(void) +{ + __ASM volatile ("dmb 0xF":::"memory"); +} + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) +{ + return __builtin_bswap32(value); +} + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) +{ + uint32_t result; + + __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return (result); +} + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) +{ + return (int16_t)__builtin_bswap16(value); +} + + +/** + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. 
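+             This implementation rotates in portable C (the rotate amount is taken modulo 32), so it does not depend on a dedicated rotate instruction.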
+ \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +{ + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); +} + + +/** + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. + */ +#define __BKPT(value) __ASM volatile ("bkpt "#value) + + +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) +{ + uint32_t result; + +#if (__ARM_ARCH_ISA_THUMB >= 2) + __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); +#else + uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ + + result = value; /* r will be reversed bits of v; first get LSB of v */ + for (value >>= 1U; value != 0U; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ +#endif + return (result); +} + + +/** + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value + */ +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +{ + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); +} + + +#if (__ARM_FEATURE_SAT >= 1) +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT(value, sat) __ssat(value, sat) + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT(value, sat) __usat(value, sat) + +#else /* (__ARM_FEATURE_SAT >= 1) */ +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return (max); + } + else if (val < min) + { + return (min); + } + } + return (val); +} + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. 
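+             For example, __USAT(300, 8U) yields 255 and __USAT(-5, 8U) yields 0.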
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return (max); + } + else if (val < 0) + { + return (0U); + } + } + return ((uint32_t)val); +} +#endif /* (__ARM_FEATURE_SAT >= 1) */ + + +#if (__ARM_FEATURE_LDREX >= 1) +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +__STATIC_FORCEINLINE void __CLREX(void) +{ + __ASM volatile ("clrex" ::: "memory"); +} + + +/** + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) +{ + uint32_t result; + + __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); + return ((uint8_t) result); /* Add explicit type cast here */ +} + + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) +{ + uint32_t result; + + __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return (result); +} +#endif /* (__ARM_FEATURE_LDREX >= 1) */ + + +#if (__ARM_FEATURE_LDREX >= 2) +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) +{ + uint32_t result; + + __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); + return ((uint16_t)result); /* Add explicit type cast here */ +} + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) +{ + uint32_t result; + + __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return (result); +} +#endif /* (__ARM_FEATURE_LDREX >= 2) */ + + +#if (__ARM_FEATURE_LDREX >= 4) +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) +{ + uint32_t result; + + __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); + return (result); +} + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. 
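+             A non-zero result means the exclusive monitor was lost; the whole load-exclusive/store-exclusive sequence must then be retried.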
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) +{ + uint32_t result; + + __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); + return (result); +} +#endif /* (__ARM_FEATURE_LDREX >= 4) */ + + +#if (__ARM_ARCH_ISA_THUMB >= 2) +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); + return (result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t)result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t)result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return (result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} +#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ + + +#if (__ARM_ARCH >= 8) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. 
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t)result); /* Add explicit type cast here */ +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t)result); /* Add explicit type cast here */ +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return (result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t)result); /* Add explicit type cast here */ +} + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t)result); /* Add explicit type cast here */ +} + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return (result); +} + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return (result); +} + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return (result); +} + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return (result); +} + +#endif /* (__ARM_ARCH >= 8) */ + +/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} + +#if (__ARM_ARCH_ISA_THUMB >= 2) + /** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ + __STATIC_FORCEINLINE void __enable_fault_irq(void) + { + __ASM volatile ("cpsie f" : : : "memory"); + } + + + /** + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ + __STATIC_FORCEINLINE void __disable_fault_irq(void) + { + __ASM volatile ("cpsid f" : : : "memory"); + } +#endif + + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) +{ +#if (__ARM_FP >= 1) + return (__builtin_arm_get_fpscr()); +#else + return (0U); +#endif +} + + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
+ \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +{ +#if (__ARM_FP >= 1) + __builtin_arm_set_fpscr(fpscr); +#else + (void)fpscr; +#endif +} + + +/** @} end of CMSIS_Core_RegAccFunctions */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (__ARM_FEATURE_DSP == 1) + #define __SADD8 __sadd8 + #define __QADD8 __qadd8 + #define __SHADD8 __shadd8 + #define __UADD8 __uadd8 + #define __UQADD8 __uqadd8 + #define __UHADD8 __uhadd8 + #define __SSUB8 __ssub8 + #define __QSUB8 __qsub8 + #define __SHSUB8 __shsub8 + #define __USUB8 __usub8 + #define __UQSUB8 __uqsub8 + #define __UHSUB8 __uhsub8 + #define __SADD16 __sadd16 + #define __QADD16 __qadd16 + #define __SHADD16 __shadd16 + #define __UADD16 __uadd16 + #define __UQADD16 __uqadd16 + #define __UHADD16 __uhadd16 + #define __SSUB16 __ssub16 + #define __QSUB16 __qsub16 + #define __SHSUB16 __shsub16 + #define __USUB16 __usub16 + #define __UQSUB16 __uqsub16 + #define __UHSUB16 __uhsub16 + #define __SASX __sasx + #define __QASX __qasx + #define __SHASX __shasx + #define __UASX __uasx + #define __UQASX __uqasx + #define __UHASX __uhasx + #define __SSAX __ssax + #define __QSAX __qsax + #define __SHSAX __shsax + #define __USAX __usax + #define __UQSAX __uqsax + #define __UHSAX __uhsax + #define __USAD8 __usad8 + #define __USADA8 __usada8 + #define __SSAT16 __ssat16 + #define __USAT16 __usat16 + #define __UXTB16 __uxtb16 + #define __UXTAB16 __uxtab16 + #define __SXTB16 __sxtb16 + #define __SXTAB16 __sxtab16 + #define __SMUAD __smuad + #define __SMUADX __smuadx + #define __SMLAD __smlad + #define __SMLADX __smladx + #define __SMLALD __smlald + #define __SMLALDX __smlaldx + #define __SMUSD __smusd + #define __SMUSDX __smusdx + #define __SMLSD __smlsd + #define __SMLSDX __smlsdx + #define __SMLSLD __smlsld + #define __SMLSLDX __smlsldx + #define __SEL __sel + #define __QADD __qadd + #define __QSUB __qsub + + #define __PKHBT(ARG1,ARG2,ARG3) \ + __extension__ \ + ({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + + #define __PKHTB(ARG1,ARG2,ARG3) \ + __extension__ \ + ({ \ + uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ + if (ARG3 == 0) \ + __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ + else \ + __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ + __RES; \ + }) + + __STATIC_FORCEINLINE uint32_t __SXTB16_RORn(uint32_t op1, uint32_t rotate) + { + uint32_t result; + if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) + { + __ASM volatile("sxtb16 %0, %1, ROR %2" : "=r"(result) : "r"(op1), "i"(rotate)); + } + else + { + result = __SXTB16(__ROR(op1, rotate)); + } + return result; + } + + __STATIC_FORCEINLINE uint32_t __SXTAB16_RORn(uint32_t op1, uint32_t op2, uint32_t rotate) + { + uint32_t result; + if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) + { + __ASM volatile("sxtab16 %0, %1, %2, ROR %3" : "=r"(result) : "r"(op1), "r"(op2), "i"(rotate)); + } + else + { + result = __SXTAB16(op1, __ROR(op2, rotate)); + } + return result; + } + + __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) + { + int32_t result; + + __ASM volatile ("smmla %0, 
%1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return (result); + } +#endif /* (__ARM_FEATURE_DSP == 1) */ +/** @} end of group CMSIS_SIMD_intrinsics */ + + +#pragma GCC diagnostic pop + +// Include the profile specific settings: +#if __ARM_ARCH_PROFILE == 'A' + #include "a-profile/cmsis_gcc_a.h" +#elif __ARM_ARCH_PROFILE == 'R' + #include "r-profile/cmsis_gcc_r.h" +#elif __ARM_ARCH_PROFILE == 'M' + #include "m-profile/cmsis_gcc_m.h" +#else + #error "Unknown Arm architecture profile" +#endif + +#endif /* __CMSIS_GCC_H */ diff --git a/CMSIS/Core/Include/cmsis_iccarm.h b/CMSIS/Core/Include/cmsis_iccarm.h new file mode 100644 index 000000000..42aa3659b --- /dev/null +++ b/CMSIS/Core/Include/cmsis_iccarm.h @@ -0,0 +1,920 @@ +/**************************************************************************//** + * @file cmsis_iccarm.h + * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file + * @version V6.0.0 + * @date 27. July 2023 + ******************************************************************************/ + +//------------------------------------------------------------------------------ +// +// Copyright (c) 2017-2021 IAR Systems +// Copyright (c) 2017-2023 Arm Limited. All rights reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License") +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//------------------------------------------------------------------------------ + +#ifndef __CMSIS_ICCARM_H__ +#define __CMSIS_ICCARM_H__ + +#ifndef __ICCARM__ + #error This file should only be compiled by ICCARM +#endif + +#pragma system_include + +#define __IAR_FT _Pragma("inline=forced") __intrinsic + +#if (__VER__ >= 8000000) + #define __ICCARM_V8 1 +#else + #define __ICCARM_V8 0 +#endif + +//#pragma language=extended + +#ifndef __ALIGNED + #if __ICCARM_V8 + #define __ALIGNED(x) __attribute__((aligned(x))) + #elif (__VER__ >= 7080000) + /* Needs IAR language extensions */ + #define __ALIGNED(x) __attribute__((aligned(x))) + #else + #warning No compiler specific solution for __ALIGNED.__ALIGNED is ignored. + #define __ALIGNED(x) + #endif +#endif + + +/* Define compiler macros for CPU architecture, used in CMSIS 5. 
+ */
+#if __ARM_ARCH_6M__ || __ARM_ARCH_7M__ || __ARM_ARCH_7EM__ || __ARM_ARCH_8M_BASE__ || __ARM_ARCH_8M_MAIN__ || __ARM_ARCH_7A__
+  /* Macros already defined */
+#else
+  #if defined(__ARM8M_MAINLINE__) || defined(__ARM8EM_MAINLINE__)
+    #define __ARM_ARCH_8M_MAIN__ 1
+  #elif defined(__ARM8M_BASELINE__)
+    #define __ARM_ARCH_8M_BASE__ 1
+  #elif defined(__ARM7A__)
+    #define __ARM_ARCH_7A__ 1
+  #elif defined(__ARM_ARCH_PROFILE) && __ARM_ARCH_PROFILE == 'M'
+    #if __ARM_ARCH == 6
+      #define __ARM_ARCH_6M__ 1
+    #elif __ARM_ARCH == 7
+      #if __ARM_FEATURE_DSP
+        #define __ARM_ARCH_7EM__ 1
+      #else
+        #define __ARM_ARCH_7M__ 1
+      #endif
+    #endif /* __ARM_ARCH */
+  #endif /* __ARM_ARCH_PROFILE == 'M' */
+#endif
+
+/* Alternative core deduction for older ICCARM versions */
+#if !defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_7M__) && !defined(__ARM_ARCH_7EM__) && \
+    !defined(__ARM_ARCH_8M_BASE__) && !defined(__ARM_ARCH_8M_MAIN__)
+  #if defined(__ARM6M__) && (__CORE__ == __ARM6M__)
+    #define __ARM_ARCH_6M__ 1
+  #elif defined(__ARM7M__) && (__CORE__ == __ARM7M__)
+    #define __ARM_ARCH_7M__ 1
+  #elif defined(__ARM7EM__) && (__CORE__ == __ARM7EM__)
+    #define __ARM_ARCH_7EM__ 1
+  #elif defined(__ARM8M_BASELINE__) && (__CORE == __ARM8M_BASELINE__)
+    #define __ARM_ARCH_8M_BASE__ 1
+  #elif defined(__ARM8M_MAINLINE__) && (__CORE == __ARM8M_MAINLINE__)
+    #define __ARM_ARCH_8M_MAIN__ 1
+  #elif defined(__ARM8EM_MAINLINE__) && (__CORE == __ARM8EM_MAINLINE__)
+    #define __ARM_ARCH_8M_MAIN__ 1
+  #else
+    #error "Unknown target."
+  #endif
+#endif
+
+
+
+#if defined(__ARM_ARCH_6M__) && __ARM_ARCH_6M__==1
+  #define __IAR_M0_FAMILY 1
+#elif defined(__ARM_ARCH_8M_BASE__) && __ARM_ARCH_8M_BASE__==1
+  #define __IAR_M0_FAMILY 1
+#else
+  #define __IAR_M0_FAMILY 0
+#endif
+
+
+#ifndef __ASM
+  #define __ASM __asm
+#endif
+
+#ifndef __COMPILER_BARRIER
+  #define __COMPILER_BARRIER() __ASM volatile("":::"memory")
+#endif
+
+#ifndef __NO_INIT
+  #define __NO_INIT __attribute__ ((section (".noinit")))
+#endif
+#ifndef __ALIAS
+  #define __ALIAS(x) __attribute__ ((alias(x)))
+#endif
+
+#ifndef __INLINE
+  #define __INLINE inline
+#endif
+
+#ifndef __NO_RETURN
+  #if __ICCARM_V8
+    #define __NO_RETURN __attribute__((__noreturn__))
+  #else
+    #define __NO_RETURN _Pragma("object_attribute=__noreturn")
+  #endif
+#endif
+
+#ifndef __PACKED
+  #if __ICCARM_V8
+    #define __PACKED __attribute__((packed, aligned(1)))
+  #else
+    /* Needs IAR language extensions */
+    #define __PACKED __packed
+  #endif
+#endif
+
+#ifndef __PACKED_STRUCT
+  #if __ICCARM_V8
+    #define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
+  #else
+    /* Needs IAR language extensions */
+    #define __PACKED_STRUCT __packed struct
+  #endif
+#endif
+
+#ifndef __PACKED_UNION
+  #if __ICCARM_V8
+    #define __PACKED_UNION union __attribute__((packed, aligned(1)))
+  #else
+    /* Needs IAR language extensions */
+    #define __PACKED_UNION __packed union
+  #endif
+#endif
+
+#ifndef __RESTRICT
+  #if __ICCARM_V8
+    #define __RESTRICT __restrict
+  #else
+    /* Needs IAR language extensions */
+    #define __RESTRICT restrict
+  #endif
+#endif
+
+#ifndef __STATIC_INLINE
+  #define __STATIC_INLINE static inline
+#endif
+
+#ifndef __FORCEINLINE
+  #define __FORCEINLINE _Pragma("inline=forced")
+#endif
+
+#ifndef __STATIC_FORCEINLINE
+  #define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE
+#endif
+
+#ifndef CMSIS_DEPRECATED
+  #define CMSIS_DEPRECATED __attribute__((deprecated))
+#endif
+
+#ifndef __UNALIGNED_UINT16_READ
+  #pragma language=save
+  #pragma language=extended
+  __IAR_FT uint16_t __iar_uint16_read(void 
const *ptr) + { + return *(__packed uint16_t*)(ptr); + } + #pragma language=restore + #define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR) +#endif + + +#ifndef __UNALIGNED_UINT16_WRITE + #pragma language=save + #pragma language=extended + __IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val) + { + *(__packed uint16_t*)(ptr) = val;; + } + #pragma language=restore + #define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL) +#endif + +#ifndef __UNALIGNED_UINT32_READ + #pragma language=save + #pragma language=extended + __IAR_FT uint32_t __iar_uint32_read(void const *ptr) + { + return *(__packed uint32_t*)(ptr); + } + #pragma language=restore + #define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR) +#endif + +#ifndef __UNALIGNED_UINT32_WRITE + #pragma language=save + #pragma language=extended + __IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val) + { + *(__packed uint32_t*)(ptr) = val;; + } + #pragma language=restore + #define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL) +#endif + +#if !defined (__arm__) + #ifndef __UNALIGNED_UINT32 /* deprecated */ + #pragma language=save + #pragma language=extended + __packed struct __iar_u32 { uint32_t v; }; + #pragma language=restore + #define __UNALIGNED_UINT32(PTR) (((struct __iar_u32 *)(PTR))->v) + #endif +#endif + +#ifndef __USED + #if __ICCARM_V8 + #define __USED __attribute__((used)) + #else + #define __USED _Pragma("__root") + #endif +#endif + +#undef __WEAK /* undo the definition from DLib_Defaults.h */ +#ifndef __WEAK + #if __ICCARM_V8 + #define __WEAK __attribute__((weak)) + #else + #define __WEAK _Pragma("__weak") + #endif +#endif + +#ifndef __ICCARM_INTRINSICS_VERSION__ + #define __ICCARM_INTRINSICS_VERSION__ 0 +#endif + +#if __ICCARM_INTRINSICS_VERSION__ == 2 + #if defined(__CLZ) + #undef __CLZ + #endif + #if defined(__REVSH) + #undef __REVSH + #endif + #if defined(__RBIT) + #undef __RBIT + #endif + #if defined(__SSAT) + #undef __SSAT + #endif + #if defined(__USAT) + #undef __USAT + #endif + + #include "iccarm_builtin.h" +#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */ + #ifdef __INTRINSICS_INCLUDED + #error intrinsics.h is already included previously! + #endif + + #include +#endif /* __ICCARM_INTRINSICS_VERSION__ == 2 */ + +#if __ICCARM_INTRINSICS_VERSION__ == 2 + /* ########################## Core Instruction Access ######################### */ + /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ + */ + + /** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ + #define __NOP __iar_builtin_no_operation + + /** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ + #define __WFI __iar_builtin_WFI + + /** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ + #define __WFE __iar_builtin_WFE + + /** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ + #define __SEV __iar_builtin_SEV + + /** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. 
+   */
+  #define __ISB __iar_builtin_ISB
+
+  /**
+    \brief Data Synchronization Barrier
+    \details Acts as a special kind of Data Memory Barrier.
+             It completes when all explicit memory accesses before this instruction complete.
+   */
+  #define __DSB __iar_builtin_DSB
+
+  /**
+    \brief Data Memory Barrier
+    \details Ensures the apparent order of the explicit memory operations before
+             and after the instruction, without ensuring their completion.
+   */
+  #define __DMB __iar_builtin_DMB
+
+  /**
+    \brief Reverse byte order (32 bit)
+    \details Reverses the byte order in an unsigned integer value. For example, 0x12345678 becomes 0x78563412.
+    \param [in] value Value to reverse
+    \return Reversed value
+   */
+  #define __REV __iar_builtin_REV
+
+  /**
+    \brief Reverse byte order (16 bit)
+    \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
+    \param [in] value Value to reverse
+    \return Reversed value
+   */
+  #define __REV16 __iar_builtin_REV16
+
+  /**
+    \brief Reverse byte order (16 bit)
+    \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
+    \param [in] value Value to reverse
+    \return Reversed value
+   */
+  __IAR_FT int16_t __REVSH(int16_t val)
+  {
+    return (int16_t) __iar_builtin_REVSH(val);
+  }
+
+  /**
+    \brief Rotate Right in unsigned value (32 bit)
+    \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
+    \param [in] op1 Value to rotate
+    \param [in] op2 Number of Bits to rotate
+    \return Rotated value
+   */
+  #define __ROR __iar_builtin_ROR
+#else
+  __IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2)
+  {
+    return (op1 >> op2) | (op1 << ((sizeof(op1)*8)-op2));
+  }
+#endif
+
+/**
+  \brief Breakpoint
+  \details Causes the processor to enter Debug state.
+           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
+  \param [in] value is ignored by the processor.
+             If required, a debugger can use it to store additional information about the breakpoint.
+ */
+#define __BKPT(value) __asm volatile ("BKPT %0" : : "i"(value))
+
+/**
+  \brief Reverse bit order of value
+  \details Reverses the bit order of the given value.
+  \param [in] value Value to reverse
+  \return Reversed value
+ */
+#if __ICCARM_INTRINSICS_VERSION__ == 2
+  #define __RBIT __iar_builtin_RBIT
+#elif __IAR_M0_FAMILY
+  __STATIC_INLINE uint32_t __RBIT(uint32_t v)
+  {
+    uint8_t sc = 31U;
+    uint32_t r = v;
+    for (v >>= 1U; v; v >>= 1U)
+    {
+      r <<= 1U;
+      r |= v & 1U;
+      sc--;
+    }
+    return (r << sc);
+  }
+#endif
+
+/**
+  \brief Count leading zeros
+  \details Counts the number of leading zeros of a data value.
+  \param [in] value Value to count the leading zeros
+  \return number of leading zeros in value
+ */
+#if __ICCARM_INTRINSICS_VERSION__ == 2
+  #define __CLZ __iar_builtin_CLZ
+#elif __IAR_M0_FAMILY
+  __STATIC_INLINE uint8_t __CLZ(uint32_t data)
+  {
+    if (data == 0U) { return 32U; }
+
+    uint32_t count = 0U;
+    uint32_t mask = 0x80000000U;
+
+    while ((data & mask) == 0U)
+    {
+      count += 1U;
+      mask = mask >> 1U;
+    }
+    return count;
+  }
+#endif
+
+/**
+  \brief LDR Exclusive (8 bit)
+  \details Executes an exclusive LDR instruction for 8 bit value.
+  \param [in] ptr Pointer to data
+  \return value of type uint8_t at (*ptr)
+ */
+#define __LDREXB __iar_builtin_LDREXB
+
+/**
+  \brief LDR Exclusive (16 bit)
+  \details Executes an exclusive LDR instruction for 16 bit values.
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDREXH __iar_builtin_LDREXH + +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#if __ICCARM_INTRINSICS_VERSION__ == 2 + #define __LDREXW __iar_builtin_LDREX +#elif (!defined(__ARM_ARCH_6M__) || __ARM_ARCH_6M__==0) + __IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr) + { + return __LDREX((unsigned long *)ptr); + } +#endif + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXB __iar_builtin_STREXB + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXH __iar_builtin_STREXH + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if __ICCARM_INTRINSICS_VERSION__ == 2 + #define __STREXW __iar_builtin_STREX +#elif (!defined(__ARM_ARCH_6M__) || __ARM_ARCH_6M__==0) + __IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr) + { + return __STREX(value, (unsigned long *)ptr); + } +#endif + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __iar_builtin_CLREX + +#if !__IAR_M0_FAMILY + /** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (1..32) + \return Saturated value + */ + #define __SSAT __iar_builtin_SSAT + + /** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (0..31) + \return Saturated value + */ + #define __USAT __iar_builtin_USAT +#else /* !__IAR_M0_FAMILY */ + /** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] val Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ + __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) + { + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; + } + + /** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] val Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ + __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) + { + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; + } +#endif /* !__IAR_M0_FAMILY */ + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. 
+ \param [in] value Value to rotate + \return Rotated value + */ +#if __ICCARM_INTRINSICS_VERSION__ == 2 + #define __RRX __iar_builtin_RRX +#elif (defined (__CORTEX_M) && __CORTEX_M >= 0x03) /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. */ + __IAR_FT uint32_t __RRX(uint32_t value) + { + uint32_t result; + __ASM volatile("RRX %0, %1" : "=r"(result) : "r" (value)); + return(result); + } +#endif + +#if (defined (__CORTEX_M) && __CORTEX_M >= 0x03) /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. */ + /** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ + __IAR_FT uint8_t __LDRBT(volatile uint8_t *addr) + { + uint32_t res; + __ASM volatile ("LDRBT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return ((uint8_t)res); + } + + /** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ + __IAR_FT uint16_t __LDRHT(volatile uint16_t *addr) + { + uint32_t res; + __ASM volatile ("LDRHT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return ((uint16_t)res); + } + + /** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ + __IAR_FT uint32_t __LDRT(volatile uint32_t *addr) + { + uint32_t res; + __ASM volatile ("LDRT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); + return res; + } + + /** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ + __IAR_FT void __STRBT(uint8_t value, volatile uint8_t *addr) + { + __ASM volatile ("STRBT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); + } + + /** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ + __IAR_FT void __STRHT(uint16_t value, volatile uint16_t *addr) + { + __ASM volatile ("STRHT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); + } + + /** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ + __IAR_FT void __STRT(uint32_t value, volatile uint32_t *addr) + { + __ASM volatile ("STRT %1, [%0]" : : "r" (addr), "r" (value) : "memory"); + } +#endif /* (defined (__CORTEX_M) && __CORTEX_M >= 0x03) */ + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + /** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ + __IAR_FT uint8_t __LDAB(volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint8_t)res); + } + + /** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ + __IAR_FT uint16_t __LDAH(volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint16_t)res); + } + + /** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ + __IAR_FT uint32_t __LDA(volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("LDA %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return res; + } + + /** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ + __IAR_FT void __STLB(uint8_t value, volatile uint8_t *ptr) + { + __ASM volatile ("STLB %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + /** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ + __IAR_FT void __STLH(uint16_t value, volatile uint16_t *ptr) + { + __ASM volatile ("STLH %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + /** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ + __IAR_FT void __STL(uint32_t value, volatile uint32_t *ptr) + { + __ASM volatile ("STL %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); + } + + /** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ + __IAR_FT uint8_t __LDAEXB(volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEXB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint8_t)res); + } + + /** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ + __IAR_FT uint16_t __LDAEXH(volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEXH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return ((uint16_t)res); + } + + /** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ + __IAR_FT uint32_t __LDAEX(volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("LDAEX %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); + return res; + } + + /** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ + __IAR_FT uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEXB %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + + /** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ + __IAR_FT uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEXH %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } + + /** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ + __IAR_FT uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) + { + uint32_t res; + __ASM volatile ("STLEX %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); + return res; + } +#endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */ + + +#if __ICCARM_INTRINSICS_VERSION__ == 2 && __ARM_MEDIA__ + #define __SADD8 __iar_builtin_SADD8 + #define __QADD8 __iar_builtin_QADD8 + #define __SHADD8 __iar_builtin_SHADD8 + #define __UADD8 __iar_builtin_UADD8 + #define __UQADD8 __iar_builtin_UQADD8 + #define __UHADD8 __iar_builtin_UHADD8 + #define __SSUB8 __iar_builtin_SSUB8 + #define __QSUB8 __iar_builtin_QSUB8 + #define __SHSUB8 __iar_builtin_SHSUB8 + #define __USUB8 __iar_builtin_USUB8 + #define __UQSUB8 __iar_builtin_UQSUB8 + #define __UHSUB8 __iar_builtin_UHSUB8 + #define __SADD16 __iar_builtin_SADD16 + #define __QADD16 __iar_builtin_QADD16 + #define __SHADD16 __iar_builtin_SHADD16 + #define __UADD16 __iar_builtin_UADD16 + #define __UQADD16 __iar_builtin_UQADD16 + #define __UHADD16 __iar_builtin_UHADD16 + #define __SSUB16 __iar_builtin_SSUB16 + #define __QSUB16 __iar_builtin_QSUB16 + #define __SHSUB16 __iar_builtin_SHSUB16 + #define __USUB16 __iar_builtin_USUB16 + #define __UQSUB16 __iar_builtin_UQSUB16 + #define __UHSUB16 __iar_builtin_UHSUB16 + #define __SASX __iar_builtin_SASX + #define __QASX __iar_builtin_QASX + #define __SHASX __iar_builtin_SHASX + #define __UASX __iar_builtin_UASX + #define __UQASX __iar_builtin_UQASX + #define __UHASX __iar_builtin_UHASX + #define __SSAX __iar_builtin_SSAX + #define __QSAX __iar_builtin_QSAX + #define __SHSAX __iar_builtin_SHSAX + #define __USAX __iar_builtin_USAX + #define __UQSAX __iar_builtin_UQSAX + #define __UHSAX __iar_builtin_UHSAX + #define __USAD8 __iar_builtin_USAD8 + #define __USADA8 __iar_builtin_USADA8 + #define __SSAT16 __iar_builtin_SSAT16 + #define __USAT16 __iar_builtin_USAT16 + #define __UXTB16 __iar_builtin_UXTB16 + #define __UXTAB16 __iar_builtin_UXTAB16 + #define __SXTB16 __iar_builtin_SXTB16 + #define __SXTAB16 __iar_builtin_SXTAB16 + #define __SMUAD __iar_builtin_SMUAD + #define __SMUADX __iar_builtin_SMUADX + #define __SMMLA __iar_builtin_SMMLA + #define __SMLAD __iar_builtin_SMLAD + #define __SMLADX __iar_builtin_SMLADX + #define __SMLALD __iar_builtin_SMLALD + #define __SMLALDX __iar_builtin_SMLALDX + #define __SMUSD __iar_builtin_SMUSD + #define __SMUSDX __iar_builtin_SMUSDX + #define __SMLSD __iar_builtin_SMLSD + #define __SMLSDX __iar_builtin_SMLSDX + #define __SMLSLD __iar_builtin_SMLSLD + #define __SMLSLDX __iar_builtin_SMLSLDX + #define __SEL __iar_builtin_SEL + #define __QADD __iar_builtin_QADD + #define __QSUB __iar_builtin_QSUB + #define __PKHBT __iar_builtin_PKHBT + #define __PKHTB __iar_builtin_PKHTB +#endif /* __ICCARM_INTRINSICS_VERSION__ == 2 && __ARM_MEDIA__*/ + +// Include the profile specific settings: +#if __ARM_ARCH_PROFILE == 'A' + #include "a-profile/cmsis_iccarm_a.h" +#elif __ARM_ARCH_PROFILE == 'R' + #include 
"r-profile/cmsis_iccarm_r.h" +#elif __ARM_ARCH_PROFILE == 'M' + #include "m-profile/cmsis_iccarm_m.h" +#else + #error "Unknown Arm architecture profile" +#endif + +#endif /* __CMSIS_ICCARM_H__ */ diff --git a/CMSIS/Core/Include/cmsis_tiarmclang.h b/CMSIS/Core/Include/cmsis_tiarmclang.h new file mode 100644 index 000000000..9b15b21e9 --- /dev/null +++ b/CMSIS/Core/Include/cmsis_tiarmclang.h @@ -0,0 +1,833 @@ +/**************************************************************************//** + * @file cmsis_tiarmclang.h + * @brief CMSIS compiler tiarmclang header file + * @version V6.0.0 + * @date 27. July 2023 + ******************************************************************************/ +/* + * Copyright (c) 2023 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CMSIS_TIARMCLANG_H +#define __CMSIS_TIARMCLANG_H + +#pragma clang system_header /* treat file as system include file */ + +#if (__ARM_ACLE >= 200) + #include +#else + #error Compiler must support ACLE V2.0 +#endif /* (__ARM_ACLE >= 200) */ + +/* CMSIS compiler specific defines */ +#ifndef __ASM + #define __ASM __asm +#endif +#ifndef __INLINE + #define __INLINE __inline +#endif +#ifndef __STATIC_INLINE + #define __STATIC_INLINE static __inline +#endif +#ifndef __STATIC_FORCEINLINE + #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline +#endif +#ifndef __NO_RETURN + #define __NO_RETURN __attribute__((__noreturn__)) +#endif +#ifndef CMSIS_DEPRECATED + #define CMSIS_DEPRECATED __attribute__((deprecated)) +#endif +#ifndef __USED + #define __USED __attribute__((used)) +#endif +#ifndef __WEAK + #define __WEAK __attribute__((weak)) +#endif +#ifndef __PACKED + #define __PACKED __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_STRUCT + #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) +#endif +#ifndef __PACKED_UNION + #define __PACKED_UNION union __attribute__((packed, aligned(1))) +#endif +#ifndef __UNALIGNED_UINT16_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT16_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) +#endif +#ifndef __UNALIGNED_UINT32_WRITE + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) +#endif +#ifndef __UNALIGNED_UINT32_READ + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wpacked" + 
__PACKED_STRUCT T_UINT32_READ { uint32_t v; }; + #pragma clang diagnostic pop + #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) +#endif +#ifndef __ALIGNED + #define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __RESTRICT + #define __RESTRICT __restrict +#endif +#ifndef __COMPILER_BARRIER + #define __COMPILER_BARRIER() __ASM volatile("":::"memory") +#endif +#ifndef __NO_INIT + #define __NO_INIT __attribute__ ((section (".bss.noinit"))) +#endif +#ifndef __ALIAS + #define __ALIAS(x) __attribute__ ((alias(x))) +#endif + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions + @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. + */ +#define __NOP() __nop() + + +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + */ +#define __WFI() __wfi() + + +/** + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. + */ +#define __WFE() __wfe() + + +/** + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + */ +#define __SEV() __sev() + + +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __isb(0xF) + + +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +#define __DSB() __dsb(0xF) + + +/** + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. + */ +#define __DMB() __dmb(0xF) + + +/** + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV(value) __rev(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value + */ +#define __REV16(value) __rev16(value) + + +/** + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. 
+  \param [in] value Value to reverse
+  \return Reversed value
+ */
+#define __REVSH(value) __revsh(value)
+
+
+/**
+  \brief Rotate Right in unsigned value (32 bit)
+  \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
+  \param [in] op1 Value to rotate
+  \param [in] op2 Number of Bits to rotate
+  \return Rotated value
+ */
+#define __ROR(op1, op2) __ror(op1, op2)
+
+
+/**
+  \brief Breakpoint
+  \details Causes the processor to enter Debug state.
+           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
+  \param [in] value is ignored by the processor.
+             If required, a debugger can use it to store additional information about the breakpoint.
+ */
+#define __BKPT(value) __ASM volatile ("bkpt "#value)
+
+
+/**
+  \brief Reverse bit order of value
+  \details Reverses the bit order of the given value.
+  \param [in] value Value to reverse
+  \return Reversed value
+ */
+#define __RBIT(value) __rbit(value)
+
+
+/**
+  \brief Count leading zeros
+  \details Counts the number of leading zeros of a data value.
+  \param [in] value Value to count the leading zeros
+  \return number of leading zeros in value
+ */
+#define __CLZ(value) __clz(value)
+
+
+#if ((__ARM_FEATURE_SAT >= 1) && \
+     (__ARM_ARCH_ISA_THUMB >= 2) )
+/* __ARM_FEATURE_SAT is wrong for Armv8-M Baseline devices */
+/**
+  \brief Signed Saturate
+  \details Saturates a signed value.
+  \param [in] value Value to be saturated
+  \param [in] sat Bit position to saturate to (1..32)
+  \return Saturated value
+ */
+#define __SSAT(value, sat) __ssat(value, sat)
+
+
+/**
+  \brief Unsigned Saturate
+  \details Saturates an unsigned value.
+  \param [in] value Value to be saturated
+  \param [in] sat Bit position to saturate to (0..31)
+  \return Saturated value
+ */
+#define __USAT(value, sat) __usat(value, sat)
+
+#else /* (__ARM_FEATURE_SAT >= 1) */
+/**
+  \brief Signed Saturate
+  \details Saturates a signed value.
+  \param [in] value Value to be saturated
+  \param [in] sat Bit position to saturate to (1..32)
+  \return Saturated value
+ */
+__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat)
+{
+  if ((sat >= 1U) && (sat <= 32U))
+  {
+    const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
+    const int32_t min = -1 - max ;
+    if (val > max)
+    {
+      return (max);
+    }
+    else if (val < min)
+    {
+      return (min);
+    }
+  }
+  return (val);
+}
+
+
+/**
+  \brief Unsigned Saturate
+  \details Saturates an unsigned value.
+  \param [in] value Value to be saturated
+  \param [in] sat Bit position to saturate to (0..31)
+  \return Saturated value
+ */
+__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat)
+{
+  if (sat <= 31U)
+  {
+    const uint32_t max = ((1U << sat) - 1U);
+    if (val > (int32_t)max)
+    {
+      return (max);
+    }
+    else if (val < 0)
+    {
+      return (0U);
+    }
+  }
+  return ((uint32_t)val);
+}
+#endif /* (__ARM_FEATURE_SAT >= 1) */
+
+
+#if (__ARM_FEATURE_LDREX >= 1)
+/**
+  \brief Remove the exclusive lock
+  \details Removes the exclusive lock which is created by LDREX.
+ */
+#define __CLREX __builtin_arm_clrex
+
+
+/**
+  \brief LDR Exclusive (8 bit)
+  \details Executes an exclusive LDR instruction for 8 bit value.
+  \param [in] ptr Pointer to data
+  \return value of type uint8_t at (*ptr)
+ */
+#define __LDREXB (uint8_t)__builtin_arm_ldrex
+
+
+/**
+  \brief STR Exclusive (8 bit)
+  \details Executes an exclusive STR instruction for 8 bit values.
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXB (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 1) */ + + +#if (__ARM_FEATURE_LDREX >= 2) +/** + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDREXH (uint16_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXH (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 2) */ + + +#if (__ARM_FEATURE_LDREX >= 4) +/** + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDREXW (uint32_t)__builtin_arm_ldrex + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXW (uint32_t)__builtin_arm_strex +#endif /* (__ARM_FEATURE_LDREX >= 4) */ + + +#if (__ARM_ARCH_ISA_THUMB >= 2) +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value + */ +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +{ + uint32_t result; + + __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); + return (result); +} + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t)result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t)result); /* Add explicit type cast here */ +} + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return (result); +} + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); +} + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); +} +#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ + + +#if (__ARM_ARCH >= 8) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t)result); /* Add explicit type cast here */ +} + + +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +{ + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t)result); /* Add explicit type cast here */ +} + + +/** + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +{ + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return (result); +} + + +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +{ + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +{ + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +{ + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +} + + +/** + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDAEXB (uint8_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDAEXH (uint16_t)__builtin_arm_ldaex + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXB (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXH (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEX (uint32_t)__builtin_arm_stlex + +#endif /* (__ARM_ARCH >= 8) */ + +/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} +#endif + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} +#endif + +#if (__ARM_ARCH_ISA_THUMB >= 2) +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} + + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} +#endif + + + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) +{ +#if (__ARM_FP >= 1) + return (__builtin_arm_get_fpscr()); +#else + return (0U); +#endif +} + + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
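Editor's note: the PRIMASK-based interrupt enable/disable functions above are typically wrapped in a short critical section. A minimal sketch, assuming the usual CMSIS __get_PRIMASK()/__set_PRIMASK() accessors from this header family are also available:

/* Short critical section protecting a shared variable; saving and restoring
 * PRIMASK keeps the section safe to nest. Illustrative only. */
static volatile uint32_t tick_count;

void tick_increment(void)
{
  uint32_t primask = __get_PRIMASK();
  __disable_irq();          /* set PRIMASK: mask configurable interrupts */
  tick_count++;
  __set_PRIMASK(primask);   /* restore previous masking state            */
}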
+ \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +{ +#if (__ARM_FP >= 1) + __builtin_arm_set_fpscr(fpscr); +#else + (void)fpscr; +#endif +} + + +/** @} end of CMSIS_Core_RegAccFunctions */ + + +/* ################### Compiler specific Intrinsics ########################### */ +/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +#if (__ARM_FEATURE_DSP == 1) +#define __SADD8 __sadd8 +#define __QADD8 __qadd8 +#define __SHADD8 __shadd8 +#define __UADD8 __uadd8 +#define __UQADD8 __uqadd8 +#define __UHADD8 __uhadd8 +#define __SSUB8 __ssub8 +#define __QSUB8 __qsub8 +#define __SHSUB8 __shsub8 +#define __USUB8 __usub8 +#define __UQSUB8 __uqsub8 +#define __UHSUB8 __uhsub8 +#define __SADD16 __sadd16 +#define __QADD16 __qadd16 +#define __SHADD16 __shadd16 +#define __UADD16 __uadd16 +#define __UQADD16 __uqadd16 +#define __UHADD16 __uhadd16 +#define __SSUB16 __ssub16 +#define __QSUB16 __qsub16 +#define __SHSUB16 __shsub16 +#define __USUB16 __usub16 +#define __UQSUB16 __uqsub16 +#define __UHSUB16 __uhsub16 +#define __SASX __sasx +#define __QASX __qasx +#define __SHASX __shasx +#define __UASX __uasx +#define __UQASX __uqasx +#define __UHASX __uhasx +#define __SSAX __ssax +#define __QSAX __qsax +#define __SHSAX __shsax +#define __USAX __usax +#define __UQSAX __uqsax +#define __UHSAX __uhsax +#define __USAD8 __usad8 +#define __USADA8 __usada8 +#define __SSAT16 __ssat16 +#define __USAT16 __usat16 +#define __UXTB16 __uxtb16 +#define __UXTAB16 __uxtab16 +#define __SXTB16 __sxtb16 +#define __SXTAB16 __sxtab16 +#define __SMUAD __smuad +#define __SMUADX __smuadx +#define __SMLAD __smlad +#define __SMLADX __smladx +#define __SMLALD __smlald +#define __SMLALDX __smlaldx +#define __SMUSD __smusd +#define __SMUSDX __smusdx +#define __SMLSD __smlsd +#define __SMLSDX __smlsdx +#define __SMLSLD __smlsld +#define __SMLSLDX __smlsldx +#define __SEL __sel +#define __QADD __qadd +#define __QSUB __qsub + +#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ + ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) + +#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ + ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) + +#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) + +#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + +__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) +{ + int32_t result; + + __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); + return (result); +} + +#endif /* (__ARM_FEATURE_DSP == 1) */ +/** @} end of group CMSIS_SIMD_intrinsics */ + +// Include the profile specific settings: +#if __ARM_ARCH_PROFILE == 'A' + #error "Core-A is not supported for this compiler" +#elif __ARM_ARCH_PROFILE == 'R' + #error "Core-R is not supported for this compiler" +#elif __ARM_ARCH_PROFILE == 'M' + #include "m-profile/cmsis_tiarmclang_m.h" +#else + #error "Unknown Arm architecture profile" +#endif + +#endif /* __CMSIS_TIARMCLANG_H */ diff --git a/CMSIS/Core/Include/m-profile/cmsis_armclang_m.h b/CMSIS/Core/Include/m-profile/cmsis_armclang_m.h index 84bd6faf4..76d01b490 100644 --- a/CMSIS/Core/Include/m-profile/cmsis_armclang_m.h +++ b/CMSIS/Core/Include/m-profile/cmsis_armclang_m.h @@ -2,7 +2,7 @@ * @file cmsis_armclang_m.h * @brief CMSIS compiler armclang (Arm Compiler 6) header file * @version V6.0.0 - * @date 27. July 2023 + * @date 4. 
August 2023 ******************************************************************************/ /* * Copyright (c) 2009-2023 Arm Limited. All rights reserved. @@ -27,657 +27,49 @@ #pragma clang system_header /* treat file as system include file */ -#if (__ARM_ACLE >= 200) - #include -#else - #error Compiler must support ACLE V2.0 -#endif /* (__ARM_ACLE >= 200) */ - -/* CMSIS compiler specific defines */ -#ifndef __ASM - #define __ASM __asm -#endif -#ifndef __INLINE - #define __INLINE __inline -#endif -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static __inline -#endif -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline -#endif -#ifndef __NO_RETURN - #define __NO_RETURN __attribute__((__noreturn__)) -#endif -#ifndef __USED - #define __USED __attribute__((used)) -#endif -#ifndef __WEAK - #define __WEAK __attribute__((weak)) -#endif -#ifndef __PACKED - #define __PACKED __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_STRUCT - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_UNION - #define __PACKED_UNION union __attribute__((packed, aligned(1))) -#endif -#ifndef __UNALIGNED_UINT16_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT16_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) -#endif -#ifndef __UNALIGNED_UINT32_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT32_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) -#endif -#ifndef __ALIGNED - #define __ALIGNED(x) __attribute__((aligned(x))) -#endif -#ifndef __RESTRICT - #define __RESTRICT __restrict -#endif -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif -#ifndef __NO_INIT - #define __NO_INIT __attribute__ ((section (".bss.noinit"))) -#endif -#ifndef __ALIAS - #define __ALIAS(x) __attribute__ ((alias(x))) +#ifndef __CMSIS_ARMCLANG_H + #error "This file must not be included directly" #endif /* ######################### Startup and Lowlevel Init ######################## */ + #ifndef __PROGRAM_START -#define __PROGRAM_START __main + #define __PROGRAM_START __main #endif #ifndef __INITIAL_SP -#define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit + #define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit #endif #ifndef __STACK_LIMIT -#define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base + #define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base #endif #ifndef __VECTOR_TABLE -#define __VECTOR_TABLE __Vectors + #define __VECTOR_TABLE __Vectors #endif #ifndef __VECTOR_TABLE_ATTRIBUTE -#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) + #define 
__VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) #endif #if (__ARM_FEATURE_CMSE == 3) -#ifndef __STACK_SEAL -#define __STACK_SEAL Image$$STACKSEAL$$ZI$$Base -#endif - -#ifndef __TZ_STACK_SEAL_SIZE -#define __TZ_STACK_SEAL_SIZE 8U -#endif - -#ifndef __TZ_STACK_SEAL_VALUE -#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL -#endif - - -__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { - *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; -} -#endif - - -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions - @{ -*/ - -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_RW_REG(r) "+l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_RW_REG(r) "+r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) -#endif - -/** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. - */ -#define __NOP() __nop() - - -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI() __wfi() - - -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. - */ -#define __WFE() __wfe() - - -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. - */ -#define __SEV() __sev() - - -/** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. - */ -#define __ISB() __isb(0xF) - - -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. - */ -#define __DSB() __dsb(0xF) - - -/** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. - */ -#define __DMB() __dmb(0xF) - - -/** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV(value) __rev(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV16(value) __rev16(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. 
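Editor's note: the byte-reverse intrinsics in the block above are the usual way to convert between little- and big-endian words, e.g. for network byte order. A small sketch under the assumption that __REV is provided as mapped here:

/* Convert a 32-bit word read from a big-endian protocol buffer to host order
 * on a little-endian Cortex core. Illustrative only. */
static inline uint32_t be32_to_host(uint32_t big_endian_word)
{
  return __REV(big_endian_word);   /* 0x12345678 <-> 0x78563412 */
}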
- \param [in] value Value to reverse - \return Reversed value - */ -#define __REVSH(value) __revsh(value) - - -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -#define __ROR(op1, op2) __ror(op1, op2) - - -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. - */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) - - -/** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value - */ -#define __RBIT(value) __rbit(value) - - -/** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value - */ -#define __CLZ(value) __clz(value) - - -#if ((__ARM_FEATURE_SAT >= 1) && \ - (__ARM_ARCH_ISA_THUMB >= 2) ) -/* __ARM_FEATURE_SAT is wrong for Armv8-M Baseline devices */ -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT(value, sat) __ssat(value, sat) - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT(value, sat) __usat(value, sat) - -#else /* (__ARM_FEATURE_SAT >= 1) */ -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) -{ - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return (max); - } - else if (val < min) - { - return (min); - } - } - return (val); -} - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) -{ - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return (max); - } - else if (val < 0) - { - return (0U); - } + #ifndef __STACK_SEAL + #define __STACK_SEAL Image$$STACKSEAL$$ZI$$Base + #endif + + #ifndef __TZ_STACK_SEAL_SIZE + #define __TZ_STACK_SEAL_SIZE 8U + #endif + + #ifndef __TZ_STACK_SEAL_VALUE + #define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL + #endif + + __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; } - return ((uint32_t)val); -} -#endif /* (__ARM_FEATURE_SAT >= 1) */ - - -#if (__ARM_FEATURE_LDREX >= 1) -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. 
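Editor's note: __CLZ and the saturation helpers above are commonly combined in fixed-point DSP code, for instance to clamp an accumulator and derive a normalisation shift. A hedged example, assuming the intrinsics above are in scope:

/* Saturate a wide accumulator into a signed 16-bit sample and compute a
 * crude normalisation shift with __CLZ. Sketch only. */
static inline int16_t saturate_to_q15(int32_t acc)
{
  return (int16_t)__SSAT(acc >> 15, 16);            /* clamp to [-32768, 32767] */
}

static inline uint32_t norm_shift(uint32_t magnitude)
{
  return (magnitude != 0U) ? __CLZ(magnitude) : 32U; /* number of leading zeros */
}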
- */ -#define __CLREX __builtin_arm_clrex - - -/** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDREXB (uint8_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXB (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 1) */ - - -#if (__ARM_FEATURE_LDREX >= 2) -/** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDREXH (uint16_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXH (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 2) */ - - -#if (__ARM_FEATURE_LDREX >= 4) -/** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDREXW (uint32_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXW (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 4) */ - - -#if (__ARM_ARCH_ISA_THUMB >= 2) -/** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) -{ - uint32_t result; - - __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); - return (result); -} - - -/** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. 
- \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return (result); -} - - -/** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); -} -#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ - - -#if (__ARM_ARCH >= 8) -/** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return (result); -} - - -/** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDAEXB (uint8_t)__builtin_arm_ldaex - - -/** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDAEXH (uint16_t)__builtin_arm_ldaex - - -/** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDAEX (uint32_t)__builtin_arm_ldaex - - -/** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEXB (uint32_t)__builtin_arm_stlex - - -/** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEXH (uint32_t)__builtin_arm_stlex - - -/** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEX (uint32_t)__builtin_arm_stlex - -#endif /* (__ARM_ARCH >= 8) */ - -/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ +#endif /* ########################### Core Function Access ########################### */ @@ -686,31 +78,6 @@ __STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) @{ */ -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -#ifndef __ARM_COMPAT_H -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} -#endif - - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -#ifndef __ARM_COMPAT_H -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} -#endif - /** \brief Get Control Register @@ -727,18 +94,18 @@ __STATIC_FORCEINLINE uint32_t __get_CONTROL(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. - \return non-secure Control Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. 
+ \return non-secure Control Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return (result); + } #endif @@ -755,16 +122,16 @@ __STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set - */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); - __ISB(); -} + /** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ + __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) + { + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); + } #endif @@ -825,18 +192,18 @@ __STATIC_FORCEINLINE uint32_t __get_PSP(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return (result); + } #endif @@ -852,15 +219,15 @@ __STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); -} + /** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) + { + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); + } #endif @@ -879,18 +246,18 @@ __STATIC_FORCEINLINE uint32_t __get_MSP(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. - \return MSP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. 
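Editor's note: the CONTROL/PSP accessors in this hunk are what an RTOS port uses to move thread-mode execution onto the process stack. A sketch, assuming privileged thread mode and an 8-byte-aligned stack (the array name is a placeholder):

/* Switch thread mode from MSP to a dedicated process stack. Illustrative only. */
static uint64_t process_stack[256];                 /* 8-byte aligned by type   */

void use_process_stack(void)
{
  __set_PSP((uint32_t)&process_stack[256]);         /* top of the process stack */
  __set_CONTROL(__get_CONTROL() | 0x2U);            /* CONTROL.SPSEL = 1: PSP   */
  __ISB();                                          /* required after CONTROL   */
}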
+ \return MSP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return (result); + } #endif @@ -906,42 +273,42 @@ __STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); -} + /** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) + { + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); + } #endif #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return (result); -} - - -/** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) -{ - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); -} + /** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return (result); + } + + + /** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) + { + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); + } #endif @@ -960,18 +327,18 @@ __STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. - \return Priority Mask value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return (result); + } #endif @@ -987,41 +354,19 @@ __STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. 
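Editor's note: in a TrustZone secure image, the *_NS accessors above are used once during secure boot to prepare the non-secure world. A hedged sketch, assuming __ARM_FEATURE_CMSE == 3; NS_VECTOR_TABLE_ADDR is a hypothetical placeholder for the real non-secure vector table address:

#define NS_VECTOR_TABLE_ADDR  0x00200000UL   /* placeholder address, device specific */

/* Prepare the non-secure main stack pointer before starting non-secure code. */
void boot_nonsecure_prepare(void)
{
  const uint32_t *ns_vectors = (const uint32_t *)NS_VECTOR_TABLE_ADDR;

  __TZ_set_MSP_NS(ns_vectors[0]);   /* word 0 of the NS vector table: initial MSP */
  /* word 1 holds the non-secure reset handler; it is called via a CMSE
   * non-secure function pointer, which is outside the scope of this sketch. */
}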
- \param [in] priMask Priority Mask - */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) -{ - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); -} + /** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ + __STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) + { + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); + } #endif #if (__ARM_ARCH_ISA_THUMB >= 2) -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) -{ - __ASM volatile ("cpsie f" : : : "memory"); -} - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); -} - - /** \brief Get Base Priority \details Returns the current value of the Base Priority register. @@ -1037,18 +382,18 @@ __STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return (result); + } #endif @@ -1064,15 +409,15 @@ __STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); -} + /** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) + { + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); + } #endif @@ -1103,18 +448,18 @@ __STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. 
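Editor's note: BASEPRI, handled just above, masks only interrupts at or below a given priority, which is how kernels implement priority-aware critical sections while leaving urgent interrupts enabled. A sketch, assuming a device with the priority value 0x40 chosen as the kernel threshold (device dependent):

/* Mask interrupts whose priority value is 0x40 or numerically higher (less
 * urgent), leaving higher-priority interrupts running. Illustrative only. */
void enter_kernel_critical(void)
{
  __set_BASEPRI(0x40U);   /* priorities 0x40..0xFF are now masked */
}

void exit_kernel_critical(void)
{
  __set_BASEPRI(0U);      /* 0 disables BASEPRI masking           */
}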
+ \return Fault Mask register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return (result); + } #endif @@ -1130,15 +475,15 @@ __STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set - */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) -{ - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); -} + /** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ + __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) + { + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + } #endif #endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ @@ -1158,7 +503,7 @@ __STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure PSPLIM is RAZ/WI */ return (0U); #else @@ -1205,7 +550,7 @@ __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure PSPLIM is RAZ/WI */ (void)ProcStackPtrLimit; #else @@ -1215,24 +560,24 @@ __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif -} + /** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. 
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + (void)ProcStackPtrLimit; + #else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); + #endif + } #endif @@ -1248,7 +593,7 @@ __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure MSPLIM is RAZ/WI */ return (0U); #else @@ -1260,26 +605,26 @@ __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return (result); -#endif -} + /** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return (result); + #endif + } #endif @@ -1295,7 +640,7 @@ __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure MSPLIM is RAZ/WI */ (void)MainStackPtrLimit; #else @@ -1305,162 +650,27 @@ __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. - \param [in] MainStackPtrLimit Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif -} + /** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. 
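Editor's note: the PSPLIM accessors in this hunk give Armv8-M Mainline hardware stack-overflow detection; an RTOS sets the limit whenever it installs a task stack. A hedged sketch (stack_base/stack_size describe a hypothetical, 8-byte-aligned task stack):

/* Install a task stack with hardware overflow detection. Illustrative only. */
void install_task_stack(uint32_t stack_base, uint32_t stack_size)
{
  __set_PSPLIM(stack_base);             /* fault if PSP ever drops below this */
  __set_PSP(stack_base + stack_size);   /* start at the top of the stack      */
}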
+ + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + (void)MainStackPtrLimit; + #else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); + #endif + } #endif - #endif /* (__ARM_ARCH >= 8) */ - - -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value - */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) -{ -#if (__ARM_FP >= 1) - return (__builtin_arm_get_fpscr()); -#else - return (0U); -#endif -} - - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set - */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) -{ -#if (__ARM_FP >= 1) - __builtin_arm_set_fpscr(fpscr); -#else - (void)fpscr; -#endif -} - - /** @} end of CMSIS_Core_RegAccFunctions */ -/* ################### Compiler specific Intrinsics ########################### */ -/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics - Access to dedicated SIMD instructions - @{ -*/ - -#if (__ARM_FEATURE_DSP == 1) -#define __SADD8 __sadd8 -#define __QADD8 __qadd8 -#define __SHADD8 __shadd8 -#define __UADD8 __uadd8 -#define __UQADD8 __uqadd8 -#define __UHADD8 __uhadd8 -#define __SSUB8 __ssub8 -#define __QSUB8 __qsub8 -#define __SHSUB8 __shsub8 -#define __USUB8 __usub8 -#define __UQSUB8 __uqsub8 -#define __UHSUB8 __uhsub8 -#define __SADD16 __sadd16 -#define __QADD16 __qadd16 -#define __SHADD16 __shadd16 -#define __UADD16 __uadd16 -#define __UQADD16 __uqadd16 -#define __UHADD16 __uhadd16 -#define __SSUB16 __ssub16 -#define __QSUB16 __qsub16 -#define __SHSUB16 __shsub16 -#define __USUB16 __usub16 -#define __UQSUB16 __uqsub16 -#define __UHSUB16 __uhsub16 -#define __SASX __sasx -#define __QASX __qasx -#define __SHASX __shasx -#define __UASX __uasx -#define __UQASX __uqasx -#define __UHASX __uhasx -#define __SSAX __ssax -#define __QSAX __qsax -#define __SHSAX __shsax -#define __USAX __usax -#define __UQSAX __uqsax -#define __UHSAX __uhsax -#define __USAD8 __usad8 -#define __USADA8 __usada8 -#define __SSAT16 __ssat16 -#define __USAT16 __usat16 -#define __UXTB16 __uxtb16 -#define __UXTAB16 __uxtab16 -#define __SXTB16 __sxtb16 -#define __SXTAB16 __sxtab16 -#define __SMUAD __smuad -#define __SMUADX __smuadx -#define __SMLAD __smlad -#define __SMLADX __smladx -#define __SMLALD __smlald -#define __SMLALDX __smlaldx -#define __SMUSD __smusd -#define __SMUSDX __smusdx -#define __SMLSD __smlsd -#define __SMLSDX __smlsdx -#define __SMLSLD __smlsld -#define __SMLSLDX __smlsldx -#define __SEL __sel -#define __QADD __qadd -#define __QSUB __qsub - -#define __PKHBT(ARG1,ARG2,ARG3) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - -#define __PKHTB(ARG1,ARG2,ARG3) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - if (ARG3 == 0) \ - __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ - else \ - __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - 
__RES; \ - }) - -#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) - -#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) - -__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) -{ - int32_t result; - - __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); - return (result); -} - -#endif /* (__ARM_FEATURE_DSP == 1) */ -/** @} end of group CMSIS_SIMD_intrinsics */ - - #endif /* __CMSIS_ARMCLANG_M_H */ diff --git a/CMSIS/Core/Include/m-profile/cmsis_clang_m.h b/CMSIS/Core/Include/m-profile/cmsis_clang_m.h index 48600ee1a..e5c4f0a4d 100644 --- a/CMSIS/Core/Include/m-profile/cmsis_clang_m.h +++ b/CMSIS/Core/Include/m-profile/cmsis_clang_m.h @@ -1,8 +1,8 @@ /**************************************************************************//** * @file cmsis_clang_m.h * @brief CMSIS compiler LLVM/Clang header file - * @version V1.1.0 - * @date 27. July 2023 + * @version V6.0.0 + * @date 4. August 2023 ******************************************************************************/ /* * Copyright (c) 2009-2023 Arm Limited. All rights reserved. @@ -27,662 +27,49 @@ #pragma clang system_header /* treat file as system include file */ -#if (__ARM_ACLE >= 200) - #include -#else - #error Compiler must support ACLE V2.0 -#endif /* (__ARM_ACLE >= 200) */ - -/* Fallback for __has_builtin */ -#ifndef __has_builtin - #define __has_builtin(x) (0) -#endif - -/* CMSIS compiler specific defines */ -#ifndef __ASM - #define __ASM __asm -#endif -#ifndef __INLINE - #define __INLINE inline -#endif -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static inline -#endif -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline -#endif -#ifndef __NO_RETURN - #define __NO_RETURN __attribute__((__noreturn__)) -#endif -#ifndef __USED - #define __USED __attribute__((used)) -#endif -#ifndef __WEAK - #define __WEAK __attribute__((weak)) -#endif -#ifndef __PACKED - #define __PACKED __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_STRUCT - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_UNION - #define __PACKED_UNION union __attribute__((packed, aligned(1))) -#endif -#ifndef __UNALIGNED_UINT16_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT16_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) -#endif -#ifndef __UNALIGNED_UINT32_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT32_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) -#endif -#ifndef __ALIGNED - #define __ALIGNED(x) __attribute__((aligned(x))) -#endif 
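Editor's note: the DSP intrinsic mapping above (active when __ARM_FEATURE_DSP == 1) targets packed 8/16-bit arithmetic. A small dot-product sketch using __SMLAD, offered as an illustration only and assuming the samples are packed two Q15 values per 32-bit word:

/* Multiply-accumulate two packed 16-bit sample pairs per iteration. */
static int32_t dot_q15(const uint32_t *a, const uint32_t *b, uint32_t pairs)
{
  int32_t acc = 0;
  for (uint32_t i = 0U; i < pairs; i++) {
    acc = __SMLAD(a[i], b[i], acc);   /* acc += a.lo*b.lo + a.hi*b.hi */
  }
  return acc;
}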
-#ifndef __RESTRICT - #define __RESTRICT __restrict -#endif -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif -#ifndef __NO_INIT - #define __NO_INIT __attribute__ ((section (".bss.noinit"))) -#endif -#ifndef __ALIAS - #define __ALIAS(x) __attribute__ ((alias(x))) +#ifndef __CMSIS_CLANG_H + #error "This file must not be included directly" #endif /* ######################### Startup and Lowlevel Init ######################## */ + #ifndef __PROGRAM_START -#define __PROGRAM_START _start + #define __PROGRAM_START __main #endif #ifndef __INITIAL_SP -#define __INITIAL_SP __stack + #define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit #endif #ifndef __STACK_LIMIT -#define __STACK_LIMIT __stack_limit + #define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base #endif #ifndef __VECTOR_TABLE -#define __VECTOR_TABLE __Vectors + #define __VECTOR_TABLE __Vectors #endif #ifndef __VECTOR_TABLE_ATTRIBUTE -#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors"))) + #define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) #endif #if (__ARM_FEATURE_CMSE == 3) -#ifndef __STACK_SEAL -#define __STACK_SEAL __stack_seal -#endif - -#ifndef __TZ_STACK_SEAL_SIZE -#define __TZ_STACK_SEAL_SIZE 8U -#endif - -#ifndef __TZ_STACK_SEAL_VALUE -#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL -#endif - - -__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { - *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; -} -#endif - - -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions - @{ -*/ - -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_RW_REG(r) "+l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_RW_REG(r) "+r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) -#endif - -/** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. - */ -#define __NOP() __nop() - - -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI() __wfi() - - -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. - */ -#define __WFE() __wfe() - - -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. - */ -#define __SEV() __sev() - - -/** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. - */ -#define __ISB() __isb(0xF) - - -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. 
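Editor's note: the startup symbols regrouped above are consumed by the device startup file; the vector table is named via __VECTOR_TABLE and placed by __VECTOR_TABLE_ATTRIBUTE. A hedged sketch of the usual pattern (following the CMSIS device template, table truncated to two entries):

/* Minimal vector table as emitted by a CMSIS device startup file.
 * Reset_Handler and the initial stack symbol come from startup/linker config. */
extern void Reset_Handler(void);
extern uint32_t __INITIAL_SP;

typedef void (*VECTOR_TABLE_Type)(void);

const VECTOR_TABLE_Type __VECTOR_TABLE[2] __VECTOR_TABLE_ATTRIBUTE = {
  (VECTOR_TABLE_Type)(&__INITIAL_SP),   /* entry 0: initial main stack pointer */
  Reset_Handler                         /* entry 1: reset handler              */
};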
- */ -#define __DSB() __dsb(0xF) - - -/** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. - */ -#define __DMB() __dmb(0xF) - - -/** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV(value) __rev(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV16(value) __rev16(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REVSH(value) __revsh(value) - - -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -#define __ROR(op1, op2) __ror(op1, op2) - - -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. - */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) - - -/** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value - */ -#define __RBIT(value) __rbit(value) - - -/** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value - */ -#define __CLZ(value) __clz(value) - - -#if ((__ARM_FEATURE_SAT >= 1) && \ - (__ARM_ARCH_ISA_THUMB >= 2) ) -/* __ARM_FEATURE_SAT is wrong for Armv8-M Baseline devices */ -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT(value, sat) __ssat(value, sat) - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT(value, sat) __usat(value, sat) - -#else /* (__ARM_FEATURE_SAT >= 1) */ -/** - \brief Signed Saturate - \details Saturates a signed value. 
- \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) -{ - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return (max); - } - else if (val < min) - { - return (min); - } + #ifndef __STACK_SEAL + #define __STACK_SEAL Image$$STACKSEAL$$ZI$$Base + #endif + + #ifndef __TZ_STACK_SEAL_SIZE + #define __TZ_STACK_SEAL_SIZE 8U + #endif + + #ifndef __TZ_STACK_SEAL_VALUE + #define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL + #endif + + __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; } - return (val); -} - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) -{ - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return (max); - } - else if (val < 0) - { - return (0U); - } - } - return ((uint32_t)val); -} -#endif /* (__ARM_FEATURE_SAT >= 1) */ - - -#if (__ARM_FEATURE_LDREX >= 1) -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. - */ -#define __CLREX __builtin_arm_clrex - - -/** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDREXB (uint8_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXB (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 1) */ - - -#if (__ARM_FEATURE_LDREX >= 2) -/** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDREXH (uint16_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXH (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 2) */ - - -#if (__ARM_FEATURE_LDREX >= 4) -/** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDREXW (uint32_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXW (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 4) */ - - -#if (__ARM_ARCH_ISA_THUMB >= 2) -/** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. 
- \param [in] value Value to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) -{ - uint32_t result; - - __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); - return (result); -} - - -/** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return (result); -} - - -/** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); -} -#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ - - -#if (__ARM_ARCH >= 8) -/** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. 
- \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return (result); -} - - -/** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDAEXB (uint8_t)__builtin_arm_ldaex - - -/** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDAEXH (uint16_t)__builtin_arm_ldaex - - -/** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDAEX (uint32_t)__builtin_arm_ldaex - - -/** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEXB (uint32_t)__builtin_arm_stlex - - -/** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEXH (uint32_t)__builtin_arm_stlex - - -/** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEX (uint32_t)__builtin_arm_stlex - -#endif /* (__ARM_ARCH >= 8) */ - -/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ +#endif /* ########################### Core Function Access ########################### */ @@ -691,27 +78,6 @@ __STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) @{ */ -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. - Can only be executed in Privileged modes. 
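/*
 * A minimal spinlock sketch using the load-acquire / store-release exclusives
 * documented above (names illustrative; assumes an Armv8 core, i.e. __ARM_ARCH >= 8).
 */
static inline void spin_lock(volatile uint32_t *lock)
{
  do {
    while (__LDAEX(lock) != 0U) { }    /* spin until the lock reads as free (acquire)  */
  } while (__STLEX(1U, lock) != 0U);   /* try to claim it; non-zero result means retry */
}

static inline void spin_unlock(volatile uint32_t *lock)
{
  __STL(0U, lock);                     /* store-release marks the lock free again      */
}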
- */ -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} - - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} - /** \brief Get Control Register @@ -728,18 +94,18 @@ __STATIC_FORCEINLINE uint32_t __get_CONTROL(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. - \return non-secure Control Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; + /** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) + { + uint32_t result; - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return (result); -} + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return (result); + } #endif @@ -756,16 +122,16 @@ __STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set - */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); - __ISB(); -} + /** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ + __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) + { + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); + } #endif @@ -826,18 +192,18 @@ __STATIC_FORCEINLINE uint32_t __get_PSP(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) -{ - uint32_t result; + /** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) + { + uint32_t result; - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return (result); -} + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return (result); + } #endif @@ -853,15 +219,15 @@ __STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); -} + /** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. 
+ \param [in] topOfProcStack Process Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) + { + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); + } #endif @@ -880,18 +246,18 @@ __STATIC_FORCEINLINE uint32_t __get_MSP(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. - \return MSP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) -{ - uint32_t result; + /** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) + { + uint32_t result; - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return (result); -} + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return (result); + } #endif @@ -907,42 +273,42 @@ __STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); -} + /** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) + { + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); + } #endif #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return (result); + } -/** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) -{ - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); -} + /** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) + { + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); + } #endif @@ -961,18 +327,18 @@ __STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. 
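/*
 * A minimal critical-section sketch using the PRIMASK accessors in this header:
 * save the current mask, disable IRQs, then restore the saved state so that
 * nested use remains safe (function name illustrative).
 */
static inline void update_shared_data(void)
{
  uint32_t primask = __get_PRIMASK();   /* remember whether IRQs were already masked */
  __disable_irq();                      /* set PRIMASK: mask all configurable IRQs   */
  /* ... modify data shared with interrupt handlers ... */
  __set_PRIMASK(primask);               /* restore the previous PRIMASK state        */
}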
- \return Priority Mask value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) -{ - uint32_t result; + /** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) + { + uint32_t result; - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); - return (result); -} + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return (result); + } #endif @@ -988,41 +354,19 @@ __STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. - \param [in] priMask Priority Mask - */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) -{ - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); -} + /** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ + __STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) + { + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); + } #endif #if (__ARM_ARCH_ISA_THUMB >= 2) -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) -{ - __ASM volatile ("cpsie f" : : : "memory"); -} - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); -} - - /** \brief Get Base Priority \details Returns the current value of the Base Priority register. @@ -1038,18 +382,18 @@ __STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) -{ - uint32_t result; + /** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) + { + uint32_t result; - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); - return (result); -} + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return (result); + } #endif @@ -1065,15 +409,15 @@ __STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); -} + /** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. 
+ \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) + { + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); + } #endif @@ -1104,18 +448,18 @@ __STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return (result); + } #endif @@ -1131,15 +475,15 @@ __STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set - */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) -{ - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); -} + /** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ + __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) + { + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + } #endif #endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ @@ -1159,7 +503,7 @@ __STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure PSPLIM is RAZ/WI */ return (0U); #else @@ -1206,7 +550,7 @@ __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure PSPLIM is RAZ/WI */ (void)ProcStackPtrLimit; #else @@ -1216,24 +560,24 @@ __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif -} + /** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. 
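/*
 * A sketch of setting up a process stack with the PSP/PSPLIM and CONTROL accessors
 * in this header (stack size and names are illustrative; on cores without the Main
 * Extension the PSPLIM write is ignored, as noted above).
 */
static uint64_t thread_stack[128];                              /* 8-byte aligned stack area    */

static inline void switch_to_process_stack(void)
{
  __set_PSPLIM((uint32_t)&thread_stack[0]);                     /* lowest address PSP may reach */
  __set_PSP((uint32_t)&thread_stack[0] + sizeof(thread_stack)); /* initial top of the stack     */
  __set_CONTROL(__get_CONTROL() | 0x2U);                        /* CONTROL.SPSEL = 1: use PSP   */
}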
+ + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + (void)ProcStackPtrLimit; + #else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); + #endif + } #endif @@ -1249,7 +593,7 @@ __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure MSPLIM is RAZ/WI */ return (0U); #else @@ -1261,26 +605,26 @@ __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return (result); -#endif -} + /** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return (result); + #endif + } #endif @@ -1296,7 +640,7 @@ __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure MSPLIM is RAZ/WI */ (void)MainStackPtrLimit; #else @@ -1306,162 +650,27 @@ __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. - \param [in] MainStackPtrLimit Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif -} + /** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. 
Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + (void)MainStackPtrLimit; + #else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); + #endif + } #endif - #endif /* (__ARM_ARCH >= 8) */ - - -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value - */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) -{ -#if (__ARM_FP >= 1) - return (__builtin_arm_get_fpscr()); -#else - return (0U); -#endif -} - - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set - */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) -{ -#if (__ARM_FP >= 1) - __builtin_arm_set_fpscr(fpscr); -#else - (void)fpscr; -#endif -} - - /** @} end of CMSIS_Core_RegAccFunctions */ -/* ################### Compiler specific Intrinsics ########################### */ -/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics - Access to dedicated SIMD instructions - @{ -*/ - -#if (__ARM_FEATURE_DSP == 1) -#define __SADD8 __sadd8 -#define __QADD8 __qadd8 -#define __SHADD8 __shadd8 -#define __UADD8 __uadd8 -#define __UQADD8 __uqadd8 -#define __UHADD8 __uhadd8 -#define __SSUB8 __ssub8 -#define __QSUB8 __qsub8 -#define __SHSUB8 __shsub8 -#define __USUB8 __usub8 -#define __UQSUB8 __uqsub8 -#define __UHSUB8 __uhsub8 -#define __SADD16 __sadd16 -#define __QADD16 __qadd16 -#define __SHADD16 __shadd16 -#define __UADD16 __uadd16 -#define __UQADD16 __uqadd16 -#define __UHADD16 __uhadd16 -#define __SSUB16 __ssub16 -#define __QSUB16 __qsub16 -#define __SHSUB16 __shsub16 -#define __USUB16 __usub16 -#define __UQSUB16 __uqsub16 -#define __UHSUB16 __uhsub16 -#define __SASX __sasx -#define __QASX __qasx -#define __SHASX __shasx -#define __UASX __uasx -#define __UQASX __uqasx -#define __UHASX __uhasx -#define __SSAX __ssax -#define __QSAX __qsax -#define __SHSAX __shsax -#define __USAX __usax -#define __UQSAX __uqsax -#define __UHSAX __uhsax -#define __USAD8 __usad8 -#define __USADA8 __usada8 -#define __SSAT16 __ssat16 -#define __USAT16 __usat16 -#define __UXTB16 __uxtb16 -#define __UXTAB16 __uxtab16 -#define __SXTB16 __sxtb16 -#define __SXTAB16 __sxtab16 -#define __SMUAD __smuad -#define __SMUADX __smuadx -#define __SMLAD __smlad -#define __SMLADX __smladx -#define __SMLALD __smlald -#define __SMLALDX __smlaldx -#define __SMUSD __smusd -#define __SMUSDX __smusdx -#define __SMLSD __smlsd -#define __SMLSDX __smlsdx -#define __SMLSLD __smlsld -#define __SMLSLDX __smlsldx -#define __SEL __sel -#define __QADD __qadd -#define __QSUB __qsub - -#define __PKHBT(ARG1,ARG2,ARG3) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - -#define __PKHTB(ARG1,ARG2,ARG3) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - if (ARG3 == 0) \ - __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ - else \ - 
__ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - -#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) - -#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) - -__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) -{ - int32_t result; - - __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); - return (result); -} - -#endif /* (__ARM_FEATURE_DSP == 1) */ -/** @} end of group CMSIS_SIMD_intrinsics */ - - #endif /* __CMSIS_CLANG_M_H */ diff --git a/CMSIS/Core/Include/m-profile/cmsis_gcc_m.h b/CMSIS/Core/Include/m-profile/cmsis_gcc_m.h index ba5eafe00..f0fdc59e7 100644 --- a/CMSIS/Core/Include/m-profile/cmsis_gcc_m.h +++ b/CMSIS/Core/Include/m-profile/cmsis_gcc_m.h @@ -25,7 +25,9 @@ #ifndef __CMSIS_GCC_M_H #define __CMSIS_GCC_M_H -#pragma clang system_header /* treat file as system include file */ +#ifndef __CMSIS_GCC_H + #error "This file must not be included directly" +#endif /* ignore some GCC warnings */ #pragma GCC diagnostic push @@ -33,845 +35,89 @@ #pragma GCC diagnostic ignored "-Wconversion" #pragma GCC diagnostic ignored "-Wunused-parameter" -//#if (__ARM_ACLE >= 200) - #include -//#else -// #error Compiler must support ACLE V2.0 -//#endif /* (__ARM_ACLE >= 200) */ - -/* Fallback for __has_builtin */ -#ifndef __has_builtin - #define __has_builtin(x) (0) -#endif - -/* CMSIS compiler specific defines */ -#ifndef __ASM - #define __ASM __asm -#endif -#ifndef __INLINE - #define __INLINE inline -#endif -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static inline -#endif -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline -#endif -#ifndef __NO_RETURN - #define __NO_RETURN __attribute__((__noreturn__)) -#endif -#ifndef __USED - #define __USED __attribute__((used)) -#endif -#ifndef __WEAK - #define __WEAK __attribute__((weak)) -#endif -#ifndef __PACKED - #define __PACKED __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_STRUCT - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_UNION - #define __PACKED_UNION union __attribute__((packed, aligned(1))) -#endif -#ifndef __UNALIGNED_UINT16_WRITE - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT16_READ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) -#endif -#ifndef __UNALIGNED_UINT32_WRITE - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; - #pragma GCC diagnostic pop - #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT32_READ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wpacked" - #pragma GCC diagnostic ignored "-Wattributes" - __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; - #pragma GCC diagnostic pop - #define 
__UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) -#endif -#ifndef __ALIGNED - #define __ALIGNED(x) __attribute__((aligned(x))) -#endif -#ifndef __RESTRICT - #define __RESTRICT __restrict -#endif -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif -#ifndef __NO_INIT - #define __NO_INIT __attribute__ ((section (".bss.noinit"))) -#endif -#ifndef __ALIAS - #define __ALIAS(x) __attribute__ ((alias(x))) -#endif /* ######################### Startup and Lowlevel Init ######################## */ -#ifndef __PROGRAM_START -/** - \brief Initializes data and bss sections - \details This default implementations initialized all data and additional bss - sections relying on .copy.table and .zero.table specified properly - in the used linker script. - - */ -__STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void) -{ - extern void _start(void) __NO_RETURN; - - typedef struct __copy_table { - uint32_t const* src; - uint32_t* dest; - uint32_t wlen; - } __copy_table_t; - - typedef struct __zero_table { - uint32_t* dest; - uint32_t wlen; - } __zero_table_t; - - extern const __copy_table_t __copy_table_start__; - extern const __copy_table_t __copy_table_end__; - extern const __zero_table_t __zero_table_start__; - extern const __zero_table_t __zero_table_end__; - - for (__copy_table_t const* pTable = &__copy_table_start__; pTable < &__copy_table_end__; ++pTable) { - for(uint32_t i=0u; iwlen; ++i) { - pTable->dest[i] = pTable->src[i]; +#ifndef __PROGRAM_START + /** + \brief Initializes data and bss sections + \details This default implementations initialized all data and additional bss + sections relying on .copy.table and .zero.table specified properly + in the used linker script. + + */ + __STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void) + { + extern void _start(void) __NO_RETURN; + + typedef struct __copy_table { + uint32_t const* src; + uint32_t* dest; + uint32_t wlen; + } __copy_table_t; + + typedef struct __zero_table { + uint32_t* dest; + uint32_t wlen; + } __zero_table_t; + + extern const __copy_table_t __copy_table_start__; + extern const __copy_table_t __copy_table_end__; + extern const __zero_table_t __zero_table_start__; + extern const __zero_table_t __zero_table_end__; + + for (__copy_table_t const* pTable = &__copy_table_start__; pTable < &__copy_table_end__; ++pTable) { + for(uint32_t i=0u; iwlen; ++i) { + pTable->dest[i] = pTable->src[i]; + } } - } - - for (__zero_table_t const* pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable) { - for(uint32_t i=0u; iwlen; ++i) { - pTable->dest[i] = 0u; + + for (__zero_table_t const* pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable) { + for(uint32_t i=0u; iwlen; ++i) { + pTable->dest[i] = 0u; + } } + + _start(); } - - _start(); -} - -#define __PROGRAM_START __cmsis_start + + #define __PROGRAM_START __cmsis_start #endif #ifndef __INITIAL_SP -#define __INITIAL_SP __StackTop + #define __INITIAL_SP __StackTop #endif #ifndef __STACK_LIMIT -#define __STACK_LIMIT __StackLimit + #define __STACK_LIMIT __StackLimit #endif #ifndef __VECTOR_TABLE -#define __VECTOR_TABLE __Vectors + #define __VECTOR_TABLE __Vectors #endif #ifndef __VECTOR_TABLE_ATTRIBUTE -#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors"))) + #define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors"))) #endif #if (__ARM_FEATURE_CMSE == 3) -#ifndef __STACK_SEAL -#define __STACK_SEAL __StackSeal -#endif - -#ifndef __TZ_STACK_SEAL_SIZE 
-#define __TZ_STACK_SEAL_SIZE 8U -#endif - -#ifndef __TZ_STACK_SEAL_VALUE -#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL -#endif - - -__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { - *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; -} -#endif - - -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions - @{ -*/ - -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_RW_REG(r) "+l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_RW_REG(r) "+r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) -#endif - -/** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. - */ -#define __NOP() __ASM volatile ("nop") - - -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI() __ASM volatile ("wfi":::"memory") - - -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. - */ -#define __WFE() __ASM volatile ("wfe":::"memory") - - -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. - */ -#define __SEV() __ASM volatile ("sev") - - -/** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. - */ -__STATIC_FORCEINLINE void __ISB(void) -{ - __ASM volatile ("isb 0xF":::"memory"); -} + #ifndef __STACK_SEAL + #define __STACK_SEAL __StackSeal + #endif + #ifndef __TZ_STACK_SEAL_SIZE + #define __TZ_STACK_SEAL_SIZE 8U + #endif + + #ifndef __TZ_STACK_SEAL_VALUE + #define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL + #endif -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. - */ -__STATIC_FORCEINLINE void __DSB(void) -{ - __ASM volatile ("dsb 0xF":::"memory"); -} - - -/** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. - */ -__STATIC_FORCEINLINE void __DMB(void) -{ - __ASM volatile ("dmb 0xF":::"memory"); -} - - -/** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) -{ - return __builtin_bswap32(value); -} - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. 
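/*
 * A minimal reset-handler sketch showing how the __PROGRAM_START default defined
 * earlier in this header is typically used; SystemInit() is the usual CMSIS
 * device-template hook and is an assumption here, not something this file defines.
 */
extern void SystemInit(void);

void Reset_Handler(void)
{
  SystemInit();        /* device-specific clock and system setup                   */
  __PROGRAM_START();   /* run the copy/zero tables shown above, then call _start() */
}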
- \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) -{ - uint32_t result; - - __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return (result); -} - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) -{ - return (int16_t)__builtin_bswap16(value); -} - - -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) -{ - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); -} - -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. - */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) - - -/** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value - */ -__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) -{ - uint32_t result; - -#if (__ARM_ARCH_ISA_THUMB >= 2) - __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); -#else - uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ - - result = value; /* r will be reversed bits of v; first get LSB of v */ - for (value >>= 1U; value != 0U; value >>= 1U) - { - result <<= 1U; - result |= value & 1U; - s--; - } - result <<= s; /* shift when v's highest bits are zero */ -#endif - return (result); -} - - -/** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value - */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) -{ - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. - This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". - ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a - single CLZ instruction. - */ - if (value == 0U) - { - return 32U; - } - return __builtin_clz(value); -} - - -#if (__ARM_FEATURE_SAT >= 1) -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT(value, sat) __ssat(value, sat) - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. 
- \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT(value, sat) __usat(value, sat) - -#else /* (__ARM_FEATURE_SAT >= 1) */ -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) -{ - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return (max); - } - else if (val < min) - { - return (min); - } - } - return (val); -} - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) -{ - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return (max); - } - else if (val < 0) - { - return (0U); - } - } - return ((uint32_t)val); -} -#endif /* (__ARM_FEATURE_SAT >= 1) */ - - -#if (__ARM_FEATURE_LDREX >= 1) -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. - */ -__STATIC_FORCEINLINE void __CLREX(void) -{ - __ASM volatile ("clrex" ::: "memory"); -} - - -/** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) -{ - uint32_t result; - - __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); - return ((uint8_t) result); /* Add explicit type cast here */ -} - - -/** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) -{ - uint32_t result; - - __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return (result); -} -#endif /* (__ARM_FEATURE_LDREX >= 1) */ - - -#if (__ARM_FEATURE_LDREX >= 2) -/** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) -{ - uint32_t result; - - __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) -{ - uint32_t result; - - __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return (result); -} -#endif /* (__ARM_FEATURE_LDREX >= 2) */ - - -#if (__ARM_FEATURE_LDREX >= 4) -/** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. 
- \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) -{ - uint32_t result; - - __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); - return (result); -} - - -/** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) -{ - uint32_t result; - - __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); - return (result); -} -#endif /* (__ARM_FEATURE_LDREX >= 4) */ - - -#if (__ARM_ARCH_ISA_THUMB >= 2) -/** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) -{ - uint32_t result; - - __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); - return (result); -} - - -/** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return (result); -} - - -/** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); -} -#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ - - -#if (__ARM_ARCH >= 8) -/** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return (result); -} - - -/** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. 
- \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return (result); -} - - -/** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); - return (result); -} - - -/** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); - return (result); -} - - -/** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); - return (result); -} - -#endif /* (__ARM_ARCH >= 8) */ -/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ + __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; + } +#endif /* ########################### Core Function Access ########################### */ @@ -880,28 +126,6 @@ __STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) @{ */ -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} - - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} - - /** \brief Get Control Register \details Returns the content of the Control Register. @@ -917,18 +141,18 @@ __STATIC_FORCEINLINE uint32_t __get_CONTROL(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. - \return non-secure Control Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; + /** + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. 
+ \return non-secure Control Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) + { + uint32_t result; - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return (result); -} + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return (result); + } #endif @@ -945,16 +169,16 @@ __STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set - */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); - __ISB(); -} + /** + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set + */ + __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) + { + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); + } #endif @@ -1015,18 +239,18 @@ __STATIC_FORCEINLINE uint32_t __get_PSP(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return (result); + } #endif @@ -1042,15 +266,15 @@ __STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); -} + /** + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) + { + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); + } #endif @@ -1069,18 +293,18 @@ __STATIC_FORCEINLINE uint32_t __get_MSP(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. - \return MSP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) -{ - uint32_t result; + /** + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. 
+ \return MSP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) + { + uint32_t result; - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return (result); -} + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return (result); + } #endif @@ -1096,42 +320,42 @@ __STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); -} + /** + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) + { + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); + } #endif #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return (result); + } -/** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) -{ - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); -} + /** + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) + { + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); + } #endif @@ -1150,18 +374,18 @@ __STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. - \return Priority Mask value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) -{ - uint32_t result; + /** + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) + { + uint32_t result; - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); - return (result); -} + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return (result); + } #endif @@ -1177,41 +401,19 @@ __STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. 
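A common usage pattern for the PRIMASK accessors above is a short, nestable critical section, sketched here with hypothetical helper names:

static uint32_t critical_enter(void)
{
  uint32_t primask = __get_PRIMASK();   /* remember the current masking state  */
  __disable_irq();                      /* set PRIMASK: mask configurable IRQs */
  return primask;
}

static void critical_exit(uint32_t primask)
{
  __set_PRIMASK(primask);               /* restore whatever was active before  */
}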
- \param [in] priMask Priority Mask - */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) -{ - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); -} + /** + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask + */ + __STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) + { + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); + } #endif #if (__ARM_ARCH_ISA_THUMB >= 2) -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) -{ - __ASM volatile ("cpsie f" : : : "memory"); -} - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); -} - - /** \brief Get Base Priority \details Returns the current value of the Base Priority register. @@ -1227,18 +429,18 @@ __STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) -{ - uint32_t result; + /** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) + { + uint32_t result; - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); - return (result); -} + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return (result); + } #endif @@ -1254,15 +456,15 @@ __STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); -} + /** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) + { + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); + } #endif @@ -1293,18 +495,18 @@ __STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return (result); -} + /** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. 
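A minimal sketch of BASEPRI-based masking using the accessors above; the priority threshold and the three implemented priority bits (__NVIC_PRIO_BITS == 3) are assumptions, not fixed by CMSIS:

static void run_protected_section(void)
{
  /* Mask all interrupts whose priority value is 2 or numerically higher
     (i.e. less urgent); the value is left-aligned into BASEPRI[7:5]. */
  __set_BASEPRI(2U << (8U - 3U));
  /* ... work that must not be preempted by low-urgency interrupts ... */
  __set_BASEPRI(0U);                    /* 0 disables BASEPRI masking again */
}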
+ \return Fault Mask register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) + { + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return (result); + } #endif @@ -1320,15 +522,15 @@ __STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set - */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) -{ - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); -} + /** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ + __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) + { + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + } #endif #endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ @@ -1348,7 +550,7 @@ __STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure PSPLIM is RAZ/WI */ return (0U); #else @@ -1359,26 +561,26 @@ __STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) } #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); - return (result); -#endif -} + /** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return (result); + #endif + } #endif @@ -1395,7 +597,7 @@ __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure PSPLIM is RAZ/WI */ (void)ProcStackPtrLimit; #else @@ -1405,24 +607,24 @@ __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. 
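A minimal sketch of using the stack-limit accessors above to catch process-stack overflow, assuming an ARMv8-M Mainline device (on devices without the Main Extension the write is ignored, as noted above); the stack array is hypothetical:

static uint64_t task_stack[128] __ALIGNED(8);   /* 1 KiB task stack */

static void start_task_stack(void)
{
  __set_PSPLIM((uint32_t)&task_stack[0]);    /* lowest address the PSP may reach */
  __set_PSP((uint32_t)&task_stack[128]);     /* initial PSP: top of the region   */
}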
- - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set - */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif -} + /** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ + __STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + (void)ProcStackPtrLimit; + #else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); + #endif + } #endif @@ -1438,7 +640,7 @@ __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure MSPLIM is RAZ/WI */ return (0U); #else @@ -1450,26 +652,26 @@ __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return (result); -#endif -} + /** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return (result); + #endif + } #endif @@ -1485,7 +687,7 @@ __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure MSPLIM is RAZ/WI */ (void)MainStackPtrLimit; #else @@ -1495,186 +697,30 @@ __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. 
Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. - \param [in] MainStackPtrLimit Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif -} + /** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + (void)MainStackPtrLimit; + #else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); + #endif + } #endif - #endif /* (__ARM_ARCH >= 8) */ -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value - */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) -{ -#if (__ARM_FP >= 1) - return (__builtin_arm_get_fpscr()); -#else - return (0U); -#endif -} - - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. 
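A minimal sketch of the FPSCR accessors described above, assuming an FPU is present and enabled (__ARM_FP >= 1); otherwise the getter returns 0 and the setter is a no-op, as shown:

static void clear_fp_exception_flags(void)
{
  uint32_t fpscr = __get_FPSCR();
  __set_FPSCR(fpscr & ~0xFFU);   /* clear the cumulative exception flags in FPSCR[7:0] */
}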
- \param [in] fpscr Floating Point Status/Control value to set - */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) -{ -#if (__ARM_FP >= 1) - __builtin_arm_set_fpscr(fpscr); -#else - (void)fpscr; -#endif -} - - -/** @} end of CMSIS_Core_RegAccFunctions */ - - -/* ################### Compiler specific Intrinsics ########################### */ -/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics - Access to dedicated SIMD instructions - @{ -*/ - -#if (__ARM_FEATURE_DSP == 1) -#define __SADD8 __sadd8 -#define __QADD8 __qadd8 -#define __SHADD8 __shadd8 -#define __UADD8 __uadd8 -#define __UQADD8 __uqadd8 -#define __UHADD8 __uhadd8 -#define __SSUB8 __ssub8 -#define __QSUB8 __qsub8 -#define __SHSUB8 __shsub8 -#define __USUB8 __usub8 -#define __UQSUB8 __uqsub8 -#define __UHSUB8 __uhsub8 -#define __SADD16 __sadd16 -#define __QADD16 __qadd16 -#define __SHADD16 __shadd16 -#define __UADD16 __uadd16 -#define __UQADD16 __uqadd16 -#define __UHADD16 __uhadd16 -#define __SSUB16 __ssub16 -#define __QSUB16 __qsub16 -#define __SHSUB16 __shsub16 -#define __USUB16 __usub16 -#define __UQSUB16 __uqsub16 -#define __UHSUB16 __uhsub16 -#define __SASX __sasx -#define __QASX __qasx -#define __SHASX __shasx -#define __UASX __uasx -#define __UQASX __uqasx -#define __UHASX __uhasx -#define __SSAX __ssax -#define __QSAX __qsax -#define __SHSAX __shsax -#define __USAX __usax -#define __UQSAX __uqsax -#define __UHSAX __uhsax -#define __USAD8 __usad8 -#define __USADA8 __usada8 -#define __SSAT16 __ssat16 -#define __USAT16 __usat16 -#define __UXTB16 __uxtb16 -#define __UXTAB16 __uxtab16 -#define __SXTB16 __sxtb16 -#define __SXTAB16 __sxtab16 -#define __SMUAD __smuad -#define __SMUADX __smuadx -#define __SMLAD __smlad -#define __SMLADX __smladx -#define __SMLALD __smlald -#define __SMLALDX __smlaldx -#define __SMUSD __smusd -#define __SMUSDX __smusdx -#define __SMLSD __smlsd -#define __SMLSDX __smlsdx -#define __SMLSLD __smlsld -#define __SMLSLDX __smlsldx -#define __SEL __sel -#define __QADD __qadd -#define __QSUB __qsub - -#define __PKHBT(ARG1,ARG2,ARG3) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - -#define __PKHTB(ARG1,ARG2,ARG3) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ - if (ARG3 == 0) \ - __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \ - else \ - __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ - __RES; \ - }) - -__STATIC_FORCEINLINE uint32_t __SXTB16_RORn(uint32_t op1, uint32_t rotate) -{ - uint32_t result; - if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) - { - __ASM volatile("sxtb16 %0, %1, ROR %2" : "=r"(result) : "r"(op1), "i"(rotate)); - } - else - { - result = __SXTB16(__ROR(op1, rotate)); - } - return result; -} - -__STATIC_FORCEINLINE uint32_t __SXTAB16_RORn(uint32_t op1, uint32_t op2, uint32_t rotate) -{ - uint32_t result; - if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) - { - __ASM volatile("sxtab16 %0, %1, %2, ROR %3" : "=r"(result) : "r"(op1), "r"(op2), "i"(rotate)); - } - else - { - result = __SXTAB16(op1, __ROR(op2, rotate)); - } - return result; -} - -__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) -{ - int32_t result; - - __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); - return 
(result); -} -#endif /* (__ARM_FEATURE_DSP == 1) */ -/** @} end of group CMSIS_SIMD_intrinsics */ +/*@} end of CMSIS_Core_RegAccFunctions */ #pragma GCC diagnostic pop diff --git a/CMSIS/Core/Include/m-profile/cmsis_iccarm_m.h b/CMSIS/Core/Include/m-profile/cmsis_iccarm_m.h index 82f3b2378..29faeb48a 100644 --- a/CMSIS/Core/Include/m-profile/cmsis_iccarm_m.h +++ b/CMSIS/Core/Include/m-profile/cmsis_iccarm_m.h @@ -1,8 +1,8 @@ /**************************************************************************//** * @file cmsis_iccarm_m.h * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file - * @version V5.4.0 - * @date 20. January 2023 + * @version V6.0.0 + * @date 4. August 2023 ******************************************************************************/ //------------------------------------------------------------------------------ @@ -29,292 +29,54 @@ #ifndef __CMSIS_ICCARM_M_H__ #define __CMSIS_ICCARM_M_H__ -#ifndef __ICCARM__ - #error This file should only be compiled by ICCARM -#endif - -#pragma system_include - -#define __IAR_FT _Pragma("inline=forced") __intrinsic - -#if (__VER__ >= 8000000) - #define __ICCARM_V8 1 -#else - #define __ICCARM_V8 0 -#endif - -#ifndef __ALIGNED - #if __ICCARM_V8 - #define __ALIGNED(x) __attribute__((aligned(x))) - #elif (__VER__ >= 7080000) - /* Needs IAR language extensions */ - #define __ALIGNED(x) __attribute__((aligned(x))) - #else - #warning No compiler specific solution for __ALIGNED.__ALIGNED is ignored. - #define __ALIGNED(x) - #endif -#endif - - -/* Define compiler macros for CPU architecture, used in CMSIS 5. - */ -#if __ARM_ARCH_6M__ || __ARM_ARCH_7M__ || __ARM_ARCH_7EM__ || __ARM_ARCH_8M_BASE__ || __ARM_ARCH_8M_MAIN__ -/* Macros already defined */ -#else - #if defined(__ARM8M_MAINLINE__) || defined(__ARM8EM_MAINLINE__) - #define __ARM_ARCH_8M_MAIN__ 1 - #elif defined(__ARM8M_BASELINE__) - #define __ARM_ARCH_8M_BASE__ 1 - #elif defined(__ARM_ARCH_PROFILE) && __ARM_ARCH_PROFILE == 'M' - #if __ARM_ARCH == 6 - #define __ARM_ARCH_6M__ 1 - #elif __ARM_ARCH == 7 - #if __ARM_FEATURE_DSP - #define __ARM_ARCH_7EM__ 1 - #else - #define __ARM_ARCH_7M__ 1 - #endif - #endif /* __ARM_ARCH */ - #endif /* __ARM_ARCH_PROFILE == 'M' */ -#endif - -/* Alternativ core deduction for older ICCARM's */ -#if !defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_7M__) && !defined(__ARM_ARCH_7EM__) && \ - !defined(__ARM_ARCH_8M_BASE__) && !defined(__ARM_ARCH_8M_MAIN__) - #if defined(__ARM6M__) && (__CORE__ == __ARM6M__) - #define __ARM_ARCH_6M__ 1 - #elif defined(__ARM7M__) && (__CORE__ == __ARM7M__) - #define __ARM_ARCH_7M__ 1 - #elif defined(__ARM7EM__) && (__CORE__ == __ARM7EM__) - #define __ARM_ARCH_7EM__ 1 - #elif defined(__ARM8M_BASELINE__) && (__CORE == __ARM8M_BASELINE__) - #define __ARM_ARCH_8M_BASE__ 1 - #elif defined(__ARM8M_MAINLINE__) && (__CORE == __ARM8M_MAINLINE__) - #define __ARM_ARCH_8M_MAIN__ 1 - #elif defined(__ARM8EM_MAINLINE__) && (__CORE == __ARM8EM_MAINLINE__) - #define __ARM_ARCH_8M_MAIN__ 1 - #else - #error "Unknown target." 
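Referring back to the DSP intrinsic aliases mapped above, a minimal sketch of a packed Q15 dot product, valid only when __ARM_FEATURE_DSP == 1; the function and parameter names are hypothetical:

static int32_t dot_q15(const uint32_t *a, const uint32_t *b, uint32_t pairs)
{
  uint32_t acc = 0U;
  for (uint32_t i = 0U; i < pairs; i++) {
    acc = __SMLAD(a[i], b[i], acc);   /* acc += a.low16*b.low16 + a.high16*b.high16 */
  }
  return (int32_t)acc;
}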
- #endif -#endif - - - -#if defined(__ARM_ARCH_6M__) && __ARM_ARCH_6M__==1 - #define __IAR_M0_FAMILY 1 -#elif defined(__ARM_ARCH_8M_BASE__) && __ARM_ARCH_8M_BASE__==1 - #define __IAR_M0_FAMILY 1 -#else - #define __IAR_M0_FAMILY 0 -#endif - -#ifndef __NO_INIT - #define __NO_INIT __attribute__ ((section (".noinit"))) -#endif -#ifndef __ALIAS - #define __ALIAS(x) __attribute__ ((alias(x))) -#endif - -#ifndef __ASM - #define __ASM __asm -#endif - -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif - -#ifndef __INLINE - #define __INLINE inline -#endif - -#ifndef __NO_RETURN - #if __ICCARM_V8 - #define __NO_RETURN __attribute__((__noreturn__)) - #else - #define __NO_RETURN _Pragma("object_attribute=__noreturn") - #endif -#endif - -#ifndef __PACKED - #if __ICCARM_V8 - #define __PACKED __attribute__((packed, aligned(1))) - #else - /* Needs IAR language extensions */ - #define __PACKED __packed - #endif -#endif - -#ifndef __PACKED_STRUCT - #if __ICCARM_V8 - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) - #else - /* Needs IAR language extensions */ - #define __PACKED_STRUCT __packed struct - #endif -#endif - -#ifndef __PACKED_UNION - #if __ICCARM_V8 - #define __PACKED_UNION union __attribute__((packed, aligned(1))) - #else - /* Needs IAR language extensions */ - #define __PACKED_UNION __packed union - #endif -#endif - -#ifndef __RESTRICT - #if __ICCARM_V8 - #define __RESTRICT __restrict - #else - /* Needs IAR language extensions */ - #define __RESTRICT restrict - #endif -#endif - -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static inline -#endif - -#ifndef __FORCEINLINE - #define __FORCEINLINE _Pragma("inline=forced") -#endif - -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE -#endif - -#ifndef __UNALIGNED_UINT16_READ -#pragma language=save -#pragma language=extended -__IAR_FT uint16_t __iar_uint16_read(void const *ptr) -{ - return *(__packed uint16_t*)(ptr); -} -#pragma language=restore -#define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR) -#endif - - -#ifndef __UNALIGNED_UINT16_WRITE -#pragma language=save -#pragma language=extended -__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val) -{ - *(__packed uint16_t*)(ptr) = val;; -} -#pragma language=restore -#define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL) -#endif - -#ifndef __UNALIGNED_UINT32_READ -#pragma language=save -#pragma language=extended -__IAR_FT uint32_t __iar_uint32_read(void const *ptr) -{ - return *(__packed uint32_t*)(ptr); -} -#pragma language=restore -#define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR) -#endif - -#ifndef __UNALIGNED_UINT32_WRITE -#pragma language=save -#pragma language=extended -__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val) -{ - *(__packed uint32_t*)(ptr) = val;; -} -#pragma language=restore -#define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL) +#ifndef __CMSIS_ICCARM_H__ + #error "This file must not be included directly" #endif -#ifndef __UNALIGNED_UINT32 /* deprecated */ -#pragma language=save -#pragma language=extended -__packed struct __iar_u32 { uint32_t v; }; -#pragma language=restore -#define __UNALIGNED_UINT32(PTR) (((struct __iar_u32 *)(PTR))->v) -#endif - -#ifndef __USED - #if __ICCARM_V8 - #define __USED __attribute__((used)) - #else - #define __USED _Pragma("__root") - #endif +#ifndef __ICCARM__ + #error This file should only be compiled by ICCARM #endif -#undef __WEAK /* undo the definition from DLib_Defaults.h 
*/ -#ifndef __WEAK - #if __ICCARM_V8 - #define __WEAK __attribute__((weak)) - #else - #define __WEAK _Pragma("__weak") - #endif -#endif #ifndef __PROGRAM_START -#define __PROGRAM_START __iar_program_start + #define __PROGRAM_START __iar_program_start #endif #ifndef __INITIAL_SP -#define __INITIAL_SP CSTACK$$Limit + #define __INITIAL_SP CSTACK$$Limit #endif #ifndef __STACK_LIMIT -#define __STACK_LIMIT CSTACK$$Base + #define __STACK_LIMIT CSTACK$$Base #endif #ifndef __VECTOR_TABLE -#define __VECTOR_TABLE __vector_table + #define __VECTOR_TABLE __vector_table #endif #ifndef __VECTOR_TABLE_ATTRIBUTE -#define __VECTOR_TABLE_ATTRIBUTE @".intvec" + #define __VECTOR_TABLE_ATTRIBUTE @".intvec" #endif #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) -#ifndef __STACK_SEAL -#define __STACK_SEAL STACKSEAL$$Base -#endif - -#ifndef __TZ_STACK_SEAL_SIZE -#define __TZ_STACK_SEAL_SIZE 8U -#endif + #ifndef __STACK_SEAL + #define __STACK_SEAL STACKSEAL$$Base + #endif -#ifndef __TZ_STACK_SEAL_VALUE -#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL -#endif + #ifndef __TZ_STACK_SEAL_SIZE + #define __TZ_STACK_SEAL_SIZE 8U + #endif -__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { - *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; -} -#endif + #ifndef __TZ_STACK_SEAL_VALUE + #define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL + #endif -#ifndef __ICCARM_INTRINSICS_VERSION__ - #define __ICCARM_INTRINSICS_VERSION__ 0 + __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; + } #endif #if __ICCARM_INTRINSICS_VERSION__ == 2 - - #if defined(__CLZ) - #undef __CLZ - #endif - #if defined(__REVSH) - #undef __REVSH - #endif - #if defined(__RBIT) - #undef __RBIT - #endif - #if defined(__SSAT) - #undef __SSAT - #endif - #if defined(__USAT) - #undef __USAT - #endif - - #include "iccarm_builtin.h" - #define __disable_fault_irq __iar_builtin_disable_fiq #define __disable_irq __iar_builtin_disable_interrupt #define __enable_fault_irq __iar_builtin_enable_fiq @@ -362,11 +124,11 @@ __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { #define __set_BASEPRI(VALUE) (__arm_wsr("BASEPRI", (VALUE))) #define __set_BASEPRI_MAX(VALUE) (__arm_wsr("BASEPRI_MAX", (VALUE))) -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) -{ - __arm_wsr("CONTROL", control); - __iar_builtin_ISB(); -} + __STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) + { + __arm_wsr("CONTROL", control); + __iar_builtin_ISB(); + } #define __set_FAULTMASK(VALUE) (__arm_wsr("FAULTMASK", (VALUE))) #define __set_MSP(VALUE) (__arm_wsr("MSP", (VALUE))) @@ -390,11 +152,11 @@ __STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) #define __TZ_get_CONTROL_NS() (__arm_rsr("CONTROL_NS")) -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __arm_wsr("CONTROL_NS", control); - __iar_builtin_ISB(); -} + __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) + { + __arm_wsr("CONTROL_NS", control); + __iar_builtin_ISB(); + } #define __TZ_get_PSP_NS() (__arm_rsr("PSP_NS")) #define __TZ_set_PSP_NS(VALUE) (__arm_wsr("PSP_NS", (VALUE))) @@ -422,124 +184,6 @@ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) #define __TZ_get_MSPLIM_NS() (__arm_rsr("MSPLIM_NS")) #define __TZ_set_MSPLIM_NS(VALUE) (__arm_wsr("MSPLIM_NS", (VALUE))) - #define __NOP __iar_builtin_no_operation - - #define __CLZ __iar_builtin_CLZ - #define __CLREX __iar_builtin_CLREX - - #define __DMB __iar_builtin_DMB - #define __DSB __iar_builtin_DSB - #define 
__ISB __iar_builtin_ISB - - #define __LDREXB __iar_builtin_LDREXB - #define __LDREXH __iar_builtin_LDREXH - #define __LDREXW __iar_builtin_LDREX - - #define __RBIT __iar_builtin_RBIT - #define __REV __iar_builtin_REV - #define __REV16 __iar_builtin_REV16 - - __IAR_FT int16_t __REVSH(int16_t val) - { - return (int16_t) __iar_builtin_REVSH(val); - } - - #define __ROR __iar_builtin_ROR - #define __RRX __iar_builtin_RRX - - #define __SEV __iar_builtin_SEV - - #if !__IAR_M0_FAMILY - #define __SSAT __iar_builtin_SSAT - #endif - - #define __STREXB __iar_builtin_STREXB - #define __STREXH __iar_builtin_STREXH - #define __STREXW __iar_builtin_STREX - - #if !__IAR_M0_FAMILY - #define __USAT __iar_builtin_USAT - #endif - - #define __WFE __iar_builtin_WFE - #define __WFI __iar_builtin_WFI - - #if __ARM_MEDIA__ - #define __SADD8 __iar_builtin_SADD8 - #define __QADD8 __iar_builtin_QADD8 - #define __SHADD8 __iar_builtin_SHADD8 - #define __UADD8 __iar_builtin_UADD8 - #define __UQADD8 __iar_builtin_UQADD8 - #define __UHADD8 __iar_builtin_UHADD8 - #define __SSUB8 __iar_builtin_SSUB8 - #define __QSUB8 __iar_builtin_QSUB8 - #define __SHSUB8 __iar_builtin_SHSUB8 - #define __USUB8 __iar_builtin_USUB8 - #define __UQSUB8 __iar_builtin_UQSUB8 - #define __UHSUB8 __iar_builtin_UHSUB8 - #define __SADD16 __iar_builtin_SADD16 - #define __QADD16 __iar_builtin_QADD16 - #define __SHADD16 __iar_builtin_SHADD16 - #define __UADD16 __iar_builtin_UADD16 - #define __UQADD16 __iar_builtin_UQADD16 - #define __UHADD16 __iar_builtin_UHADD16 - #define __SSUB16 __iar_builtin_SSUB16 - #define __QSUB16 __iar_builtin_QSUB16 - #define __SHSUB16 __iar_builtin_SHSUB16 - #define __USUB16 __iar_builtin_USUB16 - #define __UQSUB16 __iar_builtin_UQSUB16 - #define __UHSUB16 __iar_builtin_UHSUB16 - #define __SASX __iar_builtin_SASX - #define __QASX __iar_builtin_QASX - #define __SHASX __iar_builtin_SHASX - #define __UASX __iar_builtin_UASX - #define __UQASX __iar_builtin_UQASX - #define __UHASX __iar_builtin_UHASX - #define __SSAX __iar_builtin_SSAX - #define __QSAX __iar_builtin_QSAX - #define __SHSAX __iar_builtin_SHSAX - #define __USAX __iar_builtin_USAX - #define __UQSAX __iar_builtin_UQSAX - #define __UHSAX __iar_builtin_UHSAX - #define __USAD8 __iar_builtin_USAD8 - #define __USADA8 __iar_builtin_USADA8 - #define __SSAT16 __iar_builtin_SSAT16 - #define __USAT16 __iar_builtin_USAT16 - #define __UXTB16 __iar_builtin_UXTB16 - #define __UXTAB16 __iar_builtin_UXTAB16 - #define __SXTB16 __iar_builtin_SXTB16 - #define __SXTAB16 __iar_builtin_SXTAB16 - #define __SMUAD __iar_builtin_SMUAD - #define __SMUADX __iar_builtin_SMUADX - #define __SMMLA __iar_builtin_SMMLA - #define __SMLAD __iar_builtin_SMLAD - #define __SMLADX __iar_builtin_SMLADX - #define __SMLALD __iar_builtin_SMLALD - #define __SMLALDX __iar_builtin_SMLALDX - #define __SMUSD __iar_builtin_SMUSD - #define __SMUSDX __iar_builtin_SMUSDX - #define __SMLSD __iar_builtin_SMLSD - #define __SMLSDX __iar_builtin_SMLSDX - #define __SMLSLD __iar_builtin_SMLSLD - #define __SMLSLDX __iar_builtin_SMLSLDX - #define __SEL __iar_builtin_SEL - #define __QADD __iar_builtin_QADD - #define __QSUB __iar_builtin_QSUB - #define __PKHBT __iar_builtin_PKHBT - #define __PKHTB __iar_builtin_PKHTB - #endif - -#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */ - - #if __IAR_M0_FAMILY - /* Avoid clash between intrinsics.h and arm_math.h when compiling for Cortex-M0. 
*/ - #define __CLZ __cmsis_iar_clz_not_active - #define __SSAT __cmsis_iar_ssat_not_active - #define __USAT __cmsis_iar_usat_not_active - #define __RBIT __cmsis_iar_rbit_not_active - #define __get_APSR __cmsis_iar_get_APSR_not_active - #endif - #if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ (defined (__FPU_USED ) && (__FPU_USED == 1U)) )) @@ -548,7 +192,7 @@ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) #endif #ifdef __INTRINSICS_INCLUDED - #error intrinsics.h is already included previously! + #error intrinsics.h is already included previously! #endif #include @@ -596,8 +240,8 @@ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) return res; } - #endif - + #endif /* __IAR_M0_FAMILY */ +#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */ #if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ (defined (__FPU_USED ) && (__FPU_USED == 1U)) )) #undef __get_FPSCR @@ -615,30 +259,8 @@ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) #define __get_xPSR __get_PSR - #if (!defined(__ARM_ARCH_6M__) || __ARM_ARCH_6M__==0) - - __IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr) - { - return __LDREX((unsigned long *)ptr); - } - - __IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr) - { - return __STREX(value, (unsigned long *)ptr); - } - #endif - - /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. */ #if (__CORTEX_M >= 0x03) - - __IAR_FT uint32_t __RRX(uint32_t value) - { - uint32_t result; - __ASM volatile("RRX %0, %1" : "=r"(result) : "r" (value)); - return(result); - } - __IAR_FT void __set_BASEPRI_MAX(uint32_t value) { __asm volatile("MSR BASEPRI_MAX,%0"::"r" (value)); @@ -647,15 +269,8 @@ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) #define __enable_fault_irq __enable_fiq #define __disable_fault_irq __disable_fiq - - #endif /* (__CORTEX_M >= 0x03) */ - __IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2) - { - return (op1 >> op2) | (op1 << ((sizeof(op1)*8)-op2)); - } - #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) @@ -827,173 +442,15 @@ __STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) __asm volatile("MSR MSPLIM_NS,%0" :: "r" (value)); } - #endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */ - + #endif /* (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ #endif /* __ICCARM_INTRINSICS_VERSION__ == 2 */ -#define __BKPT(value) __asm volatile ("BKPT %0" : : "i"(value)) #if __IAR_M0_FAMILY - __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) - { - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; - } - __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) - { - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; - } #endif -#if (__CORTEX_M >= 0x03) /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. 
*/ - - __IAR_FT uint8_t __LDRBT(volatile uint8_t *addr) - { - uint32_t res; - __ASM volatile ("LDRBT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); - return ((uint8_t)res); - } - - __IAR_FT uint16_t __LDRHT(volatile uint16_t *addr) - { - uint32_t res; - __ASM volatile ("LDRHT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); - return ((uint16_t)res); - } - - __IAR_FT uint32_t __LDRT(volatile uint32_t *addr) - { - uint32_t res; - __ASM volatile ("LDRT %0, [%1]" : "=r" (res) : "r" (addr) : "memory"); - return res; - } - - __IAR_FT void __STRBT(uint8_t value, volatile uint8_t *addr) - { - __ASM volatile ("STRBT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); - } - - __IAR_FT void __STRHT(uint16_t value, volatile uint16_t *addr) - { - __ASM volatile ("STRHT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory"); - } - - __IAR_FT void __STRT(uint32_t value, volatile uint32_t *addr) - { - __ASM volatile ("STRT %1, [%0]" : : "r" (addr), "r" (value) : "memory"); - } - -#endif /* (__CORTEX_M >= 0x03) */ - -#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) - - - __IAR_FT uint8_t __LDAB(volatile uint8_t *ptr) - { - uint32_t res; - __ASM volatile ("LDAB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); - return ((uint8_t)res); - } - - __IAR_FT uint16_t __LDAH(volatile uint16_t *ptr) - { - uint32_t res; - __ASM volatile ("LDAH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); - return ((uint16_t)res); - } - - __IAR_FT uint32_t __LDA(volatile uint32_t *ptr) - { - uint32_t res; - __ASM volatile ("LDA %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); - return res; - } - - __IAR_FT void __STLB(uint8_t value, volatile uint8_t *ptr) - { - __ASM volatile ("STLB %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); - } - - __IAR_FT void __STLH(uint16_t value, volatile uint16_t *ptr) - { - __ASM volatile ("STLH %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); - } - - __IAR_FT void __STL(uint32_t value, volatile uint32_t *ptr) - { - __ASM volatile ("STL %1, [%0]" :: "r" (ptr), "r" (value) : "memory"); - } - - __IAR_FT uint8_t __LDAEXB(volatile uint8_t *ptr) - { - uint32_t res; - __ASM volatile ("LDAEXB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); - return ((uint8_t)res); - } - - __IAR_FT uint16_t __LDAEXH(volatile uint16_t *ptr) - { - uint32_t res; - __ASM volatile ("LDAEXH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); - return ((uint16_t)res); - } - - __IAR_FT uint32_t __LDAEX(volatile uint32_t *ptr) - { - uint32_t res; - __ASM volatile ("LDAEX %0, [%1]" : "=r" (res) : "r" (ptr) : "memory"); - return res; - } - - __IAR_FT uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) - { - uint32_t res; - __ASM volatile ("STLEXB %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); - return res; - } - - __IAR_FT uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) - { - uint32_t res; - __ASM volatile ("STLEXH %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); - return res; - } - - __IAR_FT uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) - { - uint32_t res; - __ASM volatile ("STLEX %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory"); - return res; - } - -#endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */ - #undef __IAR_FT #undef __IAR_M0_FAMILY #undef __ICCARM_V8 diff --git a/CMSIS/Core/Include/m-profile/cmsis_tiarmclang_m.h b/CMSIS/Core/Include/m-profile/cmsis_tiarmclang_m.h index 1230a2f78..657cfadae 100644 --- 
a/CMSIS/Core/Include/m-profile/cmsis_tiarmclang_m.h +++ b/CMSIS/Core/Include/m-profile/cmsis_tiarmclang_m.h @@ -1,8 +1,8 @@ /**************************************************************************//** * @file cmsis_tiarmclang_m.h * @brief CMSIS compiler tiarmclang header file - * @version V1.1.0 - * @date 27. July 2023 + * @version V6.0.0 + * @date 04. August 2023 ******************************************************************************/ /* * Copyright (c) 2023 Arm Limited. All rights reserved. @@ -27,88 +27,12 @@ #pragma clang system_header /* treat file as system include file */ -#if (__ARM_ACLE >= 200) - #include -#else - #error Compiler must support ACLE V2.0 -#endif /* (__ARM_ACLE >= 200) */ - -/* CMSIS compiler specific defines */ -#ifndef __ASM - #define __ASM __asm -#endif -#ifndef __INLINE - #define __INLINE __inline -#endif -#ifndef __STATIC_INLINE - #define __STATIC_INLINE static __inline -#endif -#ifndef __STATIC_FORCEINLINE - #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline -#endif -#ifndef __NO_RETURN - #define __NO_RETURN __attribute__((__noreturn__)) -#endif -#ifndef __USED - #define __USED __attribute__((used)) -#endif -#ifndef __WEAK - #define __WEAK __attribute__((weak)) -#endif -#ifndef __PACKED - #define __PACKED __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_STRUCT - #define __PACKED_STRUCT struct __attribute__((packed, aligned(1))) -#endif -#ifndef __PACKED_UNION - #define __PACKED_UNION union __attribute__((packed, aligned(1))) -#endif -#ifndef __UNALIGNED_UINT16_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT16_WRITE { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT16_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT16_READ { uint16_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v) -#endif -#ifndef __UNALIGNED_UINT32_WRITE - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT32_WRITE { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val)) -#endif -#ifndef __UNALIGNED_UINT32_READ - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wpacked" - __PACKED_STRUCT T_UINT32_READ { uint32_t v; }; - #pragma clang diagnostic pop - #define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v) -#endif -#ifndef __ALIGNED - #define __ALIGNED(x) __attribute__((aligned(x))) -#endif -#ifndef __RESTRICT - #define __RESTRICT __restrict -#endif -#ifndef __COMPILER_BARRIER - #define __COMPILER_BARRIER() __ASM volatile("":::"memory") -#endif -#ifndef __NO_INIT - #define __NO_INIT __attribute__ ((section (".bss.noinit"))) -#endif -#ifndef __ALIAS - #define __ALIAS(x) __attribute__ ((alias(x))) +#ifndef __CMSIS_TIARMCLANG_H + #error "This file must not be included directly" #endif /* ######################### Startup and Lowlevel Init ######################## */ + #ifndef __PROGRAM_START #define __PROGRAM_START _c_int00 #endif @@ -142,575 +66,18 @@ #define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL #endif - __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { *((uint64_t *)stackTop) = 
__TZ_STACK_SEAL_VALUE; } #endif -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions - @{ -*/ - -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_RW_REG(r) "+l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_RW_REG(r) "+r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) -#endif - -/** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. - */ -#define __NOP() __nop() - - -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI() __wfi() - - -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. - */ -#define __WFE() __wfe() - - -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. - */ -#define __SEV() __sev() - - -/** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. - */ -#define __ISB() __isb(0xF) - - -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. - */ -#define __DSB() __dsb(0xF) - - -/** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. - */ -#define __DMB() __dmb(0xF) - - -/** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV(value) __rev(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REV16(value) __rev16(value) - - -/** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value - */ -#define __REVSH(value) __revsh(value) - - -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -#define __ROR(op1, op2) __ror(op1, op2) - - -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. 
- If required, a debugger can use it to store additional information about the breakpoint. - */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) - - -/** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value - */ -#define __RBIT(value) __rbit(value) - - -/** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value - */ -#define __CLZ(value) __clz(value) - - -/* __ARM_FEATURE_SAT is wrong for for Armv8-M Baseline devices */ -#if ((__ARM_FEATURE_SAT >= 1) && \ - (__ARM_ARCH_ISA_THUMB >= 2) ) -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -#define __SSAT(value, sat) __ssat(value, sat) - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -#define __USAT(value, sat) __usat(value, sat) - -#else /* (__ARM_FEATURE_SAT >= 1) */ -/** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value - */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) -{ - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return (max); - } - else if (val < min) - { - return (min); - } - } - return (val); -} - - -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) -{ - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return (max); - } - else if (val < 0) - { - return (0U); - } - } - return ((uint32_t)val); -} -#endif /* (__ARM_FEATURE_SAT >= 1) */ - - -#if (__ARM_FEATURE_LDREX >= 1) -/** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. - */ -#define __CLREX __builtin_arm_clrex - - -/** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDREXB (uint8_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXB (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 1) */ - - -#if (__ARM_FEATURE_LDREX >= 2) -/** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDREXH (uint16_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. 
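A minimal sketch of the saturating intrinsics documented above, useful for narrowing an accumulator without wrap-around; the function name is hypothetical:

static int16_t narrow_to_q15(int32_t acc)
{
  return (int16_t)__SSAT(acc, 16);   /* clamp to [-32768, 32767] before the cast */
}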
- \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXH (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 2) */ - - -#if (__ARM_FEATURE_LDREX >= 4) -/** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDREXW (uint32_t)__builtin_arm_ldrex - - -/** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STREXW (uint32_t)__builtin_arm_strex -#endif /* (__ARM_FEATURE_LDREX >= 4) */ - - -#if (__ARM_ARCH_ISA_THUMB >= 2) -/** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value - */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) -{ - uint32_t result; - - __ASM volatile ("rrx %0, %1" : "=r" (result) : "r" (value)); - return (result); -} - - -/** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return (result); -} - - -/** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); -} - - -/** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. 
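A minimal sketch of the unprivileged load documented above, as it might appear in a privileged SVC handler that must access caller memory with unprivileged permissions; the handler and pointer names are hypothetical:

static uint32_t svc_read_user_word(volatile uint32_t *user_ptr)
{
  /* The access is checked as if made by the unprivileged caller,
     so an illegal address faults instead of leaking privileged data. */
  return __LDRT(user_ptr);
}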
- \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); -} -#endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ - - -#if (__ARM_ARCH >= 8) -/** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t)result); /* Add explicit type cast here */ -} - - -/** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) -{ - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return (result); -} - - -/** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) -{ - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) -{ - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) -{ - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); -} - - -/** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDAEXB (uint8_t)__builtin_arm_ldaex - - -/** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -#define __LDAEXH (uint16_t)__builtin_arm_ldaex - - -/** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDAEX (uint32_t)__builtin_arm_ldaex - - -/** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. 
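A minimal sketch of a single-word producer/consumer handshake built on the load-acquire/store-release intrinsics above (Armv8-M only); the message and flag variables are hypothetical:

static uint32_t          message;
static volatile uint32_t message_ready;

static void producer(uint32_t value)
{
  message = value;
  __STL(1U, &message_ready);                 /* release: publish only after the data write */
}

static uint32_t consumer(void)
{
  while (__LDA(&message_ready) == 0U) { }    /* acquire: wait until the flag is set */
  return message;                            /* the data write is now guaranteed visible */
}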
- \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEXB (uint32_t)__builtin_arm_stlex - - -/** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEXH (uint32_t)__builtin_arm_stlex - - -/** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed - */ -#define __STLEX (uint32_t)__builtin_arm_stlex - -#endif /* (__ARM_ARCH >= 8) */ - -/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ - - /* ########################### Core Function Access ########################### */ /** \ingroup CMSIS_Core_FunctionInterface \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions @{ */ -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -#ifndef __ARM_COMPAT_H -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} -#endif - - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting special-purpose register PRIMASK. - Can only be executed in Privileged modes. - */ -#ifndef __ARM_COMPAT_H -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} -#endif - /** \brief Get Control Register @@ -1000,28 +367,6 @@ __STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) #if (__ARM_ARCH_ISA_THUMB >= 2) -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) -{ - __ASM volatile ("cpsie f" : : : "memory"); -} - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); -} - - /** \brief Get Base Priority \details Returns the current value of the Base Priority register. @@ -1037,18 +382,18 @@ __STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) -{ - uint32_t result; + /** + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) + { + uint32_t result; - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); - return (result); -} + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return (result); + } #endif @@ -1064,15 +409,15 @@ __STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. 
- \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); -} + /** + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set + */ + __STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) + { + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); + } #endif @@ -1103,18 +448,18 @@ __STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) -{ - uint32_t result; + /** + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) + { + uint32_t result; - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return (result); -} + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return (result); + } #endif @@ -1130,15 +475,15 @@ __STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set - */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) -{ - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); -} + /** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set + */ + __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) + { + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + } #endif #endif /* (__ARM_ARCH_ISA_THUMB >= 2) */ @@ -1158,7 +503,7 @@ __STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure PSPLIM is RAZ/WI */ return (0U); #else @@ -1169,26 +514,26 @@ __STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) } #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); - return (result); -#endif -} + /** + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. 
+ + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return (result); + #endif + } #endif @@ -1205,7 +550,7 @@ __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure PSPLIM is RAZ/WI */ (void)ProcStackPtrLimit; #else @@ -1225,9 +570,9 @@ __STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) */ __STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) { -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure PSPLIM is RAZ/WI */ + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure PSPLIM is RAZ/WI */ (void)ProcStackPtrLimit; #else __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); @@ -1248,7 +593,7 @@ __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure MSPLIM is RAZ/WI */ return (0U); #else @@ -1260,26 +605,26 @@ __STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - return (0U); -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return (result); -#endif -} + /** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. 
+ \return MSPLIM Register value + */ + __STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + return (0U); + #else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return (result); + #endif + } #endif @@ -1295,7 +640,7 @@ __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) { #if (((__ARM_ARCH_8M_MAIN__ < 1) && \ (__ARM_ARCH_8_1M_MAIN__ < 1) ) && \ - (__ARM_FEATURE_CMSE < 3) ) + (__ARM_FEATURE_CMSE < 3) ) /* without main extensions, the non-secure MSPLIM is RAZ/WI */ (void)MainStackPtrLimit; #else @@ -1305,149 +650,27 @@ __STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) #if (__ARM_FEATURE_CMSE == 3) -/** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. - \param [in] MainStackPtrLimit Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) -{ -#if ((__ARM_ARCH_8M_MAIN__ < 1) && \ - (__ARM_ARCH_8_1M_MAIN__ < 1) ) - /* without main extensions, the non-secure MSPLIM is RAZ/WI */ - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif -} + /** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ + __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) + { + #if ((__ARM_ARCH_8M_MAIN__ < 1) && \ + (__ARM_ARCH_8_1M_MAIN__ < 1) ) + /* without main extensions, the non-secure MSPLIM is RAZ/WI */ + (void)MainStackPtrLimit; + #else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); + #endif + } #endif - #endif /* (__ARM_ARCH >= 8) */ - - -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value - */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) -{ -#if (__ARM_FP >= 1) - return (__builtin_arm_get_fpscr()); -#else - return (0U); -#endif -} - - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. 
- \param [in] fpscr Floating Point Status/Control value to set - */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) -{ -#if (__ARM_FP >= 1) - __builtin_arm_set_fpscr(fpscr); -#else - (void)fpscr; -#endif -} - - /** @} end of CMSIS_Core_RegAccFunctions */ -/* ################### Compiler specific Intrinsics ########################### */ -/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics - Access to dedicated SIMD instructions - @{ -*/ - -#if (__ARM_FEATURE_DSP == 1) -#define __SADD8 __sadd8 -#define __QADD8 __qadd8 -#define __SHADD8 __shadd8 -#define __UADD8 __uadd8 -#define __UQADD8 __uqadd8 -#define __UHADD8 __uhadd8 -#define __SSUB8 __ssub8 -#define __QSUB8 __qsub8 -#define __SHSUB8 __shsub8 -#define __USUB8 __usub8 -#define __UQSUB8 __uqsub8 -#define __UHSUB8 __uhsub8 -#define __SADD16 __sadd16 -#define __QADD16 __qadd16 -#define __SHADD16 __shadd16 -#define __UADD16 __uadd16 -#define __UQADD16 __uqadd16 -#define __UHADD16 __uhadd16 -#define __SSUB16 __ssub16 -#define __QSUB16 __qsub16 -#define __SHSUB16 __shsub16 -#define __USUB16 __usub16 -#define __UQSUB16 __uqsub16 -#define __UHSUB16 __uhsub16 -#define __SASX __sasx -#define __QASX __qasx -#define __SHASX __shasx -#define __UASX __uasx -#define __UQASX __uqasx -#define __UHASX __uhasx -#define __SSAX __ssax -#define __QSAX __qsax -#define __SHSAX __shsax -#define __USAX __usax -#define __UQSAX __uqsax -#define __UHSAX __uhsax -#define __USAD8 __usad8 -#define __USADA8 __usada8 -#define __SSAT16 __ssat16 -#define __USAT16 __usat16 -#define __UXTB16 __uxtb16 -#define __UXTAB16 __uxtab16 -#define __SXTB16 __sxtb16 -#define __SXTAB16 __sxtab16 -#define __SMUAD __smuad -#define __SMUADX __smuadx -#define __SMLAD __smlad -#define __SMLADX __smladx -#define __SMLALD __smlald -#define __SMLALDX __smlaldx -#define __SMUSD __smusd -#define __SMUSDX __smusdx -#define __SMLSD __smlsd -#define __SMLSDX __smlsdx -#define __SMLSLD __smlsld -#define __SMLSLDX __smlsldx -#define __SEL __sel -#define __QADD __qadd -#define __QSUB __qsub - -#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ - ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) - -#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ - ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) - -#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) - -#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) - -__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) -{ - int32_t result; - - __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) ); - return (result); -} - -#endif /* (__ARM_FEATURE_DSP == 1) */ -/** @} end of group CMSIS_SIMD_intrinsics */ - - #endif /* __CMSIS_TIARMCLANG_M_H */ diff --git a/CMSIS/Core/Include/r-profile/cmsis_armclang_r.h b/CMSIS/Core/Include/r-profile/cmsis_armclang_r.h new file mode 100644 index 000000000..f1a5faffc --- /dev/null +++ b/CMSIS/Core/Include/r-profile/cmsis_armclang_r.h @@ -0,0 +1,161 @@ +/**************************************************************************//** + * @file cmsis_armclang_r.h + * @brief CMSIS compiler armclang (Arm Compiler 6) header file + * @version V5.0.0 + * @date 04. December 2022 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CMSIS_ARMCLANG_R_H +#define __CMSIS_ARMCLANG_R_H + +#pragma clang system_header /* treat file as system include file */ + +#ifndef __CMSIS_ARMCLANG_H + #error "This file must not be included directly" +#endif + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** \brief Get CPSR Register + \return CPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CPSR(void) +{ + uint32_t result; + __ASM volatile("MRS %0, cpsr" : "=r" (result) ); + return(result); +} + +/** \brief Set CPSR Register + \param [in] cpsr CPSR value to set + */ +__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr) +{ + __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory"); +} + +/** \brief Get Mode + \return Processor Mode + */ +__STATIC_FORCEINLINE uint32_t __get_mode(void) +{ + return (__get_CPSR() & 0x1FU); +} + +/** \brief Set Mode + \param [in] mode Mode value to set + */ +__STATIC_FORCEINLINE void __set_mode(uint32_t mode) +{ + __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory"); +} + +/** \brief Get Stack Pointer + \return Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP(void) +{ + uint32_t result; + __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory"); + return result; +} + +/** \brief Set Stack Pointer + \param [in] stack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP(uint32_t stack) +{ + __ASM volatile("MOV sp, %0" : : "r" (stack) : "memory"); +} + +/** \brief Get USR/SYS Stack Pointer + \return USR/SYS Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP_usr(void) +{ + uint32_t cpsr; + uint32_t result; + __ASM volatile( + "MRS %0, cpsr \n" + "CPS #0x1F \n" // no effect in USR mode + "MOV %1, sp \n" + "MSR cpsr_c, %0 \n" // no effect in USR mode + "ISB" : "=r"(cpsr), "=r"(result) : : "memory" + ); + return result; +} + +/** \brief Set USR/SYS Stack Pointer + \param [in] topOfProcStack USR/SYS Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack) +{ + uint32_t cpsr; + __ASM volatile( + "MRS %0, cpsr \n" + "CPS #0x1F \n" // no effect in USR mode + "MOV sp, %1 \n" + "MSR cpsr_c, %0 \n" // no effect in USR mode + "ISB" : "=r"(cpsr) : "r" (topOfProcStack) : "memory" + ); +} + +/** \brief Get FPEXC + \return Floating Point Exception Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPEXC(void) +{ +#if (__FPU_PRESENT == 1) + uint32_t result; + __ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory"); + return(result); +#else + return(0); +#endif +} + +/** \brief Set FPEXC + \param [in] fpexc Floating Point Exception Control value to set + */ +__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) +{ +#if (__FPU_PRESENT == 1) + __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory"); +#endif +} + +/** @} end 
of CMSIS_Core_RegAccFunctions */
+
+
+/*
+ * Include common core functions to access Coprocessor 15 registers
+ */
+
+#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
+#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
+#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
+#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )
+
+#endif /* __CMSIS_ARMCLANG_R_H */
diff --git a/CMSIS/Core/Include/r-profile/cmsis_clang_r.h b/CMSIS/Core/Include/r-profile/cmsis_clang_r.h
new file mode 100644
index 000000000..dabcbc839
--- /dev/null
+++ b/CMSIS/Core/Include/r-profile/cmsis_clang_r.h
@@ -0,0 +1,161 @@
+/**************************************************************************//**
+ * @file cmsis_clang_r.h
+ * @brief CMSIS compiler LLVM/Clang header file
+ * @version V5.0.0
+ * @date 04. December 2022
+ ******************************************************************************/
+/*
+ * Copyright (c) 2009-2023 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef __CMSIS_CLANG_CORER_H +#define __CMSIS_CLANG_CORER_H + +#pragma clang system_header /* treat file as system include file */ + +#ifndef __CMSIS_CLANG_H + #error "This file must not be included directly" +#endif + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** \brief Get CPSR Register + \return CPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CPSR(void) +{ + uint32_t result; + __ASM volatile("MRS %0, cpsr" : "=r" (result) ); + return(result); +} + +/** \brief Set CPSR Register + \param [in] cpsr CPSR value to set + */ +__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr) +{ + __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory"); +} + +/** \brief Get Mode + \return Processor Mode + */ +__STATIC_FORCEINLINE uint32_t __get_mode(void) +{ + return (__get_CPSR() & 0x1FU); +} + +/** \brief Set Mode + \param [in] mode Mode value to set + */ +__STATIC_FORCEINLINE void __set_mode(uint32_t mode) +{ + __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory"); +} + +/** \brief Get Stack Pointer + \return Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP(void) +{ + uint32_t result; + __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory"); + return result; +} + +/** \brief Set Stack Pointer + \param [in] stack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP(uint32_t stack) +{ + __ASM volatile("MOV sp, %0" : : "r" (stack) : "memory"); +} + +/** \brief Get USR/SYS Stack Pointer + \return USR/SYS Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP_usr(void) +{ + uint32_t cpsr; + uint32_t result; + __ASM volatile( + "MRS %0, cpsr \n" + "CPS #0x1F \n" // no effect in USR mode + "MOV %1, sp \n" + "MSR cpsr_c, %0 \n" // no effect in USR mode + "ISB" : "=r"(cpsr), "=r"(result) : : "memory" + ); + return result; +} + +/** \brief Set USR/SYS Stack Pointer + \param [in] topOfProcStack USR/SYS Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack) +{ + uint32_t cpsr; + __ASM volatile( + "MRS %0, cpsr \n" + "CPS #0x1F \n" // no effect in USR mode + "MOV sp, %1 \n" + "MSR cpsr_c, %0 \n" // no effect in USR mode + "ISB" : "=r"(cpsr) : "r" (topOfProcStack) : "memory" + ); +} + +/** \brief Get FPEXC + \return Floating Point Exception Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPEXC(void) +{ +#if (__FPU_PRESENT == 1) + uint32_t result; + __ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory"); + return(result); +#else + return(0); +#endif +} + +/** \brief Set FPEXC + \param [in] fpexc Floating Point Exception Control value to set + */ +__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) +{ +#if (__FPU_PRESENT == 1) + __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory"); +#endif +} + +/** @} end of CMSIS_Core_RegAccFunctions */ + + +/* + * Include common core functions to access Coprocessor 15 registers + */ + +#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" ) +#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" ) +#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" ) +#define __set_CP64(cp, op1, Rt, CRm) __ASM 
volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) + +#endif /* __CMSIS_CLANG_COREA_H */ diff --git a/CMSIS/Core/Include/r-profile/cmsis_gcc_r.h b/CMSIS/Core/Include/r-profile/cmsis_gcc_r.h new file mode 100644 index 000000000..5b6c99355 --- /dev/null +++ b/CMSIS/Core/Include/r-profile/cmsis_gcc_r.h @@ -0,0 +1,163 @@ +/**************************************************************************//** + * @file cmsis_gcc_r.h + * @brief CMSIS compiler GCC header file + * @version V6.0.0 + * @date 4. August 2023 + ******************************************************************************/ +/* + * Copyright (c) 2009-2023 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CMSIS_GCC_R_H +#define __CMSIS_GCC_R_H + +#ifndef __CMSIS_GCC_H + #error "This file must not be included directly" +#endif + +/* ignore some GCC warnings */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wsign-conversion" +#pragma GCC diagnostic ignored "-Wconversion" +#pragma GCC diagnostic ignored "-Wunused-parameter" + + +/** \defgroup CMSIS_Core_intrinsics CMSIS Core Intrinsics + Access to dedicated SIMD instructions + @{ +*/ + +/** \brief Get CPSR Register + \return CPSR Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CPSR(void) +{ + uint32_t result; + __ASM volatile("MRS %0, cpsr" : "=r" (result) ); + return(result); +} + +/** \brief Set CPSR Register + \param [in] cpsr CPSR value to set + */ +__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr) +{ + __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory"); +} + +/** \brief Get Mode + \return Processor Mode + */ +__STATIC_FORCEINLINE uint32_t __get_mode(void) +{ + return (__get_CPSR() & 0x1FU); +} + +/** \brief Set Mode + \param [in] mode Mode value to set + */ +__STATIC_FORCEINLINE void __set_mode(uint32_t mode) +{ + __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory"); +} + +/** \brief Get Stack Pointer + \return Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP(void) +{ + uint32_t result; + __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory"); + return result; +} + +/** \brief Set Stack Pointer + \param [in] stack Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP(uint32_t stack) +{ + __ASM volatile("MOV sp, %0" : : "r" (stack) : "memory"); +} + +/** \brief Get USR/SYS Stack Pointer + \return USR/SYS Stack Pointer value + */ +__STATIC_FORCEINLINE uint32_t __get_SP_usr(void) +{ + uint32_t cpsr = __get_CPSR(); + uint32_t result; + __ASM volatile( + "CPS #0x1F \n" + "MOV %0, sp " : "=r"(result) : : "memory" + ); + __set_CPSR(cpsr); + __ISB(); + return result; +} + +/** \brief Set USR/SYS Stack Pointer + \param [in] topOfProcStack USR/SYS Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack) +{ + uint32_t cpsr = __get_CPSR(); + __ASM volatile( + "CPS #0x1F \n" + "MOV sp, %0 " : : "r" (topOfProcStack) : "memory" + ); + __set_CPSR(cpsr); + 
__ISB(); +} + +/** \brief Get FPEXC + \return Floating Point Exception Control register value + */ +__STATIC_FORCEINLINE uint32_t __get_FPEXC(void) +{ +#if (__FPU_PRESENT == 1) + uint32_t result; + __ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory"); + return(result); +#else + return(0); +#endif +} + +/** \brief Set FPEXC + \param [in] fpexc Floating Point Exception Control value to set + */ +__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) +{ +#if (__FPU_PRESENT == 1) + __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory"); +#endif +} + +/* + * Include common core functions to access Coprocessor 15 registers + */ + +#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" ) +#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" ) +#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" ) +#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) + +/*@} end of group CMSIS_Core_intrinsics */ + +#pragma GCC diagnostic pop + +#endif /* __CMSIS_GCC_R_H */ diff --git a/CMSIS/Core/Include/r-profile/cmsis_iccarm_r.h b/CMSIS/Core/Include/r-profile/cmsis_iccarm_r.h new file mode 100644 index 000000000..0379428e7 --- /dev/null +++ b/CMSIS/Core/Include/r-profile/cmsis_iccarm_r.h @@ -0,0 +1,39 @@ +/**************************************************************************//** + * @file cmsis_iccarm_r.h + * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file + * @version V5.0.0 + * @date 04. December 2022 + ******************************************************************************/ + +//------------------------------------------------------------------------------ +// +// Copyright (c) 2018-2023 Arm Limited. All rights reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License") +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//------------------------------------------------------------------------------ + + +#ifndef __CMSIS_ICCARM_CORER_H__ +#define __CMSIS_ICCARM_CORER_H__ + +#ifndef __CMSIS_ICCARM_H__ + #error "This file must not be included directly" +#endif + +#ifndef __ICCARM__ + #error This file should only be compiled by ICCARM +#endif + +#endif /* __CMSIS_ICCARM_CORER_H__ */
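
Usage note (illustration only, not part of the patch): the sketch below shows how application code might exercise the new r-profile register-access intrinsics once a device header has pulled in cmsis_compiler.h, which in turn selects the matching r-profile header for the active toolchain. The device header name "device_cr.h", the helper function names, and the assumption that __FPU_PRESENT comes from that device header are hypothetical; only intrinsics introduced by this patch (__get_mode, __get_FPEXC, __set_FPEXC, __get_CP) are used, and the CP15 encoding for SCTLR (p15, 0, c1, c0, 0) is the architectural one. The same code should build unchanged with Arm Compiler 6, LLVM/Clang and GCC, because the three new r-profile headers expose an identical function set.

#include <stdint.h>
#include "device_cr.h"                    /* hypothetical Cortex-R device header; includes cmsis_compiler.h */

#define MODE_SYS   0x1FU                  /* CPSR.M encoding for SYS mode */
#define FPEXC_EN   (1UL << 30)            /* FPEXC.EN: enable FPU/NEON access */

/* Read the CP15 System Control Register (SCTLR = p15, 0, c1, c0, 0). */
static uint32_t read_sctlr(void)
{
  uint32_t sctlr;
  __get_CP(15, 0, sctlr, 1, 0, 0);
  return sctlr;
}

/* Enable FPU access through FPEXC when the device reports an FPU. */
static void enable_fpu(void)
{
#if (__FPU_PRESENT == 1)
  __set_FPEXC(__get_FPEXC() | FPEXC_EN);
#endif
}

void system_setup(void)
{
  if (__get_mode() == MODE_SYS)           /* only meaningful in a privileged mode */
  {
    enable_fpu();
  }
  (void)read_sctlr();                     /* e.g. inspect cache/MMU enable bits */
}

Because __get_CP is a macro wrapping an MRC instruction, its Rt argument must be an lvalue, which is why sctlr is declared before the call rather than passed as an expression.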