0
mirror of https://github.com/OneOfEleven/uv-k5-firmware-custom.git synced 2025-06-20 15:08:37 +03:00

Initial commit

This commit is contained in:
OneOfEleven
2023-09-09 08:03:56 +01:00
parent 92305117f1
commit 54441e27d9
3388 changed files with 582553 additions and 0 deletions

View File

@ -0,0 +1,253 @@
;/*
; * Copyright (c) 2013-2023 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; *
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: ARMv6-M Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
;-----------------------------------------------------------------------
; Kernel structure offsets and error codes used by the handlers below.
; NOTE(review): these offsets must match the C layout of osRtxInfo and
; osRtxThread_t (TCB) in the RTX5 sources - confirm when updating RTX.
;-----------------------------------------------------------------------
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SP_OFS EQU 56 ; TCB.SP offset
TCB_ZONE_OFS EQU 68 ; TCB.zone offset
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
osRtxErrorSVC EQU 6 ; Invalid SVC function called
PRESERVE8 ; Stack is 8-byte aligned
THUMB ; Thumb instruction set (ARMv6-M)
AREA |.constdata|, DATA, READONLY
EXPORT irqRtxLib
irqRtxLib DCB 0 ; Non weak library reference
AREA |.text|, CODE, READONLY
;-----------------------------------------------------------------------
; SVC_Handler - Supervisor Call exception handler (ARMv6-M)
;
; SVC 0 : RTX kernel service call. The service function address is
;         passed in R7 (bounds-checked against the RTX_SVC_VENEERS
;         region when RTX_SVC_PTR_CHECK is defined). Arguments R0-R3
;         are loaded from the exception stack frame and the result is
;         stored back into the stacked R0.
; SVC >0: user SVC, dispatched through the osRtxUserSVC table.
;
; SVC_Context (also entered from PendSV_Handler / SysTick_Handler)
; switches to osRtxInfo.thread.run.next when it differs from the
; current thread, saving/restoring R4-R11 on the thread's process
; stack (PSP). Entered with LR = EXC_RETURN.
;-----------------------------------------------------------------------
SVC_Handler PROC
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
IF :DEF:RTX_STACK_CHECK
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
ENDIF
IF :DEF:RTX_SVC_PTR_CHECK
IMPORT |Image$$RTX_SVC_VENEERS$$Base|
IMPORT |Image$$RTX_SVC_VENEERS$$Length|
IMPORT osRtxKernelErrorNotify
ENDIF
IF :DEF:RTX_EXECUTION_ZONE
IMPORT osZoneSetup_Callback
ENDIF
MOV R0,LR
LSRS R0,R0,#3 ; Determine return stack from EXC_RETURN bit 2
BCC SVC_MSP ; Branch if return stack is MSP
MRS R0,PSP ; Get PSP
SVC_Number
LDR R1,[R0,#24] ; Load saved PC from stack
SUBS R1,R1,#2 ; Point to SVC instruction
LDRB R1,[R1] ; Load SVC number
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
IF :DEF:RTX_SVC_PTR_CHECK
SUBS R1,R7,#0x01 ; Clear T-bit of function address
LSLS R2,R1,#29 ; Check if 8-byte aligned
BEQ SVC_PtrBoundsCheck ; Branch if address is aligned
SVC_PtrInvalid
PUSH {R0,LR} ; Save SP and EXC_RETURN
MOVS R0,#osRtxErrorSVC ; Parameter: code
MOV R1,R7 ; Parameter: object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
POP {R2,R3} ; Restore SP and EXC_RETURN
MOV LR,R3 ; Set EXC_RETURN
B SVC_Context ; Branch to context handling
SVC_PtrBoundsCheck
LDR R2,=|Image$$RTX_SVC_VENEERS$$Base|
LDR R3,=|Image$$RTX_SVC_VENEERS$$Length|
SUBS R2,R1,R2 ; Subtract SVC table base address
CMP R2,R3 ; Compare with SVC table boundaries
BHS SVC_PtrInvalid ; Branch if address is out of bounds
ENDIF
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDMIA R0,{R0-R3} ; Load function parameters from stack
BLX R7 ; Call service function
POP {R2,R3} ; Restore SP and EXC_RETURN
STR R0,[R2] ; Store function return value
MOV LR,R3 ; Set EXC_RETURN
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDMIA R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
BEQ SVC_Exit ; Branch when threads are the same
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
CMP R1,#0 ; Check if running thread is deleted
BEQ SVC_ContextRestore ; Branch if running thread is deleted
SVC_ContextSave
MRS R0,PSP ; Get PSP
SUBS R0,R0,#32 ; Calculate SP: space for R4..R11
STR R0,[R1,#TCB_SP_OFS] ; Store SP
IF :DEF:RTX_STACK_CHECK
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CMP R0,#0 ; Check stack check result
BNE SVC_ContextSaveRegs ; Branch when stack check is ok
MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
MOVS R1,#0 ; Simulate deleted running thread
B SVC_ContextRestore ; Branch to context restore handling
SVC_ContextSaveRegs
LDR R0,[R1,#TCB_SP_OFS] ; Load SP
ENDIF
STMIA R0!,{R4-R7} ; Save R4..R7
MOV R4,R8 ; Copy high registers to low registers
MOV R5,R9 ; (ARMv6-M STMIA cannot store R8-R11)
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} ; Save R8..R11
SVC_ContextRestore
MOVS R4,R2 ; Assign osRtxInfo.thread.run.next to R4
IF :DEF:RTX_EXECUTION_ZONE
MOVS R3,#TCB_ZONE_OFS ; Get TCB.zone offset
LDRB R0,[R2,R3] ; Load osRtxInfo.thread.run.next: zone
CMP R1,#0 ; Check if running thread is deleted
BEQ SVC_ZoneSetup ; Branch if running thread is deleted
LDRB R1,[R1,R3] ; Load osRtxInfo.thread.run.curr: zone
CMP R0,R1 ; Check if next:zone == curr:zone
BEQ SVC_ContextRestore_N ; Branch if zone has not changed
SVC_ZoneSetup
BL osZoneSetup_Callback ; Setup zone for next thread
ENDIF
SVC_ContextRestore_N
LDR R0,[R4,#TCB_SP_OFS] ; Load SP
ADDS R0,R0,#16 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R8..R11 (via low registers)
MOV R8,R4
MOV R9,R5
MOV R10,R6
MOV R11,R7
MSR PSP,R0 ; Set PSP
SUBS R0,R0,#32 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R4..R7
MOVS R0,#2 ; Binary complement of 0xFFFFFFFD
MVNS R0,R0 ; Set EXC_RETURN value
BX R0 ; Exit from handler
SVC_MSP
MRS R0,MSP ; Get MSP
B SVC_Number ; Branch to SVC number handling
SVC_Exit
BX LR ; Exit from handler
SVC_User
LDR R2,=osRtxUserSVC ; Load address of SVC table
LDR R3,[R2] ; Load SVC maximum number
CMP R1,R3 ; Check SVC number range
BHI SVC_Exit ; Branch if out of range
PUSH {R0,LR} ; Save SP and EXC_RETURN
LSLS R1,R1,#2 ; Convert SVC number to table offset
LDR R3,[R2,R1] ; Load address of SVC function
MOV R12,R3
LDMIA R0,{R0-R3} ; Load function parameters from stack
BLX R12 ; Call service function
POP {R2,R3} ; Restore SP and EXC_RETURN
STR R0,[R2] ; Store function return value
BX R3 ; Return from handler
ALIGN
ENDP
;-----------------------------------------------------------------------
; PendSV_Handler - deferred kernel post processing (ARMv6-M)
; Calls osRtxPendSV_Handler, then branches into SVC_Context (in
; SVC_Handler above) to perform a thread switch if one is pending.
;-----------------------------------------------------------------------
PendSV_Handler PROC
EXPORT PendSV_Handler
IMPORT osRtxPendSV_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B SVC_Context ; Branch to context handling
ALIGN
ENDP
;-----------------------------------------------------------------------
; SysTick_Handler - kernel tick exception handler (ARMv6-M)
; Calls osRtxTick_Handler, then branches into SVC_Context (in
; SVC_Handler above) to perform a thread switch if one is pending.
;-----------------------------------------------------------------------
SysTick_Handler PROC
EXPORT SysTick_Handler
IMPORT osRtxTick_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B SVC_Context ; Branch to context handling
ALIGN
ENDP
IF :DEF:RTX_SAFETY_FEATURES
;-----------------------------------------------------------------------
; osFaultResume - resume thread dispatching after a fault.
; Branches directly into SVC_Context to switch to the next ready
; thread. Only present when RTX_SAFETY_FEATURES is defined.
;-----------------------------------------------------------------------
osFaultResume PROC
EXPORT osFaultResume
B SVC_Context ; Branch to context handling
ALIGN
ENDP
ENDIF
END

View File

@ -0,0 +1,478 @@
;/*
; * Copyright (c) 2013-2023 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; *
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: ARMv7-A Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
;-----------------------------------------------------------------------
; Processor mode encodings, kernel structure offsets and handler state
; variables (ARMv7-A).
; NOTE(review): the osRtxInfo/osRtxThread_t offsets must match the C
; struct layout in the RTX5 sources - confirm when updating RTX.
;-----------------------------------------------------------------------
MODE_FIQ EQU 0x11 ; CPSR mode: FIQ
MODE_IRQ EQU 0x12 ; CPSR mode: IRQ
MODE_SVC EQU 0x13 ; CPSR mode: Supervisor
MODE_ABT EQU 0x17 ; CPSR mode: Abort
MODE_UND EQU 0x1B ; CPSR mode: Undefined
CPSR_BIT_T EQU 0x20 ; CPSR T bit (Thumb state)
K_STATE_RUNNING EQU 2 ; osKernelState_t::osKernelRunning
I_K_STATE_OFS EQU 8 ; osRtxInfo.kernel.state offset
I_TICK_IRQN_OFS EQU 16 ; osRtxInfo.tick_irqn offset
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SP_FRAME EQU 34 ; osRtxThread_t.stack_frame offset
TCB_SP_OFS EQU 56 ; osRtxThread_t.sp offset
TCB_ZONE_OFS EQU 68 ; osRtxThread_t.zone offset
PRESERVE8 ; Stack is 8-byte aligned
ARM ; ARM instruction set
AREA |.constdata|, DATA, READONLY
EXPORT irqRtxLib
irqRtxLib DCB 0 ; Non weak library reference
AREA |.data|, DATA, READWRITE
EXPORT SVC_Active
EXPORT IRQ_PendSV
IRQ_NestLevel DCD 0 ; IRQ nesting level counter
SVC_Active DCB 0 ; SVC Handler Active
IRQ_PendSV DCB 0 ; Pending SVC flag
;-----------------------------------------------------------------------
; Undef_Handler - Undefined instruction exception handler (ARMv7-A)
; Reconstructs the offending instruction (ARM word, 16-bit Thumb, or
; unaligned 32-bit Thumb) and calls
;   CUndefHandler(R0=instruction, R1=instruction size 2/4, R2=LR).
; The value returned in R0 is subtracted from the stacked LR, letting
; the C handler choose to retry or skip the instruction on return.
;-----------------------------------------------------------------------
Undef_Handler\
PROC
EXPORT Undef_Handler
IMPORT CUndefHandler
SRSFD SP!, #MODE_UND ; Save LR and SPSR to UND mode stack
PUSH {R0-R4, R12} ; Save APCS corruptible registers to UND mode stack
MRS R0, SPSR
TST R0, #CPSR_BIT_T ; Check mode
MOVEQ R1, #4 ; R1 = 4 ARM mode
MOVNE R1, #2 ; R1 = 2 Thumb mode
SUB R0, LR, R1 ; R0 = address of offending instruction
LDREQ R0, [R0] ; ARM mode - R0 points to offending instruction
BEQ Undef_Cont
; Thumb instruction
; Determine if it is a 32-bit Thumb instruction
LDRH R0, [R0]
MOV R2, #0x1C
CMP R2, R0, LSR #11 ; First halfword in 32-bit opcode range?
BHS Undef_Cont ; 16-bit Thumb instruction
; 32-bit Thumb instruction. Unaligned - reconstruct the offending instruction
LDRH R2, [LR]
ORR R0, R2, R0, LSL #16
Undef_Cont
MOV R2, LR ; Set LR to third argument
AND R12, SP, #4 ; Ensure stack is 8-byte aligned
SUB SP, SP, R12 ; Adjust stack
PUSH {R12, LR} ; Store stack adjustment and dummy LR
; R0 =Offending instruction, R1 =2(Thumb) or =4(ARM)
BL CUndefHandler
POP {R12, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R12 ; Unadjust stack
LDR LR, [SP, #24] ; Restore stacked LR and possibly adjust for retry
SUB LR, LR, R0 ; Apply return-address adjustment from handler
LDR R0, [SP, #28] ; Restore stacked SPSR
MSR SPSR_CXSF, R0
CLREX ; Clear exclusive monitor
POP {R0-R4, R12} ; Restore stacked APCS registers
ADD SP, SP, #8 ; Adjust SP for already-restored banked registers
MOVS PC, LR ; Return from exception (CPSR restored from SPSR)
ENDP
;-----------------------------------------------------------------------
; PAbt_Handler - Prefetch abort exception handler (ARMv7-A)
; Reads the instruction fault status/address registers and calls
;   CPAbtHandler(R0=IFSR, R1=IFAR, R2=LR).
;-----------------------------------------------------------------------
PAbt_Handler\
PROC
EXPORT PAbt_Handler
IMPORT CPAbtHandler
SUB LR, LR, #4 ; Pre-adjust LR
SRSFD SP!, #MODE_ABT ; Save LR and SPSR to ABT mode stack
PUSH {R0-R4, R12} ; Save APCS corruptible registers to ABT mode stack
MRC p15, 0, R0, c5, c0, 1 ; IFSR
MRC p15, 0, R1, c6, c0, 2 ; IFAR
MOV R2, LR ; Set LR to third argument
AND R12, SP, #4 ; Ensure stack is 8-byte aligned
SUB SP, SP, R12 ; Adjust stack
PUSH {R12, LR} ; Store stack adjustment and dummy LR
BL CPAbtHandler
POP {R12, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R12 ; Unadjust stack
CLREX ; Clear exclusive monitor
POP {R0-R4, R12} ; Restore stacked APCS registers
RFEFD SP! ; Return from exception
ENDP
;-----------------------------------------------------------------------
; DAbt_Handler - Data abort exception handler (ARMv7-A)
; Reads the data fault status/address registers and calls
;   CDAbtHandler(R0=DFSR, R1=DFAR, R2=LR).
;-----------------------------------------------------------------------
DAbt_Handler\
PROC
EXPORT DAbt_Handler
IMPORT CDAbtHandler
SUB LR, LR, #8 ; Pre-adjust LR
SRSFD SP!, #MODE_ABT ; Save LR and SPSR to ABT mode stack
PUSH {R0-R4, R12} ; Save APCS corruptible registers to ABT mode stack
MRC p15, 0, R0, c5, c0, 0 ; DFSR
MRC p15, 0, R1, c6, c0, 0 ; DFAR
MOV R2, LR ; Set LR to third argument
AND R12, SP, #4 ; Ensure stack is 8-byte aligned
SUB SP, SP, R12 ; Adjust stack
PUSH {R12, LR} ; Store stack adjustment and dummy LR
BL CDAbtHandler
POP {R12, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R12 ; Unadjust stack
CLREX ; Clear exclusive monitor
POP {R0-R4, R12} ; Restore stacked APCS registers
RFEFD SP! ; Return from exception
ENDP
;-----------------------------------------------------------------------
; IRQ_Handler - interrupt exception handler (ARMv7-A)
; Runs on the SVC mode stack. Tracks nesting depth in IRQ_NestLevel,
; queries the interrupt controller for the active IRQ, calls the
; registered handler with interrupts re-enabled, signals end of
; interrupt, and finally runs osRtxContextSwitch.
;-----------------------------------------------------------------------
IRQ_Handler\
PROC
EXPORT IRQ_Handler
IMPORT IRQ_GetActiveIRQ
IMPORT IRQ_GetHandler
IMPORT IRQ_EndOfInterrupt
SUB LR, LR, #4 ; Pre-adjust LR
SRSFD SP!, #MODE_SVC ; Save LR_irq and SPSR_irq on to the SVC stack
CPS #MODE_SVC ; Change to SVC mode
PUSH {R0-R3, R12, LR} ; Save APCS corruptible registers
LDR R0, =IRQ_NestLevel
LDR R1, [R0]
ADD R1, R1, #1 ; Increment IRQ nesting level
STR R1, [R0]
MOV R3, SP ; Move SP into R3
AND R3, R3, #4 ; Get stack adjustment to ensure 8-byte alignment
SUB SP, SP, R3 ; Adjust stack
PUSH {R3, R4} ; Store stack adjustment(R3) and user data(R4)
BLX IRQ_GetActiveIRQ ; Retrieve interrupt ID into R0
MOV R4, R0 ; Move interrupt ID to R4
BLX IRQ_GetHandler ; Retrieve interrupt handler address for current ID
CMP R0, #0 ; Check if handler address is 0
BEQ IRQ_End ; If 0, end interrupt and return
CPSIE i ; Re-enable interrupts
BLX R0 ; Call IRQ handler
CPSID i ; Disable interrupts
IRQ_End
MOV R0, R4 ; Move interrupt ID to R0
BLX IRQ_EndOfInterrupt ; Signal end of interrupt
POP {R3, R4} ; Restore stack adjustment(R3) and user data(R4)
ADD SP, SP, R3 ; Unadjust stack
BL osRtxContextSwitch ; Continue in context switcher
LDR R0, =IRQ_NestLevel
LDR R1, [R0]
SUBS R1, R1, #1 ; Decrement IRQ nesting level
STR R1, [R0]
CLREX ; Clear exclusive monitor for interrupted code
POP {R0-R3, R12, LR} ; Restore stacked APCS registers
RFEFD SP! ; Return from IRQ handler
ENDP
;-----------------------------------------------------------------------
; SVC_Handler - Supervisor Call exception handler (ARMv7-A)
; Extracts the SVC number from the SVC instruction itself (Thumb
; halfword or ARM word encoding). SVC 0 is an RTX kernel service
; call: the function address travels in R12 (set by the caller);
; while the kernel is running the OS tick interrupt is masked around
; the call, and osRtxContextSwitch runs afterwards. Other SVC numbers
; are dispatched through the osRtxUserSVC table.
;-----------------------------------------------------------------------
SVC_Handler\
PROC
EXPORT SVC_Handler
IMPORT IRQ_Disable
IMPORT IRQ_Enable
IMPORT osRtxUserSVC
IMPORT osRtxInfo
SRSFD SP!, #MODE_SVC ; Store SPSR_svc and LR_svc onto SVC stack
PUSH {R12, LR}
MRS R12, SPSR ; Load SPSR
TST R12, #CPSR_BIT_T ; Thumb bit set?
LDRHNE R12, [LR,#-2] ; Thumb: load halfword
BICNE R12, R12, #0xFF00 ; extract SVC number
LDREQ R12, [LR,#-4] ; ARM: load word
BICEQ R12, R12, #0xFF000000 ; extract SVC number
CMP R12, #0 ; Compare SVC number
BNE SVC_User ; Branch if User SVC
PUSH {R0-R3} ; Push arguments to stack
LDR R0, =SVC_Active
MOV R1, #1
STRB R1, [R0] ; Set SVC Handler Active
LDR R0, =IRQ_NestLevel
LDR R1, [R0]
ADD R1, R1, #1 ; Increment IRQ nesting level
STR R1, [R0]
LDR R0, =osRtxInfo
LDR R1, [R0, #I_K_STATE_OFS] ; Load RTX5 kernel state
CMP R1, #K_STATE_RUNNING ; Check osKernelRunning
BLT SVC_FuncCall ; Continue if kernel is not running
LDR R0, [R0, #I_TICK_IRQN_OFS] ; Load OS Tick irqn
BLX IRQ_Disable ; Disable OS Tick interrupt
SVC_FuncCall
LDM SP, {R0-R3, R12} ; Reload R0-R3 and R12 from stack
CPSIE i ; Re-enable interrupts
BLX R12 ; Branch to SVC function
CPSID i ; Disable interrupts
STR R0, [SP] ; Store function return value
LDR R0, =osRtxInfo
LDR R1, [R0, #I_K_STATE_OFS] ; Load RTX5 kernel state
CMP R1, #K_STATE_RUNNING ; Check osKernelRunning
BLT SVC_ContextCheck ; Continue if kernel is not running
LDR R0, [R0, #I_TICK_IRQN_OFS] ; Load OS Tick irqn
BLX IRQ_Enable ; Enable OS Tick interrupt
SVC_ContextCheck
BL osRtxContextSwitch ; Continue in context switcher
LDR R0, =IRQ_NestLevel
LDR R1, [R0]
SUB R1, R1, #1 ; Decrement IRQ nesting level
STR R1, [R0]
LDR R0, =SVC_Active
MOV R1, #0
STRB R1, [R0] ; Clear SVC Handler Active
CLREX ; Clear exclusive monitor
POP {R0-R3, R12, LR} ; Restore stacked APCS registers
RFEFD SP! ; Return from exception
SVC_User
PUSH {R4, R5}
LDR R5,=osRtxUserSVC ; Load address of SVC table
LDR R4,[R5] ; Load SVC maximum number
CMP R12,R4 ; Check SVC number range
BHI SVC_Done ; Branch if out of range
LDR R12,[R5,R12,LSL #2] ; Load SVC Function Address
BLX R12 ; Call SVC Function
SVC_Done
CLREX ; Clear exclusive monitor
POP {R4, R5, R12, LR}
RFEFD SP! ; Return from exception
ENDP
;-----------------------------------------------------------------------
; osRtxContextSwitch - thread context switch and IRQ post processing
;
; Called with BL from IRQ_Handler/SVC_Handler in SVC mode, with the
; exception frame already on the SVC stack. Only acts at IRQ nesting
; level 1. When osRtxInfo.thread.run.curr != .next (or IRQ_PendSV is
; set), it saves the current thread's R4-R11, R0-R3, R12, LR_usr, PC,
; CPSR - and, if VFP/NEON is enabled in CPACR, the FP state - to the
; thread's user stack; runs osRtxPendSV_Handler post processing with
; the OS tick masked while PendSV requests keep arriving; then
; restores the context of osRtxInfo.thread.run.next.
;-----------------------------------------------------------------------
osRtxContextSwitch\
PROC
EXPORT osRtxContextSwitch
IMPORT osRtxPendSV_Handler
IMPORT osRtxInfo
IF :DEF:RTX_EXECUTION_ZONE
IMPORT osZoneSetup_Callback
ENDIF
IMPORT IRQ_Disable
IMPORT IRQ_Enable
PUSH {LR}
; Check interrupt nesting level
LDR R0, =IRQ_NestLevel
LDR R1, [R0] ; Load IRQ nest level
CMP R1, #1
BNE osRtxContextExit ; Nesting interrupts, exit context switcher
LDR R12, =osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.run
LDM R12, {R0, R1} ; Load osRtxInfo.thread.run: curr & next
LDR R2, =IRQ_PendSV ; Load address of IRQ_PendSV flag
LDRB R3, [R2] ; Load PendSV flag
CMP R0, R1 ; Check if context switch is required
BNE osRtxContextCheck ; Not equal, check if context save required
CMP R3, #1 ; Compare IRQ_PendSV value
BNE osRtxContextExit ; No post processing (and no context switch requested)
osRtxContextCheck
STR R1, [R12] ; Store run.next as run.curr
; R0 = curr, R1 = next, R2 = &IRQ_PendSV, R12 = &osRtxInfo.thread.run
PUSH {R0-R2, R12}
CMP R0, #0 ; Is osRtxInfo.thread.run.curr == 0
BEQ osRtxPostProcess ; Current deleted, skip context save
osRtxContextSave
MOV LR, R0 ; Move &osRtxInfo.thread.run.curr to LR
MOV R0, SP ; Move SP_svc into R0
ADD R0, R0, #20 ; Adjust SP_svc to R0 of the basic frame
SUB SP, SP, #4
STM SP, {SP}^ ; Save SP_usr to current stack
POP {R1} ; Pop SP_usr into R1
SUB R1, R1, #64 ; Adjust SP_usr to R4 of the basic frame
STMIA R1!, {R4-R11} ; Save R4-R11 to user stack
LDMIA R0!, {R4-R8} ; Load stacked R0-R3,R12 into R4-R8
STMIA R1!, {R4-R8} ; Store them to user stack
STM R1, {LR}^ ; Store LR_usr directly
ADD R1, R1, #4 ; Adjust user sp to PC
LDMIB R0!, {R5-R6} ; Load stacked PC, CPSR
STMIA R1!, {R5-R6} ; Store them to user stack
SUB R1, R1, #64 ; Adjust SP_usr to stacked R4
; Check if VFP state need to be saved
MRC p15, 0, R2, c1, c0, 2 ; VFP/NEON access enabled? (CPACR)
AND R2, R2, #0x00F00000
CMP R2, #0x00F00000
BNE osRtxContextSave1 ; Continue, no VFP
VMRS R2, FPSCR
STMDB R1!, {R2,R12} ; Push FPSCR, maintain 8-byte alignment
VSTMDB R1!, {D0-D15} ; Save D0-D15
IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
VSTMDB R1!, {D16-D31} ; Save D16-D31
ENDIF
LDRB R2, [LR, #TCB_SP_FRAME] ; Load osRtxInfo.thread.run.curr frame info
IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
ORR R2, R2, #4 ; NEON state
ELSE
ORR R2, R2, #2 ; VFP state
ENDIF
STRB R2, [LR, #TCB_SP_FRAME] ; Store VFP/NEON state
osRtxContextSave1
STR R1, [LR, #TCB_SP_OFS] ; Store user sp to osRtxInfo.thread.run.curr
osRtxPostProcess
; RTX IRQ post processing check
POP {R8-R11} ; Pop R8 = curr, R9 = next, R10 = &IRQ_PendSV, R11 = &osRtxInfo.thread.run
LDRB R0, [R10] ; Load PendSV flag
CMP R0, #1 ; Compare PendSV value
BNE osRtxContextRestore ; Skip post processing if not pending
MOV R4, SP ; Move SP_svc into R4
AND R4, R4, #4 ; Get stack adjustment to ensure 8-byte alignment
SUB SP, SP, R4 ; Adjust stack
; Disable OS Tick
LDR R5, =osRtxInfo ; Load address of osRtxInfo
LDR R5, [R5, #I_TICK_IRQN_OFS] ; Load OS Tick irqn
MOV R0, R5 ; Set it as function parameter
BLX IRQ_Disable ; Disable OS Tick interrupt
MOV R6, #0 ; Set PendSV clear value
B osRtxPendCheck
osRtxPendExec
STRB R6, [R10] ; Clear PendSV flag
CPSIE i ; Re-enable interrupts
BLX osRtxPendSV_Handler ; Post process pending objects
CPSID i ; Disable interrupts
osRtxPendCheck
LDR R9, [R11, #4] ; Load osRtxInfo.thread.run.next
STR R9, [R11] ; Store run.next as run.curr
LDRB R0, [R10] ; Load PendSV flag
CMP R0, #1 ; Compare PendSV value
BEQ osRtxPendExec ; Branch to PendExec if PendSV is set
; Re-enable OS Tick
MOV R0, R5 ; Restore irqn as function parameter
BLX IRQ_Enable ; Enable OS Tick interrupt
ADD SP, SP, R4 ; Restore stack adjustment
osRtxContextRestore
IF :DEF:RTX_EXECUTION_ZONE
LDRB R0, [R9, #TCB_ZONE_OFS] ; Load osRtxInfo.thread.run.next: zone
CMP R8, #0 ; Check if running thread is deleted
BEQ osRtxZoneSetup ; Branch if running thread is deleted
LDRB R1, [R8, #TCB_ZONE_OFS] ; Load osRtxInfo.thread.run.curr: zone
CMP R0, R1 ; Check if next:zone == curr:zone
BEQ osRtxContextRestoreFrame ; Branch if zone has not changed
osRtxZoneSetup
BL osZoneSetup_Callback ; Setup zone for next thread
ENDIF
osRtxContextRestoreFrame
LDR LR, [R8, #TCB_SP_OFS] ; Load next osRtxThread_t.sp
LDRB R2, [R8, #TCB_SP_FRAME] ; Load next osRtxThread_t.stack_frame
ANDS R2, R2, #0x6 ; Check stack frame for VFP context
MRC p15, 0, R2, c1, c0, 2 ; Read CPACR
ANDEQ R2, R2, #0xFF0FFFFF ; VFP/NEON state not stacked, disable VFP/NEON
ORRNE R2, R2, #0x00F00000 ; VFP/NEON state is stacked, enable VFP/NEON
MCR p15, 0, R2, c1, c0, 2 ; Write CPACR
BEQ osRtxContextRestore1 ; No VFP
ISB ; Sync if VFP was enabled
IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
VLDMIA LR!, {D16-D31} ; Restore D16-D31
ENDIF
VLDMIA LR!, {D0-D15} ; Restore D0-D15
LDR R2, [LR]
VMSR FPSCR, R2 ; Restore FPSCR
ADD LR, LR, #8 ; Adjust sp pointer to R4
osRtxContextRestore1
LDMIA LR!, {R4-R11} ; Restore R4-R11
ADD R12, LR, #32 ; Adjust sp and save it into R12
PUSH {R12} ; Push sp onto stack
LDM SP, {SP}^ ; Restore SP_usr directly
ADD SP, SP, #4 ; Adjust SP_svc
LDMIA LR!, {R0-R3, R12} ; Load user registers R0-R3,R12
STMIB SP!, {R0-R3, R12} ; Store them to SP_svc
LDM LR, {LR}^ ; Restore LR_usr directly
LDMIB LR!, {R0-R1} ; Load user registers PC,CPSR
ADD SP, SP, #4
STMIB SP!, {R0-R1} ; Store them to SP_svc
SUB SP, SP, #32 ; Adjust SP_svc to stacked LR
osRtxContextExit
POP {PC} ; Return
ENDP
END

View File

@ -0,0 +1,279 @@
;/*
; * Copyright (c) 2013-2023 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; *
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: ARMv7-M Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
;-----------------------------------------------------------------------
; FPU detection, kernel structure offsets and error codes (ARMv7-M).
; FPU_USED is derived from the armasm {FPU} build attribute.
; NOTE(review): the osRtxInfo/TCB offsets must match the C struct
; layout in the RTX5 sources - confirm when updating RTX.
;-----------------------------------------------------------------------
IF ({FPU}="FPv4-SP") || ({FPU}="VFPv4_D16") || ({FPU}="VFPv4_SP_D16") || ({FPU}="FPv5-SP") || ({FPU}="FPv5_D16")
FPU_USED EQU 1
ELSE
FPU_USED EQU 0
ENDIF
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SP_OFS EQU 56 ; TCB.SP offset
TCB_SF_OFS EQU 34 ; TCB.stack_frame offset
TCB_ZONE_OFS EQU 68 ; TCB.zone offset
FPCCR EQU 0xE000EF34 ; FPCCR Address
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
osRtxErrorSVC EQU 6 ; Invalid SVC function called
PRESERVE8 ; Stack is 8-byte aligned
THUMB ; Thumb instruction set
AREA |.constdata|, DATA, READONLY
EXPORT irqRtxLib
irqRtxLib DCB 0 ; Non weak library reference
AREA |.text|, CODE, READONLY
;-----------------------------------------------------------------------
; SVC_Handler - Supervisor Call exception handler (ARMv7-M)
;
; SVC 0 : RTX kernel service call. The function address travels in the
;         stacked R12, arguments in the stacked R0-R3, and the result
;         is written back to the stacked R0.
; SVC >0: user SVC, dispatched through the osRtxUserSVC table.
;
; SVC_Context (also entered from PendSV/SysTick with R12 = PSP)
; switches to osRtxInfo.thread.run.next, handling extended (FP) stack
; frames via EXC_RETURN bit 4, FP lazy state preservation
; (FPCCR.LSPACT), optional stack overflow checking and optional
; execution zones.
;-----------------------------------------------------------------------
SVC_Handler PROC
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
IF :DEF:RTX_STACK_CHECK
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
ENDIF
IF :DEF:RTX_SVC_PTR_CHECK
IMPORT |Image$$RTX_SVC_VENEERS$$Base|
IMPORT |Image$$RTX_SVC_VENEERS$$Length|
IMPORT osRtxKernelErrorNotify
ENDIF
IF :DEF:RTX_EXECUTION_ZONE
IMPORT osZoneSetup_Callback
ENDIF
TST LR,#0x04 ; Determine return stack from EXC_RETURN bit 2
ITE EQ
MRSEQ R0,MSP ; Get MSP if return stack is MSP
MRSNE R0,PSP ; Get PSP if return stack is PSP
LDR R1,[R0,#24] ; Load saved PC from stack
LDRB R1,[R1,#-2] ; Load SVC number
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
IF :DEF:RTX_SVC_PTR_CHECK
LDR R12,[R0,#16] ; Load function address from stack
SUB R1,R12,#1 ; Clear T-bit of function address
LSLS R2,R1,#30 ; Check if 4-byte aligned
BEQ SVC_PtrBoundsCheck ; Branch if address is aligned
SVC_PtrInvalid
PUSH {R0,LR} ; Save SP and EXC_RETURN
MOVS R0,#osRtxErrorSVC ; Parameter: code
MOV R1,R12 ; Parameter: object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
POP {R12,LR} ; Restore SP and EXC_RETURN
B SVC_Context ; Branch to context handling
SVC_PtrBoundsCheck
LDR R2,=|Image$$RTX_SVC_VENEERS$$Base|
LDR R3,=|Image$$RTX_SVC_VENEERS$$Length|
SUBS R2,R1,R2 ; Subtract SVC table base address
CMP R2,R3 ; Compare with SVC table boundaries
BHS SVC_PtrInvalid ; Branch if address is out of bounds
ENDIF
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDM R0,{R0-R3,R12} ; Load function parameters and address from stack
BLX R12 ; Call service function
POP {R12,LR} ; Restore SP and EXC_RETURN
STR R0,[R12] ; Store function return value
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
IT EQ
BXEQ LR ; Exit when threads are the same
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
IF FPU_USED != 0
CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted
SVC_FP_LazyState
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
BNE SVC_ContextRestore ; Branch if not extended stack frame
LDR R3,=FPCCR ; FPCCR Address
LDR R0,[R3] ; Load FPCCR
BIC R0,R0,#1 ; Clear LSPACT (Lazy state preservation)
STR R0,[R3] ; Store FPCCR
B SVC_ContextRestore ; Branch to context restore handling
ELSE
CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted
ENDIF
SVC_ContextSave
IF :DEF:RTX_STACK_CHECK
SUB R12,R12,#32 ; Calculate SP: space for R4..R11
IF FPU_USED != 0
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
SUBEQ R12,R12,#64 ; Additional space for S16..S31
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
ENDIF
STR R12,[R1,#TCB_SP_OFS] ; Store SP
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CBNZ R0,SVC_ContextSaveRegs ; Branch when stack check is ok
IF FPU_USED != 0
MOV R4,R1 ; Assign osRtxInfo.thread.run.curr to R4
ENDIF
MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
MOVS R1,#0 ; Simulate deleted running thread
IF FPU_USED != 0
LDRSB LR,[R4,#TCB_SF_OFS] ; Load stack frame information
B SVC_FP_LazyState ; Branch to FP lazy state handling
ELSE
B SVC_ContextRestore ; Branch to context restore handling
ENDIF
SVC_ContextSaveRegs
LDR R12,[R1,#TCB_SP_OFS] ; Load SP
IF FPU_USED != 0
LDRSB LR, [R1,#TCB_SF_OFS] ; Load stack frame information
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMIAEQ R12!,{S16-S31} ; Save VFP S16..S31
ENDIF
STM R12,{R4-R11} ; Save R4..R11
ELSE
STMDB R12!,{R4-R11} ; Save R4..R11
IF FPU_USED != 0
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMDBEQ R12!,{S16-S31} ; Save VFP S16..S31
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
ENDIF
STR R12,[R1,#TCB_SP_OFS] ; Store SP
ENDIF
SVC_ContextRestore
MOVS R4,R2 ; Assign osRtxInfo.thread.run.next to R4, clear Z flag
IF :DEF:RTX_EXECUTION_ZONE
LDRB R0,[R2,#TCB_ZONE_OFS] ; Load osRtxInfo.thread.run.next: zone
CBZ R1,SVC_ZoneSetup ; Branch if running thread is deleted (Z flag unchanged)
LDRB R1,[R1,#TCB_ZONE_OFS] ; Load osRtxInfo.thread.run.curr: zone
CMP R0,R1 ; Check if next:zone == curr:zone
SVC_ZoneSetup
IT NE ; If zone has changed or running thread is deleted
BLNE osZoneSetup_Callback ; Setup zone for next thread
ENDIF
LDR R0,[R4,#TCB_SP_OFS] ; Load SP
IF FPU_USED != 0
LDRSB LR,[R4,#TCB_SF_OFS] ; Load stack frame information
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31
ELSE
MVN LR,#~0xFFFFFFFD ; Set EXC_RETURN value
ENDIF
LDMIA R0!,{R4-R11} ; Restore R4..R11
MSR PSP,R0 ; Set PSP
SVC_Exit
BX LR ; Exit from handler
SVC_User
LDR R2,=osRtxUserSVC ; Load address of SVC table
LDR R3,[R2] ; Load SVC maximum number
CMP R1,R3 ; Check SVC number range
BHI SVC_Exit ; Branch if out of range
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDR R12,[R2,R1,LSL #2] ; Load address of SVC function
LDM R0,{R0-R3} ; Load function parameters from stack
BLX R12 ; Call service function
POP {R12,LR} ; Restore SP and EXC_RETURN
STR R0,[R12] ; Store function return value
BX LR ; Return from handler
ALIGN
ENDP
;-----------------------------------------------------------------------
; PendSV_Handler - deferred kernel post processing (ARMv7-M)
; Calls osRtxPendSV_Handler, then enters SVC_Context with R12 = PSP
; to perform a thread switch if one is pending.
;-----------------------------------------------------------------------
PendSV_Handler PROC
EXPORT PendSV_Handler
IMPORT osRtxPendSV_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,LR} ; Restore EXC_RETURN
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
ALIGN
ENDP
;-----------------------------------------------------------------------
; SysTick_Handler - kernel tick exception handler (ARMv7-M)
; Calls osRtxTick_Handler, then enters SVC_Context with R12 = PSP
; to perform a thread switch if one is pending.
;-----------------------------------------------------------------------
SysTick_Handler PROC
EXPORT SysTick_Handler
IMPORT osRtxTick_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,LR} ; Restore EXC_RETURN
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
ALIGN
ENDP
IF :DEF:RTX_SAFETY_FEATURES
;-----------------------------------------------------------------------
; osFaultResume - resume thread dispatching after a fault.
; Enters SVC_Context with R12 = PSP to switch to the next ready
; thread. Only present when RTX_SAFETY_FEATURES is defined.
;-----------------------------------------------------------------------
osFaultResume PROC
EXPORT osFaultResume
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
ALIGN
ENDP
ENDIF
END

View File

@ -0,0 +1,255 @@
/*
* Copyright (c) 2013-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: ARMv6-M Exception handlers
*
* -----------------------------------------------------------------------------
*/
.syntax unified
#include "rtx_def.h"
// Kernel structure offsets and error codes used by the handlers below.
// NOTE(review): these offsets must match the C layout of osRtxInfo and
// osRtxThread_t (TCB) in the RTX5 sources - confirm when updating RTX.
.equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset
.equ TCB_SP_OFS, 56 // TCB.SP offset
.equ TCB_ZONE_OFS, 68 // TCB.zone offset
.equ osRtxErrorStackOverflow, 1 // Stack overflow
.equ osRtxErrorSVC, 6 // Invalid SVC function called
.section ".rodata"
.global irqRtxLib // Non weak library reference
irqRtxLib:
.byte 0
.thumb
.section ".text"
.align 2
.eabi_attribute Tag_ABI_align_preserved, 1 // 8-byte stack alignment preserved
//----------------------------------------------------------------------
// SVC_Handler - Supervisor Call exception handler (ARMv6-M, GAS port)
//
// SVC 0 : RTX kernel service call. The service function address is
//         passed in R7 (bounds-checked against the RTX_SVC_VENEERS
//         region when RTX_SVC_PTR_CHECK is defined). Arguments R0-R3
//         are loaded from the exception stack frame and the result is
//         stored back into the stacked R0.
// SVC >0: user SVC, dispatched through the osRtxUserSVC table.
//
// SVC_Context (also entered from PendSV_Handler / SysTick_Handler)
// switches to osRtxInfo.thread.run.next when it differs from the
// current thread, saving/restoring R4-R11 on the thread's process
// stack (PSP). Entered with LR = EXC_RETURN.
//----------------------------------------------------------------------
.thumb_func
.type SVC_Handler, %function
.global SVC_Handler
.fnstart
.cantunwind
SVC_Handler:
mov r0,lr
lsrs r0,r0,#3 // Determine return stack from EXC_RETURN bit 2
bcc SVC_MSP // Branch if return stack is MSP
mrs r0,psp // Get PSP
SVC_Number:
ldr r1,[r0,#24] // Load saved PC from stack
subs r1,r1,#2 // Point to SVC instruction
ldrb r1,[r1] // Load SVC number
cmp r1,#0 // Check SVC number
bne SVC_User // Branch if not SVC 0
#ifdef RTX_SVC_PTR_CHECK
subs r1,r7,#0x01 // Clear T-bit of function address
lsls r2,r1,#29 // Check if 8-byte aligned
beq SVC_PtrBoundsCheck // Branch if address is aligned
SVC_PtrInvalid:
push {r0,lr} // Save SP and EXC_RETURN
movs r0,#osRtxErrorSVC // Parameter: code
mov r1,r7 // Parameter: object_id
bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify
pop {r2,r3} // Restore SP and EXC_RETURN
mov lr,r3 // Set EXC_RETURN
b SVC_Context // Branch to context handling
SVC_PtrBoundsCheck:
ldr r2,=Image$$RTX_SVC_VENEERS$$Base
ldr r3,=Image$$RTX_SVC_VENEERS$$Length
subs r2,r1,r2 // Subtract SVC table base address
cmp r2,r3 // Compare with SVC table boundaries
bhs SVC_PtrInvalid // Branch if address is out of bounds
#endif // RTX_SVC_PTR_CHECK
push {r0,lr} // Save SP and EXC_RETURN
ldmia r0,{r0-r3} // Load function parameters from stack
blx r7 // Call service function
pop {r2,r3} // Restore SP and EXC_RETURN
str r0,[r2] // Store function return value
mov lr,r3 // Set EXC_RETURN
SVC_Context:
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldmia r3!,{r1,r2} // Load osRtxInfo.thread.run: curr & next
cmp r1,r2 // Check if thread switch is required
beq SVC_Exit // Branch when threads are the same
subs r3,r3,#8 // Adjust address
str r2,[r3] // osRtxInfo.thread.run: curr = next
cmp r1,#0 // Check if running thread is deleted
beq SVC_ContextRestore // Branch if running thread is deleted
SVC_ContextSave:
mrs r0,psp // Get PSP
subs r0,r0,#32 // Calculate SP: space for R4..R11
str r0,[r1,#TCB_SP_OFS] // Store SP
#ifdef RTX_STACK_CHECK
push {r1,r2} // Save osRtxInfo.thread.run: curr & next
mov r0,r1 // Parameter: osRtxInfo.thread.run.curr
bl osRtxThreadStackCheck // Check if thread stack is overrun
pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next
cmp r0,#0 // Check stack check result
bne SVC_ContextSaveRegs // Branch when stack check is ok
movs r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id
bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next
str r2,[r3] // osRtxInfo.thread.run: curr = next
movs r1,#0 // Simulate deleted running thread
b SVC_ContextRestore // Branch to context restore handling
SVC_ContextSaveRegs:
ldr r0,[r1,#TCB_SP_OFS] // Load SP
#endif // RTX_STACK_CHECK
stmia r0!,{r4-r7} // Save R4..R7
mov r4,r8 // Copy high registers to low registers
mov r5,r9 // (ARMv6-M STMIA cannot store R8-R11)
mov r6,r10
mov r7,r11
stmia r0!,{r4-r7} // Save R8..R11
SVC_ContextRestore:
movs r4,r2 // Assign osRtxInfo.thread.run.next to R4
#ifdef RTX_EXECUTION_ZONE
movs r3,#TCB_ZONE_OFS // Get TCB.zone offset
ldrb r0,[r2,r3] // Load osRtxInfo.thread.run.next: zone
cmp r1,#0 // Check if running thread is deleted
beq SVC_ZoneSetup // Branch if running thread is deleted
ldrb r1,[r1,r3] // Load osRtxInfo.thread.run.curr: zone
cmp r0,r1 // Check if next:zone == curr:zone
beq SVC_ContextRestore_N // Branch if zone has not changed
SVC_ZoneSetup:
bl osZoneSetup_Callback // Setup zone for next thread
#endif // RTX_EXECUTION_ZONE
SVC_ContextRestore_N:
ldr r0,[r4,#TCB_SP_OFS] // Load SP
adds r0,r0,#16 // Adjust address
ldmia r0!,{r4-r7} // Restore R8..R11 (via low registers)
mov r8,r4
mov r9,r5
mov r10,r6
mov r11,r7
msr psp,r0 // Set PSP
subs r0,r0,#32 // Adjust address
ldmia r0!,{r4-r7} // Restore R4..R7
movs r0,#2 // Binary complement of 0xFFFFFFFD
mvns r0,r0 // Set EXC_RETURN value
bx r0 // Exit from handler
SVC_MSP:
mrs r0,msp // Get MSP
b SVC_Number // Branch to SVC number handling
SVC_Exit:
bx lr // Exit from handler
SVC_User:
ldr r2,=osRtxUserSVC // Load address of SVC table
ldr r3,[r2] // Load SVC maximum number
cmp r1,r3 // Check SVC number range
bhi SVC_Exit // Branch if out of range
push {r0,lr} // Save SP and EXC_RETURN
lsls r1,r1,#2 // Convert SVC number to table offset
ldr r3,[r2,r1] // Load address of SVC function
mov r12,r3
ldmia r0,{r0-r3} // Load function parameters from stack
blx r12 // Call service function
pop {r2,r3} // Restore SP and EXC_RETURN
str r0,[r2] // Store function return value
bx r3 // Return from handler
.fnend
.size SVC_Handler, .-SVC_Handler
//-----------------------------------------------------------------------
// PendSV_Handler (Armv6-M)
// Runs RTX post processing (osRtxPendSV_Handler) and then falls through
// into SVC_Context to switch threads if osRtxInfo.thread.run changed.
// Thumb-1 POP cannot write LR directly, so EXC_RETURN is popped into R1
// and moved to LR; R0 is pushed alongside LR to keep the stack 8-byte
// aligned across the call (AAPCS).
//-----------------------------------------------------------------------
.thumb_func
.type PendSV_Handler, %function
.global PendSV_Handler
.fnstart
.cantunwind
PendSV_Handler:
push {r0,lr} // Save EXC_RETURN (R0 only pads for 8-byte stack alignment)
bl osRtxPendSV_Handler // Call osRtxPendSV_Handler
pop {r0,r1} // Restore EXC_RETURN into R1 (POP cannot target LR on Thumb-1)
mov lr,r1 // Set EXC_RETURN
b SVC_Context // Branch to context handling
.fnend
.size PendSV_Handler, .-PendSV_Handler
//-----------------------------------------------------------------------
// SysTick_Handler (Armv6-M)
// Drives the RTX kernel tick (osRtxTick_Handler) and then falls through
// into SVC_Context to switch threads if the tick made a different thread
// ready. Same Thumb-1 POP/LR restriction handling as PendSV_Handler.
//-----------------------------------------------------------------------
.thumb_func
.type SysTick_Handler, %function
.global SysTick_Handler
.fnstart
.cantunwind
SysTick_Handler:
push {r0,lr} // Save EXC_RETURN (R0 only pads for 8-byte stack alignment)
bl osRtxTick_Handler // Call osRtxTick_Handler
pop {r0,r1} // Restore EXC_RETURN into R1 (POP cannot target LR on Thumb-1)
mov lr,r1 // Set EXC_RETURN
b SVC_Context // Branch to context handling
.fnend
.size SysTick_Handler, .-SysTick_Handler
//-----------------------------------------------------------------------
// osFaultResume (Armv6-M, RTX_SAFETY_FEATURES only)
// Entry point used to resume scheduling after a fault has been handled:
// simply re-enters the common context-switch path. Expects LR to already
// hold a valid EXC_RETURN value when branched to.
//-----------------------------------------------------------------------
#ifdef RTX_SAFETY_FEATURES
.thumb_func
.type osFaultResume, %function
.global osFaultResume
.fnstart
.cantunwind
osFaultResume:
b SVC_Context // Branch to context handling
.fnend
.size osFaultResume, .-osFaultResume
#endif // RTX_SAFETY_FEATURES
.end

View File

@ -0,0 +1,485 @@
/*
* Copyright (c) 2013-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: ARMv7-A Exception handlers
*
* -----------------------------------------------------------------------------
*/
.syntax unified
#include "rtx_def.h"
.equ MODE_FIQ, 0x11
.equ MODE_IRQ, 0x12
.equ MODE_SVC, 0x13
.equ MODE_ABT, 0x17
.equ MODE_UND, 0x1B
.equ CPSR_BIT_T, 0x20
.equ K_STATE_RUNNING, 2 // osKernelState_t::osKernelRunning
.equ I_K_STATE_OFS, 8 // osRtxInfo.kernel.state offset
.equ I_TICK_IRQN_OFS, 16 // osRtxInfo.tick_irqn offset
.equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset
.equ TCB_SP_FRAME, 34 // osRtxThread_t.stack_frame offset
.equ TCB_SP_OFS, 56 // osRtxThread_t.sp offset
.equ TCB_ZONE_OFS, 68 // osRtxThread_t.zone offset
.section ".rodata"
.global irqRtxLib // Non weak library reference
irqRtxLib:
.byte 0
.section ".data"
.global SVC_Active
.global IRQ_PendSV
IRQ_NestLevel:
.word 0 // IRQ nesting level counter
SVC_Active:
.byte 0 // SVC Handler Active
IRQ_PendSV:
.byte 0 // Pending SVC flag
.arm
.section ".text"
.align 4
//-----------------------------------------------------------------------
// Undef_Handler (Armv7-A)
// Undefined-instruction exception entry. Reconstructs the offending
// instruction word, calls C handler:
//   int32_t CUndefHandler(uint32_t opcode, uint32_t size, uint32_t lr)
// and uses its return value (in R0) to adjust the return address so the
// instruction can optionally be retried.
// Stack layout after the two pushes below:
//   [sp+0..20] = R0-R4,R12, [sp+24] = LR_und, [sp+28] = SPSR_und
//-----------------------------------------------------------------------
.type Undef_Handler, %function
.global Undef_Handler
.fnstart
.cantunwind
Undef_Handler:
srsfd sp!, #MODE_UND // Save LR_und and SPSR_und to UND mode stack
push {r0-r4, r12} // Save APCS corruptible registers to UND mode stack
mrs r0, spsr
tst r0, #CPSR_BIT_T // Check mode
moveq r1, #4 // R1 = 4 ARM mode
movne r1, #2 // R1 = 2 Thumb mode
sub r0, lr, r1 // R0 = address of the offending instruction
ldreq r0, [r0] // ARM mode - R0 points to offending instruction
beq Undef_Cont
// Thumb instruction
// Determine if it is a 32-bit Thumb instruction
ldrh r0, [r0]
mov r2, #0x1C // 0x1C = first 32-bit encoding >> 11 boundary
cmp r2, r0, lsr #11
bhs Undef_Cont // 16-bit Thumb instruction
// 32-bit Thumb instruction. Unaligned - reconstruct the offending instruction
ldrh r2, [lr] // Second halfword (LR points past first halfword)
orr r0, r2, r0, lsl #16
Undef_Cont:
mov r2, lr // Set LR to third argument
and r12, sp, #4 // Ensure stack is 8-byte aligned
sub sp, sp, r12 // Adjust stack
push {r12, lr} // Store stack adjustment and dummy LR
// R0 =Offending instruction, R1 =2(Thumb) or =4(ARM)
bl CUndefHandler
pop {r12, lr} // Get stack adjustment & discard dummy LR
add sp, sp, r12 // Unadjust stack
ldr lr, [sp, #24] // Restore stacked LR and possibly adjust for retry
sub lr, lr, r0 // R0 = adjustment returned by CUndefHandler (0 = skip)
ldr r0, [sp, #28] // Restore stacked SPSR
msr spsr_cxsf, r0
clrex // Clear exclusive monitor
pop {r0-r4, r12} // Restore stacked APCS registers
add sp, sp, #8 // Discard stacked LR/SPSR pair (already consumed above)
movs pc, lr // Exception return: PC = LR, CPSR = SPSR
.fnend
.size Undef_Handler, .-Undef_Handler
//-----------------------------------------------------------------------
// PAbt_Handler (Armv7-A)
// Prefetch-abort exception entry. Reads fault status/address from CP15
// (IFSR/IFAR) and calls C handler:
//   void CPAbtHandler(uint32_t ifsr, uint32_t ifar, uint32_t lr)
// then returns to the pre-adjusted return address via RFE.
//-----------------------------------------------------------------------
.type PAbt_Handler, %function
.global PAbt_Handler
.fnstart
.cantunwind
PAbt_Handler:
sub lr, lr, #4 // Pre-adjust LR to the aborting instruction
srsfd sp!, #MODE_ABT // Save LR and SPSR to ABT mode stack
push {r0-r4, r12} // Save APCS corruptible registers to ABT mode stack
mrc p15, 0, r0, c5, c0, 1 // IFSR (Instruction Fault Status Register)
mrc p15, 0, r1, c6, c0, 2 // IFAR (Instruction Fault Address Register)
mov r2, lr // Set LR to third argument
and r12, sp, #4 // Ensure stack is 8-byte aligned
sub sp, sp, r12 // Adjust stack
push {r12, lr} // Store stack adjustment and dummy LR
bl CPAbtHandler
pop {r12, lr} // Get stack adjustment & discard dummy LR
add sp, sp, r12 // Unadjust stack
clrex // Clear exclusive monitor
pop {r0-r4, r12} // Restore stacked APCS registers
rfefd sp! // Return from exception (restores PC and CPSR)
.fnend
.size PAbt_Handler, .-PAbt_Handler
//-----------------------------------------------------------------------
// DAbt_Handler (Armv7-A)
// Data-abort exception entry. Reads fault status/address from CP15
// (DFSR/DFAR) and calls C handler:
//   void CDAbtHandler(uint32_t dfsr, uint32_t dfar, uint32_t lr)
// then returns to the pre-adjusted return address via RFE.
//-----------------------------------------------------------------------
.type DAbt_Handler, %function
.global DAbt_Handler
.fnstart
.cantunwind
DAbt_Handler:
sub lr, lr, #8 // Pre-adjust LR to the aborting instruction
srsfd sp!, #MODE_ABT // Save LR and SPSR to ABT mode stack
push {r0-r4, r12} // Save APCS corruptible registers to ABT mode stack
mrc p15, 0, r0, c5, c0, 0 // DFSR (Data Fault Status Register)
mrc p15, 0, r1, c6, c0, 0 // DFAR (Data Fault Address Register)
mov r2, lr // Set LR to third argument
and r12, sp, #4 // Ensure stack is 8-byte aligned
sub sp, sp, r12 // Adjust stack
push {r12, lr} // Store stack adjustment and dummy LR
bl CDAbtHandler
pop {r12, lr} // Get stack adjustment & discard dummy LR
add sp, sp, r12 // Unadjust stack
clrex // Clear exclusive monitor
pop {r0-r4, r12} // Restore stacked APCS registers
rfefd sp! // Return from exception (restores PC and CPSR)
.fnend
.size DAbt_Handler, .-DAbt_Handler
//-----------------------------------------------------------------------
// IRQ_Handler (Armv7-A)
// IRQ exception entry. Switches to SVC mode (so the context switcher
// sees a uniform SVC-stack frame), tracks nesting in IRQ_NestLevel,
// dispatches to the registered handler for the active interrupt via the
// IRQ_GetActiveIRQ/IRQ_GetHandler/IRQ_EndOfInterrupt driver API, and
// finally runs osRtxContextSwitch before returning with RFE.
// Register use across calls: R4 = active interrupt ID (callee-saved).
//-----------------------------------------------------------------------
.type IRQ_Handler, %function
.global IRQ_Handler
.fnstart
.cantunwind
IRQ_Handler:
sub lr, lr, #4 // Pre-adjust LR to the interrupted instruction
srsfd sp!, #MODE_SVC // Save LR_irq and SPSR_irq on to the SVC stack
cps #MODE_SVC // Change to SVC mode
push {r0-r3, r12, lr} // Save APCS corruptible registers
ldr r0, =IRQ_NestLevel
ldr r1, [r0]
add r1, r1, #1 // Increment IRQ nesting level
str r1, [r0]
mov r3, sp // Move SP into R3
and r3, r3, #4 // Get stack adjustment to ensure 8-byte alignment
sub sp, sp, r3 // Adjust stack
push {r3, r4} // Store stack adjustment(R3) and user data(R4)
blx IRQ_GetActiveIRQ // Retrieve interrupt ID into R0
mov r4, r0 // Move interrupt ID to R4 (survives calls below)
blx IRQ_GetHandler // Retrieve interrupt handler address for current ID
cmp r0, #0 // Check if handler address is 0
beq IRQ_End // If 0, end interrupt and return
cpsie i // Re-enable interrupts (allow IRQ nesting)
blx r0 // Call IRQ handler
cpsid i // Disable interrupts
IRQ_End:
mov r0, r4 // Move interrupt ID to R0
blx IRQ_EndOfInterrupt // Signal end of interrupt
pop {r3, r4} // Restore stack adjustment(R3) and user data(R4)
add sp, sp, r3 // Unadjust stack
bl osRtxContextSwitch // Continue in context switcher
ldr r0, =IRQ_NestLevel
ldr r1, [r0]
subs r1, r1, #1 // Decrement IRQ nesting level
str r1, [r0]
clrex // Clear exclusive monitor for interrupted code
pop {r0-r3, r12, lr} // Restore stacked APCS registers
rfefd sp! // Return from IRQ handler (restores PC and CPSR)
.fnend
.size IRQ_Handler, .-IRQ_Handler
//-----------------------------------------------------------------------
// SVC_Handler (Armv7-A)
// SVC exception entry. Extracts the SVC immediate from the SVC
// instruction that trapped:
//   SVC 0  -> RTX kernel service call: the function address is in R12
//             and arguments in R0-R3 (as stacked on entry); OS Tick is
//             masked around the call when the kernel is running, then
//             osRtxContextSwitch decides whether to switch threads.
//   SVC >0 -> user SVC: dispatched through the osRtxUserSVC table
//             (word 0 = highest valid SVC number).
// IRQ_NestLevel / SVC_Active bracket the call so the context switcher
// only runs at the outermost level.
//-----------------------------------------------------------------------
.type SVC_Handler, %function
.global SVC_Handler
.fnstart
.cantunwind
SVC_Handler:
srsfd sp!, #MODE_SVC // Store SPSR_svc and LR_svc onto SVC stack
push {r12, lr}
mrs r12, spsr // Load SPSR
tst r12, #CPSR_BIT_T // Thumb bit set?
ldrhne r12, [lr,#-2] // Thumb: load halfword
bicne r12, r12, #0xFF00 // extract SVC number
ldreq r12, [lr,#-4] // ARM: load word
biceq r12, r12, #0xFF000000 // extract SVC number
cmp r12, #0 // Compare SVC number
bne SVC_User // Branch if User SVC
push {r0-r3} // Push arguments to stack
ldr r0, =SVC_Active
mov r1, #1
strb r1, [r0] // Set SVC Handler Active
ldr r0, =IRQ_NestLevel
ldr r1, [r0]
add r1, r1, #1 // Increment IRQ nesting level
str r1, [r0]
ldr r0, =osRtxInfo
ldr r1, [r0, #I_K_STATE_OFS] // Load RTX5 kernel state
cmp r1, #K_STATE_RUNNING // Check osKernelRunning
blt SVC_FuncCall // Continue if kernel is not running
ldr r0, [r0, #I_TICK_IRQN_OFS] // Load OS Tick irqn
blx IRQ_Disable // Disable OS Tick interrupt
SVC_FuncCall:
ldm sp, {r0-r3, r12} // Reload R0-R3 and R12 from stack
cpsie i // Re-enable interrupts during the service call
blx r12 // Branch to SVC function (address from caller's R12)
cpsid i // Disable interrupts
str r0, [sp] // Store function return value (becomes caller's R0)
ldr r0, =osRtxInfo
ldr r1, [r0, #I_K_STATE_OFS] // Load RTX5 kernel state
cmp r1, #K_STATE_RUNNING // Check osKernelRunning
blt SVC_ContextCheck // Continue if kernel is not running
ldr r0, [r0, #I_TICK_IRQN_OFS] // Load OS Tick irqn
blx IRQ_Enable // Enable OS Tick interrupt
SVC_ContextCheck:
bl osRtxContextSwitch // Continue in context switcher
ldr r0, =IRQ_NestLevel
ldr r1, [r0]
sub r1, r1, #1 // Decrement IRQ nesting level
str r1, [r0]
ldr r0, =SVC_Active
mov r1, #0
strb r1, [r0] // Clear SVC Handler Active
clrex // Clear exclusive monitor
pop {r0-r3, r12, lr} // Restore stacked APCS registers
rfefd sp! // Return from exception
SVC_User:
push {r4, r5}
ldr r5,=osRtxUserSVC // Load address of SVC table
ldr r4,[r5] // Load SVC maximum number
cmp r12,r4 // Check SVC number range
bhi SVC_Done // Branch if out of range (call silently skipped)
ldr r12,[r5,r12,lsl #2] // Load SVC Function Address
blx r12 // Call SVC Function
SVC_Done:
clrex // Clear exclusive monitor
pop {r4, r5, r12, lr} // R4/R5 pushed above, R12/LR pushed at entry
rfefd sp! // Return from exception
.fnend
.size SVC_Handler, .-SVC_Handler
//-----------------------------------------------------------------------
// osRtxContextSwitch (Armv7-A)
// Core scheduler tail, called from SVC_Handler/IRQ_Handler while in SVC
// mode with interrupts disabled. Runs only at the outermost interrupt
// level (IRQ_NestLevel == 1). If osRtxInfo.thread.run.curr != .next (or
// IRQ_PendSV is set), it:
//   1. saves the outgoing thread's R4-R11, the SVC-stacked basic frame
//      (R0-R3,R12,LR_usr,PC,CPSR) and, when CPACR shows VFP/NEON enabled,
//      the D registers + FPSCR onto the thread's user stack,
//   2. runs osRtxPendSV_Handler (with OS Tick masked) while PendSV
//      requests keep arriving,
//   3. restores the incoming thread's frame symmetrically and rebuilds
//      the SVC-stack frame so the caller's RFE resumes the new thread.
// NOTE(review): the unbanked-register transfers via {..}^ and the exact
// stack-offset arithmetic (#20/#64/#32) mirror the frame pushed by the
// callers; do not reorder.
//-----------------------------------------------------------------------
.type osRtxContextSwitch, %function
.global osRtxContextSwitch
.fnstart
.cantunwind
osRtxContextSwitch:
push {lr}
// Check interrupt nesting level
ldr r0, =IRQ_NestLevel
ldr r1, [r0] // Load IRQ nest level
cmp r1, #1
bne osRtxContextExit // Nesting interrupts, exit context switcher
ldr r12, =osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run
ldm r12, {r0, r1} // Load osRtxInfo.thread.run: curr & next
ldr r2, =IRQ_PendSV // Load address of IRQ_PendSV flag
ldrb r3, [r2] // Load PendSV flag
cmp r0, r1 // Check if context switch is required
bne osRtxContextCheck // Not equal, check if context save required
cmp r3, #1 // Compare IRQ_PendSV value
bne osRtxContextExit // No post processing (and no context switch requested)
osRtxContextCheck:
str r1, [r12] // Store run.next as run.curr
// R0 = curr, R1 = next, R2 = &IRQ_PendSV, R12 = &osRtxInfo.thread.run
push {r0-r2, r12}
cmp r0, #0 // Is osRtxInfo.thread.run.curr == 0
beq osRtxPostProcess // Current deleted, skip context save
osRtxContextSave:
mov lr, r0 // Move &osRtxInfo.thread.run.curr to LR
mov r0, sp // Move SP_svc into R0
add r0, r0, #20 // Adjust SP_svc to R0 of the basic frame
sub sp, sp, #4
stm sp, {sp}^ // Save SP_usr to current stack ({..}^ accesses user-mode reg)
pop {r1} // Pop SP_usr into R1
sub r1, r1, #64 // Adjust SP_usr to R4 of the basic frame
stmia r1!, {r4-r11} // Save R4-R11 to user stack
ldmia r0!, {r4-r8} // Load stacked R0-R3,R12 into R4-R8
stmia r1!, {r4-r8} // Store them to user stack
stm r1, {lr}^ // Store LR_usr directly
add r1, r1, #4 // Adjust user sp to PC
ldmib r0!, {r5-r6} // Load stacked PC, CPSR
stmia r1!, {r5-r6} // Store them to user stack
sub r1, r1, #64 // Adjust SP_usr to stacked R4
// Check if VFP state need to be saved
mrc p15, 0, r2, c1, c0, 2 // VFP/NEON access enabled? (CPACR)
and r2, r2, #0x00F00000 // Isolate cp10/cp11 access bits
cmp r2, #0x00F00000
bne osRtxContextSaveSP // Continue, no VFP
vmrs r2, fpscr
stmdb r1!, {r2,r12} // Push FPSCR, maintain 8-byte alignment
vstmdb r1!, {d0-d15} // Save D0-D15
#if defined(__ARM_NEON) && (__ARM_NEON == 1)
vstmdb r1!, {d16-d31} // Save D16-D31
#endif
ldrb r2, [lr, #TCB_SP_FRAME] // Load osRtxInfo.thread.run.curr frame info
#if defined(__ARM_NEON) && (__ARM_NEON == 1)
orr r2, r2, #4 // NEON state
#else
orr r2, r2, #2 // VFP state
#endif
strb r2, [lr, #TCB_SP_FRAME] // Store VFP/NEON state
osRtxContextSaveSP:
str r1, [lr, #TCB_SP_OFS] // Store user sp to osRtxInfo.thread.run.curr
osRtxPostProcess:
// RTX IRQ post processing check
pop {r8-r11} // Pop R8 = curr, R9 = next, R10 = &IRQ_PendSV, R11 = &osRtxInfo.thread.run
ldrb r0, [r10] // Load PendSV flag
cmp r0, #1 // Compare PendSV value
bne osRtxContextRestore // Skip post processing if not pending
mov r4, sp // Move SP_svc into R4
and r4, r4, #4 // Get stack adjustment to ensure 8-byte alignment
sub sp, sp, r4 // Adjust stack
// Disable OS Tick
ldr r5, =osRtxInfo // Load address of osRtxInfo
ldr r5, [r5, #I_TICK_IRQN_OFS] // Load OS Tick irqn
mov r0, r5 // Set it as function parameter
blx IRQ_Disable // Disable OS Tick interrupt
mov r6, #0 // Set PendSV clear value
b osRtxPendCheck
osRtxPendExec:
strb r6, [r10] // Clear PendSV flag
cpsie i // Re-enable interrupts
blx osRtxPendSV_Handler // Post process pending objects
cpsid i // Disable interrupts
osRtxPendCheck:
ldr r9, [r11, #4] // Load osRtxInfo.thread.run.next
str r9, [r11] // Store run.next as run.curr
ldrb r0, [r10] // Load PendSV flag
cmp r0, #1 // Compare PendSV value
beq osRtxPendExec // Branch to PendExec if PendSV is set
// Re-enable OS Tick
mov r0, r5 // Restore irqn as function parameter
blx IRQ_Enable // Enable OS Tick interrupt
add sp, sp, r4 // Restore stack adjustment
osRtxContextRestore:
#ifdef RTX_EXECUTION_ZONE
ldrb r0, [r9, #TCB_ZONE_OFS] // Load osRtxInfo.thread.run.next: zone
cmp r8, #0
beq osRtxZoneSetup // Branch if running thread is deleted
ldrb r1, [r8, #TCB_ZONE_OFS] // Load osRtxInfo.thread.run.curr: zone
cmp r0, r1 // Check if next:zone == curr:zone
beq osRtxContextRestoreFrame // Branch if zone has not changed
osRtxZoneSetup:
bl osZoneSetup_Callback // Setup zone for next thread
#endif
osRtxContextRestoreFrame:
ldr lr, [r9, #TCB_SP_OFS] // Load next osRtxThread_t.sp
ldrb r2, [r9, #TCB_SP_FRAME] // Load next osRtxThread_t.stack_frame
ands r2, r2, #0x6 // Check stack frame for VFP context
mrc p15, 0, r2, c1, c0, 2 // Read CPACR
andeq r2, r2, #0xFF0FFFFF // VFP/NEON state not stacked, disable VFP/NEON
orrne r2, r2, #0x00F00000 // VFP/NEON state is stacked, enable VFP/NEON
mcr p15, 0, r2, c1, c0, 2 // Write CPACR
beq osRtxContextRestoreRegs // No VFP
isb // Sync if VFP was enabled
#if defined(__ARM_NEON) && (__ARM_NEON == 1)
vldmia lr!, {d16-d31} // Restore D16-D31
#endif
vldmia lr!, {d0-d15} // Restore D0-D15
ldr r2, [lr]
vmsr fpscr, r2 // Restore FPSCR
add lr, lr, #8 // Adjust sp pointer to R4
osRtxContextRestoreRegs:
ldmia lr!, {r4-r11} // Restore R4-R11
add r12, lr, #32 // Adjust sp and save it into R12
push {r12} // Push sp onto stack
ldm sp, {sp}^ // Restore SP_usr directly
add sp, sp, #4 // Adjust SP_svc
ldmia lr!, {r0-r3, r12} // Load user registers R0-R3,R12
stmib sp!, {r0-r3, r12} // Store them to SP_svc
ldm lr, {lr}^ // Restore LR_usr directly
ldmib lr!, {r0-r1} // Load user registers PC,CPSR
add sp, sp, #4
stmib sp!, {r0-r1} // Store them to SP_svc
sub sp, sp, #32 // Adjust SP_svc to stacked LR
osRtxContextExit:
pop {pc} // Return
.fnend
.size osRtxContextSwitch, .-osRtxContextSwitch
.end

View File

@ -0,0 +1,281 @@
/*
* Copyright (c) 2013-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: ARMv7-M Exception handlers
*
* -----------------------------------------------------------------------------
*/
.syntax unified
#include "rtx_def.h"
#if (defined(__ARM_FP) && (__ARM_FP > 0))
.equ FPU_USED, 1
#else
.equ FPU_USED, 0
#endif
.equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset
.equ TCB_SP_OFS, 56 // TCB.SP offset
.equ TCB_SF_OFS, 34 // TCB.stack_frame offset
.equ TCB_ZONE_OFS, 68 // TCB.zone offset
.equ FPCCR, 0xE000EF34 // FPCCR Address
.equ osRtxErrorStackOverflow, 1 // Stack overflow
.equ osRtxErrorSVC, 6 // Invalid SVC function called
.section ".rodata"
.global irqRtxLib // Non weak library reference
irqRtxLib:
.byte 0
.thumb
.section ".text"
.align 2
.eabi_attribute Tag_ABI_align_preserved, 1
//-----------------------------------------------------------------------
// SVC_Handler (Armv7-M)
// SVC exception entry. R0 is set to the active exception stack (MSP or
// PSP per EXC_RETURN bit 2) so the stacked frame can be read:
//   SVC 0  -> RTX kernel service call: function address in stacked R12,
//             arguments in stacked R0-R3; falls into SVC_Context which
//             performs the thread switch (saving/restoring R4-R11 and,
//             with FPU, S16-S31 per EXC_RETURN bit 4).
//   SVC >0 -> user SVC via the osRtxUserSVC table (word 0 = max number).
// Optional compile-time features: RTX_SVC_PTR_CHECK validates the callee
// address against the RTX_SVC_VENEERS linker region, RTX_STACK_CHECK
// verifies the outgoing thread's stack, RTX_EXECUTION_ZONE notifies
// osZoneSetup_Callback on zone changes.
// Entry into SVC_Context from PendSV/SysTick requires R12 = PSP and
// LR = EXC_RETURN of the interrupted thread.
//-----------------------------------------------------------------------
.thumb_func
.type SVC_Handler, %function
.global SVC_Handler
.fnstart
.cantunwind
SVC_Handler:
tst lr,#0x04 // Determine return stack from EXC_RETURN bit 2
ite eq
mrseq r0,msp // Get MSP if return stack is MSP
mrsne r0,psp // Get PSP if return stack is PSP
ldr r1,[r0,#24] // Load saved PC from stack
ldrb r1,[r1,#-2] // Load SVC number (imm8 of the SVC instruction)
cmp r1,#0 // Check SVC number
bne SVC_User // Branch if not SVC 0
#ifdef RTX_SVC_PTR_CHECK
ldr r12,[r0,#16] // Load function address from stack
sub r1,r12,#1 // Clear T-bit of function address
lsls r2,r1,#30 // Check if 4-byte aligned
beq SVC_PtrBoundsCheck // Branch if address is aligned
SVC_PtrInvalid:
push {r0,lr} // Save SP and EXC_RETURN
movs r0,#osRtxErrorSVC // Parameter: code
mov r1,r12 // Parameter: object_id
bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify
pop {r12,lr} // Restore SP and EXC_RETURN
b SVC_Context // Branch to context handling
SVC_PtrBoundsCheck:
ldr r2,=Image$$RTX_SVC_VENEERS$$Base
ldr r3,=Image$$RTX_SVC_VENEERS$$Length
subs r2,r1,r2 // Subtract SVC table base address
cmp r2,r3 // Compare with SVC table boundaries
bhs SVC_PtrInvalid // Branch if address is out of bounds
#endif // RTX_SVC_PTR_CHECK
push {r0,lr} // Save SP and EXC_RETURN
ldm r0,{r0-r3,r12} // Load function parameters and address from stack
blx r12 // Call service function
pop {r12,lr} // Restore SP and EXC_RETURN
str r0,[r12] // Store function return value (becomes caller's R0)
SVC_Context:
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldm r3,{r1,r2} // Load osRtxInfo.thread.run: curr & next
cmp r1,r2 // Check if thread switch is required
it eq
bxeq lr // Exit when threads are the same
str r2,[r3] // osRtxInfo.thread.run: curr = next
.if (FPU_USED != 0)
cbnz r1,SVC_ContextSave // Branch if running thread is not deleted
SVC_FP_LazyState:
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
bne SVC_ContextRestore // Branch if not extended stack frame
ldr r3,=FPCCR // FPCCR Address
ldr r0,[r3] // Load FPCCR
bic r0,r0,#1 // Clear LSPACT (Lazy state preservation)
str r0,[r3] // Store FPCCR
b SVC_ContextRestore // Branch to context restore handling
.else
cbz r1,SVC_ContextRestore // Branch if running thread is deleted
.endif
SVC_ContextSave:
#ifdef RTX_STACK_CHECK
sub r12,r12,#32 // Calculate SP: space for R4..R11
.if (FPU_USED != 0)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
subeq r12,r12,#64 // Additional space for S16..S31
strb lr, [r1,#TCB_SF_OFS] // Store stack frame information
.endif
str r12,[r1,#TCB_SP_OFS] // Store SP before the check so overflow is detectable
push {r1,r2} // Save osRtxInfo.thread.run: curr & next
mov r0,r1 // Parameter: osRtxInfo.thread.run.curr
bl osRtxThreadStackCheck // Check if thread stack is overrun
pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next
cbnz r0,SVC_ContextSaveRegs // Branch when stack check is ok
.if (FPU_USED != 0)
mov r4,r1 // Assign osRtxInfo.thread.run.curr to R4
.endif
movs r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id
bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next
str r2,[r3] // osRtxInfo.thread.run: curr = next
movs r1,#0 // Simulate deleted running thread
.if (FPU_USED != 0)
ldrsb lr,[r4,#TCB_SF_OFS] // Load stack frame information
b SVC_FP_LazyState // Branch to FP lazy state handling
.else
b SVC_ContextRestore // Branch to context restore handling
.endif
SVC_ContextSaveRegs:
ldr r12,[r1,#TCB_SP_OFS] // Load SP
.if (FPU_USED != 0)
ldrsb lr, [r1,#TCB_SF_OFS] // Load stack frame information
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vstmiaeq r12!,{s16-s31} // Save VFP S16..S31
.endif
stm r12,{r4-r11} // Save R4..R11
#else
stmdb r12!,{r4-r11} // Save R4..R11
.if (FPU_USED != 0)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vstmdbeq r12!,{s16-s31} // Save VFP S16..S31
strb lr, [r1,#TCB_SF_OFS] // Store stack frame information
.endif
str r12,[r1,#TCB_SP_OFS] // Store SP
#endif // RTX_STACK_CHECK
SVC_ContextRestore:
movs r4,r2 // Assign osRtxInfo.thread.run.next to R4, clear Z flag
#ifdef RTX_EXECUTION_ZONE
ldrb r0,[r2,#TCB_ZONE_OFS] // Load osRtxInfo.thread.run.next: zone
cbz r1,SVC_ZoneSetup // Branch if running thread is deleted (Z flag unchanged)
ldrb r1,[r1,#TCB_ZONE_OFS] // Load osRtxInfo.thread.run.curr: zone
cmp r0,r1 // Check if next:zone == curr:zone
SVC_ZoneSetup:
it ne // If zone has changed or running thread is deleted
blne osZoneSetup_Callback // Setup zone for next thread
#endif // RTX_EXECUTION_ZONE
ldr r0,[r4,#TCB_SP_OFS] // Load SP
.if (FPU_USED != 0)
ldrsb lr,[r4,#TCB_SF_OFS] // Load stack frame information (sign-extends EXC_RETURN)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vldmiaeq r0!,{s16-s31} // Restore VFP S16..S31
.else
mvn lr,#~0xFFFFFFFD // Set EXC_RETURN value (return to Thread mode, PSP)
.endif
ldmia r0!,{r4-r11} // Restore R4..R11
msr psp,r0 // Set PSP
SVC_Exit:
bx lr // Exit from handler
SVC_User:
ldr r2,=osRtxUserSVC // Load address of SVC table
ldr r3,[r2] // Load SVC maximum number
cmp r1,r3 // Check SVC number range
bhi SVC_Exit // Branch if out of range
push {r0,lr} // Save SP and EXC_RETURN
ldr r12,[r2,r1,lsl #2] // Load address of SVC function
ldm r0,{r0-r3} // Load function parameters from stack
blx r12 // Call service function
pop {r12,lr} // Restore SP and EXC_RETURN
str r0,[r12] // Store function return value
bx lr // Return from handler
.fnend
.size SVC_Handler, .-SVC_Handler
//-----------------------------------------------------------------------
// PendSV_Handler (Armv7-M)
// Runs RTX post processing (osRtxPendSV_Handler) and then enters
// SVC_Context with R12 = PSP and LR = EXC_RETURN, as SVC_Context
// expects, to switch threads if one is pending.
//-----------------------------------------------------------------------
.thumb_func
.type PendSV_Handler, %function
.global PendSV_Handler
.fnstart
.cantunwind
PendSV_Handler:
push {r0,lr} // Save EXC_RETURN (R0 only pads for 8-byte stack alignment)
bl osRtxPendSV_Handler // Call osRtxPendSV_Handler
pop {r0,lr} // Restore EXC_RETURN
mrs r12,psp // Save PSP to R12 (input to SVC_Context)
b SVC_Context // Branch to context handling
.fnend
.size PendSV_Handler, .-PendSV_Handler
//-----------------------------------------------------------------------
// SysTick_Handler (Armv7-M)
// Drives the RTX kernel tick (osRtxTick_Handler) and then enters
// SVC_Context with R12 = PSP and LR = EXC_RETURN to switch threads if
// the tick made a different thread ready.
//-----------------------------------------------------------------------
.thumb_func
.type SysTick_Handler, %function
.global SysTick_Handler
.fnstart
.cantunwind
SysTick_Handler:
push {r0,lr} // Save EXC_RETURN (R0 only pads for 8-byte stack alignment)
bl osRtxTick_Handler // Call osRtxTick_Handler
pop {r0,lr} // Restore EXC_RETURN
mrs r12,psp // Save PSP to R12 (input to SVC_Context)
b SVC_Context // Branch to context handling
.fnend
.size SysTick_Handler, .-SysTick_Handler
//-----------------------------------------------------------------------
// osFaultResume (Armv7-M, RTX_SAFETY_FEATURES only)
// Resumes scheduling after a fault has been handled: loads PSP into R12
// and re-enters the common context-switch path. Expects LR to already
// hold a valid EXC_RETURN value when branched to.
//-----------------------------------------------------------------------
#ifdef RTX_SAFETY_FEATURES
.thumb_func
.type osFaultResume, %function
.global osFaultResume
.fnstart
.cantunwind
osFaultResume:
mrs r12,psp // Save PSP to R12 (input to SVC_Context)
b SVC_Context // Branch to context handling
.fnend
.size osFaultResume, .-osFaultResume
#endif // RTX_SAFETY_FEATURES
.end

View File

@ -0,0 +1,310 @@
/*
* Copyright (c) 2016-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: ARMv8-M Baseline Exception handlers
*
* -----------------------------------------------------------------------------
*/
.syntax unified
#include "rtx_def.h"
.equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset
.equ TCB_SM_OFS, 48 // TCB.stack_mem offset
.equ TCB_SP_OFS, 56 // TCB.SP offset
.equ TCB_SF_OFS, 34 // TCB.stack_frame offset
.equ TCB_TZM_OFS, 64 // TCB.tz_memory offset
.equ TCB_ZONE_OFS,68 // TCB.zone offset
.equ osRtxErrorStackOverflow, 1 // Stack overflow
.equ osRtxErrorSVC, 6 // Invalid SVC function called
.section ".rodata"
.global irqRtxLib // Non weak library reference
irqRtxLib:
.byte 0
.thumb
.section ".text"
.align 2
.eabi_attribute Tag_ABI_align_preserved, 1
//-----------------------------------------------------------------------
// SVC_Handler (Armv8-M Baseline)
// SVC exception entry, Thumb-1 subset only (hence the R7-based service
// call and the LSRS-into-carry test of EXC_RETURN bit 2):
//   SVC 0  -> RTX kernel service call: function address is passed in R7
//             (the SVC veneer convention for Baseline), arguments in
//             stacked R0-R3; falls into SVC_Context to switch threads.
//   SVC >0 -> user SVC via the osRtxUserSVC table (word 0 = max number).
// Optional compile-time features: RTX_SVC_PTR_CHECK (veneer bounds),
// RTX_TZ_CONTEXT (TrustZone secure context store/load via
// TZ_StoreContext_S/TZ_LoadContext_S), DOMAIN_NS (secure threads keep
// their frame on the secure side, so R4-R11 are not stacked here),
// RTX_STACK_CHECK, RTX_EXECUTION_ZONE. PSPLIM is set from
// TCB.stack_mem for the incoming thread.
// Entry into SVC_Context from PendSV/SysTick requires LR = EXC_RETURN.
//-----------------------------------------------------------------------
.thumb_func
.type SVC_Handler, %function
.global SVC_Handler
.fnstart
.cantunwind
SVC_Handler:
mov r0,lr
lsrs r0,r0,#3 // Determine return stack from EXC_RETURN bit 2 (shifted into carry)
bcc SVC_MSP // Branch if return stack is MSP
mrs r0,psp // Get PSP
SVC_Number:
ldr r1,[r0,#24] // Load saved PC from stack
subs r1,r1,#2 // Point to SVC instruction
ldrb r1,[r1] // Load SVC number (imm8 of the SVC instruction)
cmp r1,#0 // Check SVC number
bne SVC_User // Branch if not SVC 0
#ifdef RTX_SVC_PTR_CHECK
subs r1,r7,#0x01 // Clear T-bit of function address
lsls r2,r1,#29 // Check if 8-byte aligned
beq SVC_PtrBoundsCheck // Branch if address is aligned
SVC_PtrInvalid:
push {r0,lr} // Save SP and EXC_RETURN
movs r0,#osRtxErrorSVC // Parameter: code
mov r1,r7 // Parameter: object_id
bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify
pop {r2,r3} // Restore SP and EXC_RETURN
mov lr,r3 // Set EXC_RETURN
b SVC_Context // Branch to context handling
SVC_PtrBoundsCheck:
ldr r2,=Image$$RTX_SVC_VENEERS$$Base
ldr r3,=Image$$RTX_SVC_VENEERS$$Length
subs r2,r1,r2 // Subtract SVC table base address
cmp r2,r3 // Compare with SVC table boundaries
bhs SVC_PtrInvalid // Branch if address is out of bounds
#endif // RTX_SVC_PTR_CHECK
push {r0,lr} // Save SP and EXC_RETURN
ldmia r0,{r0-r3} // Load function parameters from stack
blx r7 // Call service function (address passed in R7)
pop {r2,r3} // Restore SP and EXC_RETURN
str r0,[r2] // Store function return value (becomes caller's R0)
mov lr,r3 // Set EXC_RETURN
SVC_Context:
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldmia r3!,{r1,r2} // Load osRtxInfo.thread.run: curr & next
cmp r1,r2 // Check if thread switch is required
beq SVC_Exit // Branch when threads are the same
subs r3,r3,#8 // Adjust address (undo the LDMIA writeback)
str r2,[r3] // osRtxInfo.thread.run: curr = next
cbz r1,SVC_ContextRestore // Branch if running thread is deleted
SVC_ContextSave:
#ifdef RTX_TZ_CONTEXT
mov r3,lr // Get EXC_RETURN
ldr r0,[r1,#TCB_TZM_OFS] // Load TrustZone memory identifier
cbz r0,SVC_ContextSave_NS // Branch if there is no secure context
push {r0-r3} // Save registers
bl TZ_StoreContext_S // Store secure context
pop {r0-r3} // Restore registers
mov lr,r3 // Set EXC_RETURN
#endif
SVC_ContextSave_NS:
mrs r0,psp // Get PSP
#if (DOMAIN_NS != 0)
mov r3,lr // Get EXC_RETURN
lsls r3,r3,#25 // Check domain of interrupted thread (EXC_RETURN bit 6 -> N flag)
bmi SVC_ContextSaveSP // Branch if secure
#endif
#ifdef RTX_STACK_CHECK
subs r0,r0,#32 // Calculate SP: space for R4..R11
SVC_ContextSaveSP:
str r0,[r1,#TCB_SP_OFS] // Store SP before the check so overflow is detectable
mov r3,lr // Get EXC_RETURN
movs r0,#TCB_SF_OFS // Get TCB.stack_frame offset
strb r3,[r1,r0] // Store stack frame information
push {r1,r2} // Save osRtxInfo.thread.run: curr & next
mov r0,r1 // Parameter: osRtxInfo.thread.run.curr
bl osRtxThreadStackCheck // Check if thread stack is overrun
pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next
cbnz r0,SVC_ContextSaveRegs // Branch when stack check is ok
movs r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id
bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next
str r2,[r3] // osRtxInfo.thread.run: curr = next
movs r1,#0 // Simulate deleted running thread
b SVC_ContextRestore // Branch to context restore handling
SVC_ContextSaveRegs:
#if (DOMAIN_NS != 0)
movs r0,#TCB_SF_OFS // Get TCB.stack_frame offset
ldrsb r3,[r1,r0] // Load stack frame information
lsls r3,r3,#25 // Check domain of interrupted thread
bmi SVC_ContextRestore // Branch if secure (R4..R11 kept on secure side)
#endif
ldr r0,[r1,#TCB_SP_OFS] // Load SP
stmia r0!,{r4-r7} // Save R4..R7
mov r4,r8
mov r5,r9
mov r6,r10
mov r7,r11
stmia r0!,{r4-r7} // Save R8..R11 (moved via low regs: Thumb-1 STM is low-reg only)
#else
subs r0,r0,#32 // Calculate SP: space for R4..R11
stmia r0!,{r4-r7} // Save R4..R7
mov r4,r8
mov r5,r9
mov r6,r10
mov r7,r11
stmia r0!,{r4-r7} // Save R8..R11 (moved via low regs: Thumb-1 STM is low-reg only)
subs r0,r0,#32 // Adjust address
SVC_ContextSaveSP:
str r0,[r1,#TCB_SP_OFS] // Store SP
mov r3,lr // Get EXC_RETURN
movs r0,#TCB_SF_OFS // Get TCB.stack_frame offset
strb r3,[r1,r0] // Store stack frame information
#endif // RTX_STACK_CHECK
SVC_ContextRestore:
movs r4,r2 // Assign osRtxInfo.thread.run.next to R4
#ifdef RTX_EXECUTION_ZONE
movs r3,#TCB_ZONE_OFS // Get TCB.zone offset
ldrb r0,[r2,r3] // Load osRtxInfo.thread.run.next: zone
cbz r1,SVC_ZoneSetup // Branch if running thread is deleted
ldrb r1,[r1,r3] // Load osRtxInfo.thread.run.curr: zone
cmp r0,r1 // Check if next:zone == curr:zone
beq SVC_ContextRestore_S // Branch if zone has not changed
SVC_ZoneSetup:
bl osZoneSetup_Callback // Setup zone for next thread
#endif // RTX_EXECUTION_ZONE
SVC_ContextRestore_S:
#ifdef RTX_TZ_CONTEXT
ldr r0,[r4,#TCB_TZM_OFS] // Load TrustZone memory identifier
cbz r0,SVC_ContextRestore_NS // Branch if there is no secure context
bl TZ_LoadContext_S // Load secure context
#endif
SVC_ContextRestore_NS:
ldr r0,[r4,#TCB_SM_OFS] // Load stack memory base
msr psplim,r0 // Set PSPLIM (stack-overflow limit for next thread)
movs r0,#TCB_SF_OFS // Get TCB.stack_frame offset
ldrsb r3,[r4,r0] // Load stack frame information (sign-extends EXC_RETURN)
mov lr,r3 // Set EXC_RETURN
ldr r0,[r4,#TCB_SP_OFS] // Load SP
#if (DOMAIN_NS != 0)
lsls r3,r3,#25 // Check domain of interrupted thread
bmi SVC_ContextRestoreSP // Branch if secure
#endif
adds r0,r0,#16 // Adjust address
ldmia r0!,{r4-r7} // Restore R8..R11 (via low regs: Thumb-1 LDM is low-reg only)
mov r8,r4
mov r9,r5
mov r10,r6
mov r11,r7
subs r0,r0,#32 // Adjust address
ldmia r0!,{r4-r7} // Restore R4..R7
adds r0,r0,#16 // Adjust address
SVC_ContextRestoreSP:
msr psp,r0 // Set PSP
SVC_Exit:
bx lr // Exit from handler
SVC_MSP:
mrs r0,msp // Get MSP
b SVC_Number
SVC_User:
ldr r2,=osRtxUserSVC // Load address of SVC table
ldr r3,[r2] // Load SVC maximum number
cmp r1,r3 // Check SVC number range
bhi SVC_Exit // Branch if out of range
push {r0,lr} // Save SP and EXC_RETURN
lsls r1,r1,#2 // Scale SVC number to table offset
ldr r3,[r2,r1] // Load address of SVC function
mov r12,r3
ldmia r0,{r0-r3} // Load function parameters from stack
blx r12 // Call service function
pop {r2,r3} // Restore SP and EXC_RETURN
str r0,[r2] // Store function return value
bx r3 // Return from handler
.fnend
.size SVC_Handler, .-SVC_Handler
//-----------------------------------------------------------------------
// PendSV_Handler (Armv8-M Baseline)
// Runs RTX post processing (osRtxPendSV_Handler) and then enters
// SVC_Context with LR = EXC_RETURN to switch threads if one is pending.
// Thumb-1 POP cannot write LR directly, so EXC_RETURN is popped into R1
// and moved to LR.
//-----------------------------------------------------------------------
.thumb_func
.type PendSV_Handler, %function
.global PendSV_Handler
.fnstart
.cantunwind
PendSV_Handler:
push {r0,lr} // Save EXC_RETURN (R0 only pads for 8-byte stack alignment)
bl osRtxPendSV_Handler // Call osRtxPendSV_Handler
pop {r0,r1} // Restore EXC_RETURN into R1 (POP cannot target LR on Thumb-1)
mov lr,r1 // Set EXC_RETURN
b SVC_Context // Branch to context handling
.fnend
.size PendSV_Handler, .-PendSV_Handler
//-----------------------------------------------------------------------
// SysTick_Handler (Armv8-M Baseline)
// Drives the RTX kernel tick (osRtxTick_Handler) and then enters
// SVC_Context with LR = EXC_RETURN to switch threads if the tick made a
// different thread ready.
//-----------------------------------------------------------------------
.thumb_func
.type SysTick_Handler, %function
.global SysTick_Handler
.fnstart
.cantunwind
SysTick_Handler:
push {r0,lr} // Save EXC_RETURN (R0 only pads for 8-byte stack alignment)
bl osRtxTick_Handler // Call osRtxTick_Handler
pop {r0,r1} // Restore EXC_RETURN into R1 (POP cannot target LR on Thumb-1)
mov lr,r1 // Set EXC_RETURN
b SVC_Context // Branch to context handling
.fnend
.size SysTick_Handler, .-SysTick_Handler
#ifdef RTX_SAFETY_FEATURES
//-----------------------------------------------------------------------------
// osFaultResume (Armv8-M Baseline, GCC)
// Safety-feature entry that resumes scheduling by re-entering the common
// context-switch tail (SVC_Context).
// NOTE(review): assumes the caller enters with LR holding a valid EXC_RETURN
// as SVC_Context requires — confirm against the fault-handling caller.
//-----------------------------------------------------------------------------
.thumb_func
.type osFaultResume, %function
.global osFaultResume
.fnstart
.cantunwind
osFaultResume:
b SVC_Context // Branch to context handling
.fnend
.size osFaultResume, .-osFaultResume
#endif // RTX_SAFETY_FEATURES
.end

View File

@ -0,0 +1,324 @@
/*
* Copyright (c) 2016-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: ARMv8-M Mainline Exception handlers
*
* -----------------------------------------------------------------------------
*/
.syntax unified
#include "rtx_def.h"
#if (defined(__ARM_FP) && (__ARM_FP > 0))
.equ FPU_USED, 1
#else
.equ FPU_USED, 0
#endif
#if (defined(__ARM_FEATURE_MVE) && (__ARM_FEATURE_MVE > 0))
.equ MVE_USED, 1
#else
.equ MVE_USED, 0
#endif
.equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset
.equ TCB_SM_OFS, 48 // TCB.stack_mem offset
.equ TCB_SP_OFS, 56 // TCB.SP offset
.equ TCB_SF_OFS, 34 // TCB.stack_frame offset
.equ TCB_TZM_OFS, 64 // TCB.tz_memory offset
.equ TCB_ZONE_OFS,68 // TCB.zone offset
.equ FPCCR, 0xE000EF34 // FPCCR Address
.equ osRtxErrorStackOverflow, 1 // Stack overflow
.equ osRtxErrorSVC, 6 // Invalid SVC function called
.section ".rodata"
.global irqRtxLib // Non weak library reference
irqRtxLib:
.byte 0
.thumb
.section ".text"
.align 2
.eabi_attribute Tag_ABI_align_preserved, 1
//-----------------------------------------------------------------------------
// SVC_Handler (Armv8-M Mainline, GCC)
// RTX kernel service-call entry point:
//  - SVC 0: calls the kernel service function whose address and arguments
//    are taken from the exception stack frame (R0-R3 at offsets 0..12,
//    R12 at offset 16, PC at offset 24), stores the return value back into
//    the stacked R0 slot, then falls into SVC_Context.
//  - SVC > 0: dispatches through the user SVC table (osRtxUserSVC).
// SVC_Context is the common context-switch tail, also entered from
// PendSV_Handler / SysTick_Handler / osFaultResume with:
//    R12 = SP of the interrupted thread, LR = EXC_RETURN.
// Optional features (preprocessor/assembler conditionals): SVC function
// pointer bounds checking, TrustZone context store/load, stack overflow
// checking, FP/MVE lazy state handling, and execution-zone setup.
//-----------------------------------------------------------------------------
.thumb_func
.type SVC_Handler, %function
.global SVC_Handler
.fnstart
.cantunwind
SVC_Handler:
tst lr,#0x04 // Determine return stack from EXC_RETURN bit 2
ite eq
mrseq r0,msp // Get MSP if return stack is MSP
mrsne r0,psp // Get PSP if return stack is PSP
ldr r1,[r0,#24] // Load saved PC from stack
ldrb r1,[r1,#-2] // Load SVC number
cmp r1,#0 // Check SVC number
bne SVC_User // Branch if not SVC 0
#ifdef RTX_SVC_PTR_CHECK
// Validate that the called kernel function lies inside the SVC veneer region.
ldr r12,[r0,#16] // Load function address from stack
sub r1,r12,#1 // Clear T-bit of function address
lsls r2,r1,#30 // Check if 4-byte aligned
beq SVC_PtrBoundsCheck // Branch if address is aligned
SVC_PtrInvalid:
push {r0,lr} // Save SP and EXC_RETURN
movs r0,#osRtxErrorSVC // Parameter: code
mov r1,r12 // Parameter: object_id
bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify
pop {r12,lr} // Restore SP and EXC_RETURN
b SVC_Context // Branch to context handling
SVC_PtrBoundsCheck:
ldr r2,=Image$$RTX_SVC_VENEERS$$Base
ldr r3,=Image$$RTX_SVC_VENEERS$$Length
subs r2,r1,r2 // Subtract SVC table base address
cmp r2,r3 // Compare with SVC table boundaries
bhs SVC_PtrInvalid // Branch if address is out of bounds
#endif // RTX_SVC_PTR_CHECK
push {r0,lr} // Save SP and EXC_RETURN
ldm r0,{r0-r3,r12} // Load function parameters and address from stack
blx r12 // Call service function
pop {r12,lr} // Restore SP and EXC_RETURN
str r0,[r12] // Store function return value
// Common context-switch tail: R12 = interrupted SP, LR = EXC_RETURN.
SVC_Context:
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldm r3,{r1,r2} // Load osRtxInfo.thread.run: curr & next
cmp r1,r2 // Check if thread switch is required
it eq
bxeq lr // Exit when threads are the same
str r2,[r3] // osRtxInfo.thread.run: curr = next
.if (FPU_USED != 0) || (MVE_USED != 0)
cbnz r1,SVC_ContextSave // Branch if running thread is not deleted
// Deleted thread: discard any pending lazy FP state before restoring next.
SVC_FP_LazyState:
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
bne SVC_ContextRestore // Branch if not extended stack frame
ldr r3,=FPCCR // FPCCR Address
ldr r0,[r3] // Load FPCCR
bic r0,r0,#1 // Clear LSPACT (Lazy state preservation)
str r0,[r3] // Store FPCCR
b SVC_ContextRestore // Branch to context restore handling
.else
cbz r1,SVC_ContextRestore // Branch if running thread is deleted
.endif
SVC_ContextSave:
#ifdef RTX_TZ_CONTEXT
ldr r0,[r1,#TCB_TZM_OFS] // Load TrustZone memory identifier
cbz r0,SVC_ContextSave_NS // Branch if there is no secure context
push {r1,r2,r12,lr} // Save registers and EXC_RETURN
bl TZ_StoreContext_S // Store secure context
pop {r1,r2,r12,lr} // Restore registers and EXC_RETURN
#endif
SVC_ContextSave_NS:
#if (DOMAIN_NS != 0)
tst lr,#0x40 // Check domain of interrupted thread
bne SVC_ContextSaveSP // Branch if secure
#endif
#ifdef RTX_STACK_CHECK
// Pre-compute the final SP and store it before checking for overflow.
sub r12,r12,#32 // Calculate SP: space for R4..R11
.if (FPU_USED != 0) || (MVE_USED != 0)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
subeq r12,r12,#64 // Additional space for S16..S31
.endif
SVC_ContextSaveSP:
str r12,[r1,#TCB_SP_OFS] // Store SP
strb lr, [r1,#TCB_SF_OFS] // Store stack frame information
push {r1,r2} // Save osRtxInfo.thread.run: curr & next
mov r0,r1 // Parameter: osRtxInfo.thread.run.curr
bl osRtxThreadStackCheck // Check if thread stack is overrun
pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next
cbnz r0,SVC_ContextSaveRegs // Branch when stack check is ok
// Overflow: report the error and continue as if the thread was deleted.
.if (FPU_USED != 0) || (MVE_USED != 0)
mov r4,r1 // Assign osRtxInfo.thread.run.curr to R4
.endif
movs r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id
bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next
str r2,[r3] // osRtxInfo.thread.run: curr = next
movs r1,#0 // Simulate deleted running thread
.if (FPU_USED != 0) || (MVE_USED != 0)
ldrsb lr,[r4,#TCB_SF_OFS] // Load stack frame information
b SVC_FP_LazyState // Branch to FP lazy state handling
.else
b SVC_ContextRestore // Branch to context restore handling
.endif
SVC_ContextSaveRegs:
ldrsb lr,[r1,#TCB_SF_OFS] // Load stack frame information
#if (DOMAIN_NS != 0)
tst lr,#0x40 // Check domain of interrupted thread
bne SVC_ContextRestore // Branch if secure
#endif
ldr r12,[r1,#TCB_SP_OFS] // Load SP
.if (FPU_USED != 0) || (MVE_USED != 0)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vstmiaeq r12!,{s16-s31} // Save VFP S16..S31
.endif
stm r12,{r4-r11} // Save R4..R11
#else
stmdb r12!,{r4-r11} // Save R4..R11
.if (FPU_USED != 0) || (MVE_USED != 0)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vstmdbeq r12!,{s16-s31} // Save VFP S16..S31
.endif
SVC_ContextSaveSP:
str r12,[r1,#TCB_SP_OFS] // Store SP
strb lr, [r1,#TCB_SF_OFS] // Store stack frame information
#endif // RTX_STACK_CHECK
SVC_ContextRestore:
movs r4,r2 // Assign osRtxInfo.thread.run.next to R4, clear Z flag
#ifdef RTX_EXECUTION_ZONE
ldrb r0,[r2,#TCB_ZONE_OFS] // Load osRtxInfo.thread.run.next: zone
cbz r1,SVC_ZoneSetup // Branch if running thread is deleted (Z flag unchanged)
ldrb r1,[r1,#TCB_ZONE_OFS] // Load osRtxInfo.thread.run.curr: zone
cmp r0,r1 // Check if next:zone == curr:zone
SVC_ZoneSetup:
it ne // If zone has changed or running thread is deleted
blne osZoneSetup_Callback // Setup zone for next thread
#endif // RTX_EXECUTION_ZONE
#ifdef RTX_TZ_CONTEXT
ldr r0,[r4,#TCB_TZM_OFS] // Load TrustZone memory identifier
cmp r0,#0 // Check TrustZone memory identifier
it ne // If TrustZone memory allocated
blne TZ_LoadContext_S // Load secure context
#endif
ldr r0,[r4,#TCB_SP_OFS] // Load SP
ldr r1,[r4,#TCB_SM_OFS] // Load stack memory base
msr psplim,r1 // Set PSPLIM
ldrsb lr,[r4,#TCB_SF_OFS] // Load stack frame information
#if (DOMAIN_NS != 0)
tst lr,#0x40 // Check domain of interrupted thread
itt ne // If secure
msrne psp,r0 // Set PSP
bxne lr // Exit from handler
#endif
.if (FPU_USED != 0) || (MVE_USED != 0)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vldmiaeq r0!,{s16-s31} // Restore VFP S16..S31
.endif
ldmia r0!,{r4-r11} // Restore R4..R11
msr psp,r0 // Set PSP
SVC_Exit:
bx lr // Exit from handler
// User SVC dispatch: SVC number 1..max indexes osRtxUserSVC[] (word 0 holds
// the maximum valid number).
SVC_User:
ldr r2,=osRtxUserSVC // Load address of SVC table
ldr r3,[r2] // Load SVC maximum number
cmp r1,r3 // Check SVC number range
bhi SVC_Exit // Branch if out of range
push {r0,lr} // Save SP and EXC_RETURN
ldr r12,[r2,r1,lsl #2] // Load address of SVC function
ldm r0,{r0-r3} // Load function parameters from stack
blx r12 // Call service function
pop {r12,lr} // Restore SP and EXC_RETURN
str r0,[r12] // Store function return value
bx lr // Return from handler
.fnend
.size SVC_Handler, .-SVC_Handler
//-----------------------------------------------------------------------------
// PendSV_Handler (Armv8-M Mainline, GCC)
// Runs RTX pend-service processing, then enters the common context-switch
// tail. SVC_Context expects R12 = interrupted thread's PSP and
// LR = EXC_RETURN; R0 is pushed only for 8-byte stack alignment.
//-----------------------------------------------------------------------------
.thumb_func
.type PendSV_Handler, %function
.global PendSV_Handler
.fnstart
.cantunwind
PendSV_Handler:
push {r0,lr} // Save EXC_RETURN
bl osRtxPendSV_Handler // Call osRtxPendSV_Handler
pop {r0,lr} // Restore EXC_RETURN
mrs r12,psp // Save PSP to R12
b SVC_Context // Branch to context handling
.fnend
.size PendSV_Handler, .-PendSV_Handler
//-----------------------------------------------------------------------------
// SysTick_Handler (Armv8-M Mainline, GCC)
// Runs the RTX kernel tick, then enters the common context-switch tail.
// SVC_Context expects R12 = interrupted thread's PSP and LR = EXC_RETURN;
// R0 is pushed only for 8-byte stack alignment.
//-----------------------------------------------------------------------------
.thumb_func
.type SysTick_Handler, %function
.global SysTick_Handler
.fnstart
.cantunwind
SysTick_Handler:
push {r0,lr} // Save EXC_RETURN
bl osRtxTick_Handler // Call osRtxTick_Handler
pop {r0,lr} // Restore EXC_RETURN
mrs r12,psp // Save PSP to R12
b SVC_Context // Branch to context handling
.fnend
.size SysTick_Handler, .-SysTick_Handler
#ifdef RTX_SAFETY_FEATURES
//-----------------------------------------------------------------------------
// osFaultResume (Armv8-M Mainline, GCC)
// Safety-feature entry that resumes scheduling via the common context-switch
// tail; loads R12 = PSP as SVC_Context requires.
// NOTE(review): assumes the caller enters with LR holding a valid EXC_RETURN
// — confirm against the fault-handling caller.
//-----------------------------------------------------------------------------
.thumb_func
.type osFaultResume, %function
.global osFaultResume
.fnstart
.cantunwind
osFaultResume:
mrs r12,psp // Save PSP to R12
b SVC_Context // Branch to context handling
.fnend
.size osFaultResume, .-osFaultResume
#endif // RTX_SAFETY_FEATURES
.end

View File

@ -0,0 +1,249 @@
;/*
; * Copyright (c) 2013-2023 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; *
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: ARMv6-M Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
NAME irq_armv6m.s
#include "rtx_def.h"
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SP_OFS EQU 56 ; TCB.SP offset
TCB_ZONE_OFS EQU 68 ; TCB.zone offset
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
osRtxErrorSVC EQU 6 ; Invalid SVC function called
PRESERVE8
SECTION .rodata:DATA:NOROOT(2)
EXPORT irqRtxLib
irqRtxLib DCB 0 ; Non weak library reference
THUMB
SECTION .text:CODE:NOROOT(2)
;-----------------------------------------------------------------------------
; SVC_Handler (Armv6-M, IAR)
; RTX kernel service-call entry point:
;  - SVC 0: calls the kernel service function via BLX R7. On Armv6-M the
;    function address is passed in R7 by the caller-side SVC veneer and
;    survives exception entry because R4-R7 are not auto-stacked
;    (NOTE(review): confirm against the SVC wrapper definitions).
;  - SVC > 0: dispatches through the user SVC table (osRtxUserSVC).
; SVC_Context is the common context-switch tail, also entered from
; PendSV_Handler and SysTick_Handler with LR = EXC_RETURN.
;-----------------------------------------------------------------------------
SVC_Handler
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
#ifdef RTX_STACK_CHECK
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
#endif
#ifdef RTX_SVC_PTR_CHECK
IMPORT |Image$$RTX_SVC_VENEERS$$Base|
IMPORT |Image$$RTX_SVC_VENEERS$$Length|
IMPORT osRtxKernelErrorNotify
#endif
#ifdef RTX_EXECUTION_ZONE
IMPORT osZoneSetup_Callback
#endif
MOV R0,LR
LSRS R0,R0,#3 ; Determine return stack from EXC_RETURN bit 2
BCC SVC_MSP ; Branch if return stack is MSP
MRS R0,PSP ; Get PSP
SVC_Number
LDR R1,[R0,#24] ; Load saved PC from stack
SUBS R1,R1,#2 ; Point to SVC instruction
LDRB R1,[R1] ; Load SVC number
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
#ifdef RTX_SVC_PTR_CHECK
; Validate the function address in R7 against the SVC veneer region.
SUBS R1,R7,#0x01 ; Clear T-bit of function address
LSLS R2,R1,#29 ; Check if 8-byte aligned
BEQ SVC_PtrBoundsCheck ; Branch if address is aligned
SVC_PtrInvalid
PUSH {R0,LR} ; Save SP and EXC_RETURN
MOVS R0,#osRtxErrorSVC ; Parameter: code
MOV R1,R7 ; Parameter: object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
POP {R2,R3} ; Restore SP and EXC_RETURN
MOV LR,R3 ; Set EXC_RETURN
B SVC_Context ; Branch to context handling
SVC_PtrBoundsCheck
LDR R2,=|Image$$RTX_SVC_VENEERS$$Base|
LDR R3,=|Image$$RTX_SVC_VENEERS$$Length|
SUBS R2,R1,R2 ; Subtract SVC table base address
CMP R2,R3 ; Compare with SVC table boundaries
BHS SVC_PtrInvalid ; Branch if address is out of bounds
#endif
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDMIA R0,{R0-R3} ; Load function parameters from stack
BLX R7 ; Call service function (address in R7)
POP {R2,R3} ; Restore SP and EXC_RETURN
STR R0,[R2] ; Store function return value
MOV LR,R3 ; Set EXC_RETURN
; Common context-switch tail: LR = EXC_RETURN of the interrupted context.
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDMIA R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
BEQ SVC_Exit ; Branch when threads are the same
SUBS R3,R3,#8 ; Adjust address (undo LDMIA writeback)
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
CMP R1,#0 ; Check if running thread is deleted
BEQ SVC_ContextRestore ; Branch if running thread is deleted
SVC_ContextSave
MRS R0,PSP ; Get PSP
SUBS R0,R0,#32 ; Calculate SP: space for R4..R11
STR R0,[R1,#TCB_SP_OFS] ; Store SP
#ifdef RTX_STACK_CHECK
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CMP R0,#0 ; Check stack check result
BNE SVC_ContextSaveRegs ; Branch when stack check is ok
; Overflow: report the error and continue as if the thread was deleted.
MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
MOVS R1,#0 ; Simulate deleted running thread
B SVC_ContextRestore ; Branch to context restore handling
SVC_ContextSaveRegs
LDR R0,[R1,#TCB_SP_OFS] ; Load SP
#endif
; High registers are copied through R4-R7 because Thumb-1 STMIA only
; accepts low registers.
STMIA R0!,{R4-R7} ; Save R4..R7
MOV R4,R8
MOV R5,R9
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} ; Save R8..R11
SVC_ContextRestore
MOVS R4,R2 ; Assign osRtxInfo.thread.run.next to R4
#ifdef RTX_EXECUTION_ZONE
MOVS R3,#TCB_ZONE_OFS ; Get TCB.zone offset
LDRB R0,[R2,R3] ; Load osRtxInfo.thread.run.next: zone
CMP R1,#0 ; Check if running thread is deleted
BEQ SVC_ZoneSetup ; Branch if running thread is deleted
LDRB R1,[R1,R3] ; Load osRtxInfo.thread.run.curr: zone
CMP R0,R1 ; Check if next:zone == curr:zone
BEQ SVC_ContextRestore_N ; Branch if zone has not changed
SVC_ZoneSetup
BL osZoneSetup_Callback ; Setup zone for next thread
#endif
SVC_ContextRestore_N
LDR R0,[R4,#TCB_SP_OFS] ; Load SP
ADDS R0,R0,#16 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R8..R11
MOV R8,R4
MOV R9,R5
MOV R10,R6
MOV R11,R7
MSR PSP,R0 ; Set PSP
SUBS R0,R0,#32 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R4..R7
; Return with EXC_RETURN = 0xFFFFFFFD (Thread mode, process stack).
MOVS R0,#2 ; Binary complement of 0xFFFFFFFD
MVNS R0,R0 ; Set EXC_RETURN value
BX R0 ; Exit from handler
SVC_MSP
MRS R0,MSP ; Get MSP
B SVC_Number
SVC_Exit
BX LR ; Exit from handler
; User SVC dispatch: SVC number 1..max indexes osRtxUserSVC[] (word 0 holds
; the maximum valid number).
SVC_User
LDR R2,=osRtxUserSVC ; Load address of SVC table
LDR R3,[R2] ; Load SVC maximum number
CMP R1,R3 ; Check SVC number range
BHI SVC_Exit ; Branch if out of range
PUSH {R0,LR} ; Save SP and EXC_RETURN
LSLS R1,R1,#2 ; Convert SVC number to table offset
LDR R3,[R2,R1] ; Load address of SVC function
MOV R12,R3
LDMIA R0,{R0-R3} ; Load function parameters from stack
BLX R12 ; Call service function
POP {R2,R3} ; Restore SP and EXC_RETURN
STR R0,[R2] ; Store function return value
BX R3 ; Return from handler
;-----------------------------------------------------------------------------
; PendSV_Handler (Armv6-M, IAR)
; Runs RTX pend-service processing, then enters the common context-switch
; tail (SVC_Context) with LR = EXC_RETURN. R0 is pushed only to keep the
; stack 8-byte aligned; EXC_RETURN is restored via R1 because Thumb-1 POP
; cannot write LR directly.
;-----------------------------------------------------------------------------
PendSV_Handler
EXPORT PendSV_Handler
IMPORT osRtxPendSV_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B SVC_Context ; Branch to context handling
;-----------------------------------------------------------------------------
; SysTick_Handler (Armv6-M, IAR)
; Runs the RTX kernel tick, then enters the common context-switch tail
; (SVC_Context) with LR = EXC_RETURN. R0 is pushed only for 8-byte stack
; alignment; EXC_RETURN is restored via R1 (Thumb-1 POP cannot write LR).
;-----------------------------------------------------------------------------
SysTick_Handler
EXPORT SysTick_Handler
IMPORT osRtxTick_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B SVC_Context ; Branch to context handling
#ifdef RTX_SAFETY_FEATURES
;-----------------------------------------------------------------------------
; osFaultResume (Armv6-M, IAR)
; Safety-feature entry that resumes scheduling by re-entering the common
; context-switch tail (SVC_Context).
; NOTE(review): assumes the caller enters with LR holding a valid EXC_RETURN
; as SVC_Context requires — confirm against the fault-handling caller.
;-----------------------------------------------------------------------------
osFaultResume PROC
EXPORT osFaultResume
B SVC_Context ; Branch to context handling
ALIGN
ENDP
#endif
END

View File

@ -0,0 +1,465 @@
;/*
; * Copyright (c) 2013-2023 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; *
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: ARMv7-A Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
NAME irq_armv7a.s
#include "rtx_def.h"
MODE_FIQ EQU 0x11
MODE_IRQ EQU 0x12
MODE_SVC EQU 0x13
MODE_ABT EQU 0x17
MODE_UND EQU 0x1B
CPSR_BIT_T EQU 0x20
K_STATE_RUNNING EQU 2 ; osKernelState_t::osKernelRunning
I_K_STATE_OFS EQU 8 ; osRtxInfo.kernel.state offset
I_TICK_IRQN_OFS EQU 16 ; osRtxInfo.tick_irqn offset
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SP_FRAME EQU 34 ; osRtxThread_t.stack_frame offset
TCB_SP_OFS EQU 56 ; osRtxThread_t.sp offset
TCB_ZONE_OFS EQU 68 ; osRtxThread_t.zone offset
PRESERVE8
SECTION .rodata:DATA:NOROOT(2)
EXPORT irqRtxLib
irqRtxLib DCB 0 ; Non weak library reference
SECTION .data:DATA:NOROOT(2)
EXPORT SVC_Active
EXPORT IRQ_PendSV
IRQ_NestLevel DCD 0 ; IRQ nesting level counter
SVC_Active DCB 0 ; SVC Handler Active
IRQ_PendSV DCB 0 ; Pending SVC flag
SECTION .text:CODE:NOROOT(2)
;-----------------------------------------------------------------------------
; Undef_Handler (Armv7-A, IAR)
; Undefined-instruction exception entry. Determines whether the offending
; instruction was ARM (4 bytes) or Thumb (2/4 bytes), reconstructs the full
; opcode, and calls CUndefHandler(opcode, size, address). The C handler's
; return value (in R0) is subtracted from the stacked LR, so it selects
; whether the instruction is retried or skipped on exception return.
;-----------------------------------------------------------------------------
Undef_Handler
EXPORT Undef_Handler
IMPORT CUndefHandler
SRSFD SP!, #MODE_UND
PUSH {R0-R4, R12} ; Save APCS corruptible registers to UND mode stack
MRS R0, SPSR
TST R0, #CPSR_BIT_T ; Check mode
MOVEQ R1, #4 ; R1 = 4 ARM mode
MOVNE R1, #2 ; R1 = 2 Thumb mode
SUB R0, LR, R1
LDREQ R0, [R0] ; ARM mode - R0 points to offending instruction
BEQ Undef_Cont
; Thumb instruction
; Determine if it is a 32-bit Thumb instruction
LDRH R0, [R0]
MOV R2, #0x1C
CMP R2, R0, LSR #11
BHS Undef_Cont ; 16-bit Thumb instruction
; 32-bit Thumb instruction. Unaligned - reconstruct the offending instruction
LDRH R2, [LR]
ORR R0, R2, R0, LSL #16
Undef_Cont
MOV R2, LR ; Set LR to third argument
AND R12, SP, #4 ; Ensure stack is 8-byte aligned
SUB SP, SP, R12 ; Adjust stack
PUSH {R12, LR} ; Store stack adjustment and dummy LR
; R0 =Offending instruction, R1 =2(Thumb) or =4(ARM)
BL CUndefHandler
POP {R12, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R12 ; Unadjust stack
LDR LR, [SP, #24] ; Restore stacked LR and possibly adjust for retry
SUB LR, LR, R0
LDR R0, [SP, #28] ; Restore stacked SPSR
MSR SPSR_CXSF, R0
CLREX ; Clear exclusive monitor
POP {R0-R4, R12} ; Restore stacked APCS registers
ADD SP, SP, #8 ; Adjust SP for already-restored banked registers
MOVS PC, LR
;-----------------------------------------------------------------------------
; PAbt_Handler (Armv7-A, IAR)
; Prefetch-abort exception entry. Reads the instruction fault status and
; address registers (IFSR/IFAR) and passes them, together with the return
; address, to CPAbtHandler(IFSR, IFAR, LR). Stack is realigned to 8 bytes
; around the C call per AAPCS.
;-----------------------------------------------------------------------------
PAbt_Handler
EXPORT PAbt_Handler
IMPORT CPAbtHandler
SUB LR, LR, #4 ; Pre-adjust LR
SRSFD SP!, #MODE_ABT ; Save LR and SPRS to ABT mode stack
PUSH {R0-R4, R12} ; Save APCS corruptible registers to ABT mode stack
MRC p15, 0, R0, c5, c0, 1 ; IFSR
MRC p15, 0, R1, c6, c0, 2 ; IFAR
MOV R2, LR ; Set LR to third argument
AND R12, SP, #4 ; Ensure stack is 8-byte aligned
SUB SP, SP, R12 ; Adjust stack
PUSH {R12, LR} ; Store stack adjustment and dummy LR
BL CPAbtHandler
POP {R12, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R12 ; Unadjust stack
CLREX ; Clear exclusive monitor
POP {R0-R4, R12} ; Restore stack APCS registers
RFEFD SP! ; Return from exception
;-----------------------------------------------------------------------------
; DAbt_Handler (Armv7-A, IAR)
; Data-abort exception entry. Reads the data fault status and address
; registers (DFSR/DFAR) and passes them, together with the return address,
; to CDAbtHandler(DFSR, DFAR, LR). Stack is realigned to 8 bytes around the
; C call per AAPCS.
;-----------------------------------------------------------------------------
DAbt_Handler
EXPORT DAbt_Handler
IMPORT CDAbtHandler
SUB LR, LR, #8 ; Pre-adjust LR
SRSFD SP!, #MODE_ABT ; Save LR and SPRS to ABT mode stack
PUSH {R0-R4, R12} ; Save APCS corruptible registers to ABT mode stack
MRC p15, 0, R0, c5, c0, 0 ; DFSR
MRC p15, 0, R1, c6, c0, 0 ; DFAR
MOV R2, LR ; Set LR to third argument
AND R12, SP, #4 ; Ensure stack is 8-byte aligned
SUB SP, SP, R12 ; Adjust stack
PUSH {R12, LR} ; Store stack adjustment and dummy LR
BL CDAbtHandler
POP {R12, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R12 ; Unadjust stack
CLREX ; Clear exclusive monitor
POP {R0-R4, R12} ; Restore stacked APCS registers
RFEFD SP! ; Return from exception
;-----------------------------------------------------------------------------
; IRQ_Handler (Armv7-A, IAR)
; Generic IRQ entry. Switches to SVC mode, tracks nesting depth in
; IRQ_NestLevel, queries the interrupt controller for the active IRQ and its
; registered handler, and calls the handler with interrupts re-enabled
; (allowing nesting). After signalling end-of-interrupt it runs
; osRtxContextSwitch, which performs a thread switch only at nesting level 1.
;-----------------------------------------------------------------------------
IRQ_Handler
EXPORT IRQ_Handler
IMPORT IRQ_GetActiveIRQ
IMPORT IRQ_GetHandler
IMPORT IRQ_EndOfInterrupt
SUB LR, LR, #4 ; Pre-adjust LR
SRSFD SP!, #MODE_SVC ; Save LR_irq and SPSR_irq on to the SVC stack
CPS #MODE_SVC ; Change to SVC mode
PUSH {R0-R3, R12, LR} ; Save APCS corruptible registers
LDR R0, =IRQ_NestLevel
LDR R1, [R0]
ADD R1, R1, #1 ; Increment IRQ nesting level
STR R1, [R0]
MOV R3, SP ; Move SP into R3
AND R3, R3, #4 ; Get stack adjustment to ensure 8-byte alignment
SUB SP, SP, R3 ; Adjust stack
PUSH {R3, R4} ; Store stack adjustment(R3) and user data(R4)
BLX IRQ_GetActiveIRQ ; Retrieve interrupt ID into R0
MOV R4, R0 ; Move interrupt ID to R4
BLX IRQ_GetHandler ; Retrieve interrupt handler address for current ID
CMP R0, #0 ; Check if handler address is 0
BEQ IRQ_End ; If 0, end interrupt and return
CPSIE i ; Re-enable interrupts
BLX R0 ; Call IRQ handler
CPSID i ; Disable interrupts
IRQ_End
MOV R0, R4 ; Move interrupt ID to R0
BLX IRQ_EndOfInterrupt ; Signal end of interrupt
POP {R3, R4} ; Restore stack adjustment(R3) and user data(R4)
ADD SP, SP, R3 ; Unadjust stack
BL osRtxContextSwitch ; Continue in context switcher
LDR R0, =IRQ_NestLevel
LDR R1, [R0]
SUBS R1, R1, #1 ; Decrement IRQ nesting level
STR R1, [R0]
CLREX ; Clear exclusive monitor for interrupted code
POP {R0-R3, R12, LR} ; Restore stacked APCS registers
RFEFD SP! ; Return from IRQ handler
;-----------------------------------------------------------------------------
; SVC_Handler (Armv7-A, IAR)
; Supervisor-call entry. Extracts the SVC number from the SVC instruction
; (ARM or Thumb encoding, selected via the SPSR T bit):
;  - SVC 0: kernel service call. The function address comes from the stacked
;    R12; the OS Tick interrupt is disabled around the call while the kernel
;    is running, SVC_Active/IRQ_NestLevel are maintained, and
;    osRtxContextSwitch runs afterwards to switch threads if required.
;  - SVC > 0: dispatches through the user SVC table (osRtxUserSVC), whose
;    word 0 holds the maximum valid number.
; The kernel SVC function itself executes with interrupts re-enabled.
;-----------------------------------------------------------------------------
SVC_Handler
EXPORT SVC_Handler
IMPORT IRQ_Disable
IMPORT IRQ_Enable
IMPORT osRtxUserSVC
IMPORT osRtxInfo
SRSFD SP!, #MODE_SVC ; Store SPSR_svc and LR_svc onto SVC stack
PUSH {R12, LR}
MRS R12, SPSR ; Load SPSR
TST R12, #CPSR_BIT_T ; Thumb bit set?
LDRHNE R12, [LR,#-2] ; Thumb: load halfword
BICNE R12, R12, #0xFF00 ; extract SVC number
LDREQ R12, [LR,#-4] ; ARM: load word
BICEQ R12, R12, #0xFF000000 ; extract SVC number
CMP R12, #0 ; Compare SVC number
BNE SVC_User ; Branch if User SVC
PUSH {R0-R3} ; Push arguments to stack
LDR R0, =SVC_Active
MOV R1, #1
STRB R1, [R0] ; Set SVC Handler Active
LDR R0, =IRQ_NestLevel
LDR R1, [R0]
ADD R1, R1, #1 ; Increment IRQ nesting level
STR R1, [R0]
LDR R0, =osRtxInfo
LDR R1, [R0, #I_K_STATE_OFS] ; Load RTX5 kernel state
CMP R1, #K_STATE_RUNNING ; Check osKernelRunning
BLT SVC_FuncCall ; Continue if kernel is not running
LDR R0, [R0, #I_TICK_IRQN_OFS] ; Load OS Tick irqn
BLX IRQ_Disable ; Disable OS Tick interrupt
SVC_FuncCall
LDM SP, {R0-R3, R12} ; Reload R0-R3 and R12 from stack
CPSIE i ; Re-enable interrupts
BLX R12 ; Branch to SVC function
CPSID i ; Disable interrupts
STR R0, [SP] ; Store function return value
LDR R0, =osRtxInfo
LDR R1, [R0, #I_K_STATE_OFS] ; Load RTX5 kernel state
CMP R1, #K_STATE_RUNNING ; Check osKernelRunning
BLT SVC_ContextCheck ; Continue if kernel is not running
LDR R0, [R0, #I_TICK_IRQN_OFS] ; Load OS Tick irqn
BLX IRQ_Enable ; Enable OS Tick interrupt
SVC_ContextCheck
BL osRtxContextSwitch ; Continue in context switcher
LDR R0, =IRQ_NestLevel
LDR R1, [R0]
SUB R1, R1, #1 ; Decrement IRQ nesting level
STR R1, [R0]
LDR R0, =SVC_Active
MOV R1, #0
STRB R1, [R0] ; Clear SVC Handler Active
CLREX ; Clear exclusive monitor
POP {R0-R3, R12, LR} ; Restore stacked APCS registers
RFEFD SP! ; Return from exception
SVC_User
PUSH {R4, R5}
LDR R5,=osRtxUserSVC ; Load address of SVC table
LDR R4,[R5] ; Load SVC maximum number
CMP R12,R4 ; Check SVC number range
BHI SVC_Done ; Branch if out of range
LDR R12,[R5,R12,LSL #2] ; Load SVC Function Address
BLX R12 ; Call SVC Function
SVC_Done
CLREX ; Clear exclusive monitor
POP {R4, R5, R12, LR}
RFEFD SP! ; Return from exception
;-----------------------------------------------------------------------------
; osRtxContextSwitch (Armv7-A, IAR)
; Common context switcher, called (with interrupts disabled, in SVC mode)
; from IRQ_Handler and SVC_Handler. Acts only at interrupt nesting level 1.
; Saves the current thread's context onto its user-mode stack (R4-R11, the
; stacked R0-R3/R12, LR_usr, PC, CPSR, plus D0-D15[/D16-D31] and FPSCR when
; the CPACR shows VFP/NEON access enabled), runs osRtxPendSV_Handler while
; the IRQ_PendSV flag is set (with the OS Tick interrupt disabled and IRQs
; otherwise re-enabled), then restores the next thread's context, enabling
; or disabling VFP/NEON access via CPACR according to the thread's saved
; stack-frame information. Banked user-mode SP/LR are accessed with the
; LDM/STM user-register (^) forms.
;-----------------------------------------------------------------------------
osRtxContextSwitch
EXPORT osRtxContextSwitch
IMPORT osRtxPendSV_Handler
IMPORT osRtxInfo
#ifdef RTX_EXECUTION_ZONE
IMPORT osZoneSetup_Callback
#endif
IMPORT IRQ_Disable
IMPORT IRQ_Enable
PUSH {LR}
; Check interrupt nesting level
LDR R0, =IRQ_NestLevel
LDR R1, [R0] ; Load IRQ nest level
CMP R1, #1
BNE osRtxContextExit ; Nesting interrupts, exit context switcher
LDR R12, =osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.run
LDM R12, {R0, R1} ; Load osRtxInfo.thread.run: curr & next
LDR R2, =IRQ_PendSV ; Load address of IRQ_PendSV flag
LDRB R3, [R2] ; Load PendSV flag
CMP R0, R1 ; Check if context switch is required
BNE osRtxContextCheck ; Not equal, check if context save required
CMP R3, #1 ; Compare IRQ_PendSV value
BNE osRtxContextExit ; No post processing (and no context switch requested)
osRtxContextCheck
STR R1, [R12] ; Store run.next as run.curr
; R0 = curr, R1 = next, R2 = &IRQ_PendSV, R12 = &osRtxInfo.thread.run
PUSH {R0-R2, R12}
CMP R0, #0 ; Is osRtxInfo.thread.run.curr == 0
BEQ osRtxPostProcess ; Current deleted, skip context save
osRtxContextSave
MOV LR, R0 ; Move &osRtxInfo.thread.run.curr to LR
MOV R0, SP ; Move SP_svc into R0
ADD R0, R0, #20 ; Adjust SP_svc to R0 of the basic frame
SUB SP, SP, #4
STM SP, {SP}^ ; Save SP_usr to current stack
POP {R1} ; Pop SP_usr into R1
SUB R1, R1, #64 ; Adjust SP_usr to R4 of the basic frame
STMIA R1!, {R4-R11} ; Save R4-R11 to user stack
LDMIA R0!, {R4-R8} ; Load stacked R0-R3,R12 into R4-R8
STMIA R1!, {R4-R8} ; Store them to user stack
STM R1, {LR}^ ; Store LR_usr directly
ADD R1, R1, #4 ; Adjust user sp to PC
LDMIB R0!, {R5-R6} ; Load stacked PC, CPSR
STMIA R1!, {R5-R6} ; Store them to user stack
SUB R1, R1, #64 ; Adjust SP_usr to stacked R4
; Check if VFP state need to be saved
MRC p15, 0, R2, c1, c0, 2 ; VFP/NEON access enabled? (CPACR)
AND R2, R2, #0x00F00000
CMP R2, #0x00F00000
BNE osRtxContextSave1 ; Continue, no VFP
VMRS R2, FPSCR
STMDB R1!, {R2,R12} ; Push FPSCR, maintain 8-byte alignment
VSTMDB R1!, {D0-D15} ; Save D0-D15
#ifdef __ARM_ADVANCED_SIMD__
VSTMDB R1!, {D16-D31} ; Save D16-D31
#endif
LDRB R2, [LR, #TCB_SP_FRAME] ; Load osRtxInfo.thread.run.curr frame info
#ifdef __ARM_ADVANCED_SIMD__
ORR R2, R2, #4 ; NEON state
#else
ORR R2, R2, #2 ; VFP state
#endif
STRB R2, [LR, #TCB_SP_FRAME] ; Store VFP/NEON state
osRtxContextSave1
STR R1, [LR, #TCB_SP_OFS] ; Store user sp to osRtxInfo.thread.run.curr
osRtxPostProcess
; RTX IRQ post processing check
POP {R8-R11} ; Pop R8 = curr, R9 = next, R10 = &IRQ_PendSV, R11 = &osRtxInfo.thread.run
LDRB R0, [R10] ; Load PendSV flag
CMP R0, #1 ; Compare PendSV value
BNE osRtxContextRestore ; Skip post processing if not pending
MOV R4, SP ; Move SP_svc into R4
AND R4, R4, #4 ; Get stack adjustment to ensure 8-byte alignment
SUB SP, SP, R4 ; Adjust stack
; Disable OS Tick
LDR R5, =osRtxInfo ; Load address of osRtxInfo
LDR R5, [R5, #I_TICK_IRQN_OFS] ; Load OS Tick irqn
MOV R0, R5 ; Set it as function parameter
BLX IRQ_Disable ; Disable OS Tick interrupt
MOV R6, #0 ; Set PendSV clear value
B osRtxPendCheck
osRtxPendExec
STRB R6, [R10] ; Clear PendSV flag
CPSIE i ; Re-enable interrupts
BLX osRtxPendSV_Handler ; Post process pending objects
CPSID i ; Disable interrupts
osRtxPendCheck
LDR R9, [R11, #4] ; Load osRtxInfo.thread.run.next
STR R9, [R11] ; Store run.next as run.curr
LDRB R0, [R10] ; Load PendSV flag
CMP R0, #1 ; Compare PendSV value
BEQ osRtxPendExec ; Branch to PendExec if PendSV is set
; Re-enable OS Tick
MOV R0, R5 ; Restore irqn as function parameter
BLX IRQ_Enable ; Enable OS Tick interrupt
ADD SP, SP, R4 ; Restore stack adjustment
osRtxContextRestore
#ifdef RTX_EXECUTION_ZONE
LDRB R0, [R9, #TCB_ZONE_OFS] ; Load osRtxInfo.thread.run.next: zone
CMP R8, #0 ; Check if running thread is deleted
BEQ osRtxZoneSetup ; Branch if running thread is deleted
LDRB R1, [R8, #TCB_ZONE_OFS] ; Load osRtxInfo.thread.run.curr: zone
CMP R0, R1 ; Check if next:zone == curr:zone
BEQ osRtxContextRestoreFrame ; Branch if zone has not changed
osRtxZoneSetup
BL osZoneSetup_Callback ; Setup zone for next thread
#endif
osRtxContextRestoreFrame
LDR LR, [R8, #TCB_SP_OFS] ; Load next osRtxThread_t.sp
LDRB R2, [R8, #TCB_SP_FRAME] ; Load next osRtxThread_t.stack_frame
ANDS R2, R2, #0x6 ; Check stack frame for VFP context
MRC p15, 0, R2, c1, c0, 2 ; Read CPACR
ANDEQ R2, R2, #0xFF0FFFFF ; VFP/NEON state not stacked, disable VFP/NEON
ORRNE R2, R2, #0x00F00000 ; VFP/NEON state is stacked, enable VFP/NEON
MCR p15, 0, R2, c1, c0, 2 ; Write CPACR
BEQ osRtxContextRestore1 ; No VFP
ISB ; Sync if VFP was enabled
#ifdef __ARM_ADVANCED_SIMD__
VLDMIA LR!, {D16-D31} ; Restore D16-D31
#endif
VLDMIA LR!, {D0-D15} ; Restore D0-D15
LDR R2, [LR] ; Load saved FPSCR
VMSR FPSCR, R2 ; Restore FPSCR
ADD LR, LR, #8 ; Adjust sp pointer to R4
osRtxContextRestore1
LDMIA LR!, {R4-R11} ; Restore R4-R11
ADD R12, LR, #32 ; Adjust sp and save it into R12
PUSH {R12} ; Push sp onto stack
LDM SP, {SP}^ ; Restore SP_usr directly
ADD SP, SP, #4 ; Adjust SP_svc
LDMIA LR!, {R0-R3, R12} ; Load user registers R0-R3,R12
STMIB SP!, {R0-R3, R12} ; Store them to SP_svc
LDM LR, {LR}^ ; Restore LR_usr directly
LDMIB LR!, {R0-R1} ; Load user registers PC,CPSR
ADD SP, SP, #4
STMIB SP!, {R0-R1} ; Store them to SP_svc
SUB SP, SP, #32 ; Adjust SP_svc to stacked LR
osRtxContextExit
POP {PC} ; Return
END

View File

@ -0,0 +1,275 @@
;/*
; * Copyright (c) 2013-2023 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; *
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: ARMv7-M Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
NAME irq_armv7m.s
#include "rtx_def.h"
#ifdef __ARMVFP__
FPU_USED EQU 1
#else
FPU_USED EQU 0
#endif
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SP_OFS EQU 56 ; TCB.SP offset
TCB_SF_OFS EQU 34 ; TCB.stack_frame offset
TCB_ZONE_OFS EQU 68 ; TCB.zone offset
FPCCR EQU 0xE000EF34 ; FPCCR Address
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
osRtxErrorSVC EQU 6 ; Invalid SVC function called
PRESERVE8
SECTION .rodata:DATA:NOROOT(2)
EXPORT irqRtxLib
irqRtxLib DCB 0 ; Non weak library reference
THUMB
SECTION .text:CODE:NOROOT(2)
;-----------------------------------------------------------------------
; SVC_Handler (ARMv7-M)
; SVC 0: calls the RTX kernel service function whose address/arguments are
; on the exception stack frame, then switches context when
; osRtxInfo.thread.run.curr != osRtxInfo.thread.run.next.
; SVC > 0: dispatched through the user SVC table (osRtxUserSVC).
; R12 carries the thread SP across the context-handling path.
;-----------------------------------------------------------------------
SVC_Handler
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
#ifdef RTX_STACK_CHECK
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
#endif
#ifdef RTX_SVC_PTR_CHECK
IMPORT |Image$$RTX_SVC_VENEERS$$Base|
IMPORT |Image$$RTX_SVC_VENEERS$$Length|
IMPORT osRtxKernelErrorNotify
#endif
#ifdef RTX_EXECUTION_ZONE
IMPORT osZoneSetup_Callback
#endif
TST LR,#0x04 ; Determine return stack from EXC_RETURN bit 2
ITE EQ
MRSEQ R0,MSP ; Get MSP if return stack is MSP
MRSNE R0,PSP ; Get PSP if return stack is PSP
LDR R1,[R0,#24] ; Load saved PC from stack
LDRB R1,[R1,#-2] ; Load SVC number (low byte of the SVC instruction)
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
#ifdef RTX_SVC_PTR_CHECK
; Validate that the requested service function lies inside the SVC veneer
; region before calling it (defense against corrupted stack frames).
LDR R12,[R0,#16] ; Load function address from stack
SUB R1,R12,#1 ; Clear T-bit of function address
LSLS R2,R1,#30 ; Check if 4-byte aligned
BEQ SVC_PtrBoundsCheck ; Branch if address is aligned
SVC_PtrInvalid
PUSH {R0,LR} ; Save SP and EXC_RETURN
MOVS R0,#osRtxErrorSVC ; Parameter: code
MOV R1,R12 ; Parameter: object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
POP {R12,LR} ; Restore SP and EXC_RETURN
B SVC_Context ; Branch to context handling
SVC_PtrBoundsCheck
LDR R2,=|Image$$RTX_SVC_VENEERS$$Base|
LDR R3,=|Image$$RTX_SVC_VENEERS$$Length|
SUBS R2,R1,R2 ; Subtract SVC table base address
CMP R2,R3 ; Compare with SVC table boundaries
BHS SVC_PtrInvalid ; Branch if address is out of bounds
#endif
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDM R0,{R0-R3,R12} ; Load function parameters and address from stack
BLX R12 ; Call service function
POP {R12,LR} ; Restore SP and EXC_RETURN
STR R0,[R12] ; Store function return value
SVC_Context
; Common context-switch entry, also reached from PendSV/SysTick handlers
; with R12 = PSP of the interrupted thread.
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
IT EQ
BXEQ LR ; Exit when threads are the same
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
#if (FPU_USED != 0)
CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted
SVC_FP_LazyState
; Deleted thread: discard any pending lazy FP state instead of saving it.
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
BNE SVC_ContextRestore ; Branch if not extended stack frame
LDR R3,=FPCCR ; FPCCR Address
LDR R0,[R3] ; Load FPCCR
BIC R0,R0,#1 ; Clear LSPACT (Lazy state preservation)
STR R0,[R3] ; Store FPCCR
B SVC_ContextRestore ; Branch to context restore handling
#else
CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted
#endif
SVC_ContextSave
#ifdef RTX_STACK_CHECK
; Pre-compute the final SP and verify it against the thread stack limits
; before actually writing the callee-saved registers out.
SUB R12,R12,#32 ; Calculate SP: space for R4..R11
#if (FPU_USED != 0)
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
SUBEQ R12,R12,#64 ; Additional space for S16..S31
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
#endif
STR R12,[R1,#TCB_SP_OFS] ; Store SP
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CBNZ R0,SVC_ContextSaveRegs ; Branch when stack check is ok
; Stack overflow: report it, drop the overflowing thread and switch to
; whatever thread the error callback selected as next.
#if (FPU_USED != 0)
MOV R4,R1 ; Assign osRtxInfo.thread.run.curr to R4
#endif
MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
MOVS R1,#0 ; Simulate deleted running thread
#if (FPU_USED != 0)
LDRSB LR,[R4,#TCB_SF_OFS] ; Load stack frame information
B SVC_FP_LazyState ; Branch to FP lazy state handling
#else
B SVC_ContextRestore ; Branch to context restore handling
#endif
SVC_ContextSaveRegs
LDR R12,[R1,#TCB_SP_OFS] ; Load SP
#if (FPU_USED != 0)
LDRSB LR, [R1,#TCB_SF_OFS] ; Load stack frame information
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMIAEQ R12!,{S16-S31} ; Save VFP S16..S31
#endif
STM R12,{R4-R11} ; Save R4..R11
#else
STMDB R12!,{R4-R11} ; Save R4..R11
#if (FPU_USED != 0)
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMDBEQ R12!,{S16-S31} ; Save VFP S16..S31
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
#endif
STR R12,[R1,#TCB_SP_OFS] ; Store SP
#endif
SVC_ContextRestore
MOVS R4,R2 ; Assign osRtxInfo.thread.run.next to R4, clear Z flag
#ifdef RTX_EXECUTION_ZONE
; Invoke the zone-switch callback only when the MPU/zone changes.
LDRB R0,[R2,#TCB_ZONE_OFS] ; Load osRtxInfo.thread.run.next: zone
CBZ R1,SVC_ZoneSetup ; Branch if running thread is deleted (Z flag unchanged)
LDRB R1,[R1,#TCB_ZONE_OFS] ; Load osRtxInfo.thread.run.curr: zone
CMP R0,R1 ; Check if next:zone == curr:zone
SVC_ZoneSetup
IT NE ; If zone has changed or running thread is deleted
BLNE osZoneSetup_Callback ; Setup zone for next thread
#endif
LDR R0,[R4,#TCB_SP_OFS] ; Load SP
#if (FPU_USED != 0)
LDRSB LR,[R4,#TCB_SF_OFS] ; Load stack frame information (sign-extended EXC_RETURN)
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31
#else
MVN LR,#~0xFFFFFFFD ; Set EXC_RETURN value (thread mode, PSP, basic frame)
#endif
LDMIA R0!,{R4-R11} ; Restore R4..R11
MSR PSP,R0 ; Set PSP
SVC_Exit
BX LR ; Exit from handler
SVC_User
; Non-zero SVC number: dispatch through the user-provided SVC table.
; osRtxUserSVC[0] holds the highest valid SVC number.
LDR R2,=osRtxUserSVC ; Load address of SVC table
LDR R3,[R2] ; Load SVC maximum number
CMP R1,R3 ; Check SVC number range
BHI SVC_Exit ; Branch if out of range
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDR R12,[R2,R1,LSL #2] ; Load address of SVC function
LDM R0,{R0-R3} ; Load function parameters from stack
BLX R12 ; Call service function
POP {R12,LR} ; Restore SP and EXC_RETURN
STR R0,[R12] ; Store function return value
BX LR ; Return from handler
;-----------------------------------------------------------------------
; PendSV_Handler - runs the RTX PendSV processing, then falls into the
; shared SVC_Context path with R12 = thread PSP.
;-----------------------------------------------------------------------
PendSV_Handler
EXPORT PendSV_Handler
IMPORT osRtxPendSV_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,LR} ; Restore EXC_RETURN
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
;-----------------------------------------------------------------------
; SysTick_Handler - runs the RTX tick processing, then falls into the
; shared SVC_Context path with R12 = thread PSP.
;-----------------------------------------------------------------------
SysTick_Handler
EXPORT SysTick_Handler
IMPORT osRtxTick_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,LR} ; Restore EXC_RETURN
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
#ifdef RTX_SAFETY_FEATURES
;-----------------------------------------------------------------------
; osFaultResume - resume scheduling after a fault by re-entering the
; shared SVC_Context path with R12 = thread PSP.
;-----------------------------------------------------------------------
osFaultResume PROC
EXPORT osFaultResume
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
ALIGN
ENDP
#endif
END

View File

@ -0,0 +1,307 @@
;/*
; * Copyright (c) 2016-2023 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; *
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: ARMv8-M Baseline Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
; Module header for the IAR-assembled ARMv8-M Baseline RTX handlers.
NAME irq_armv8mbl.s
#include "rtx_def.h"
; Byte offsets into RTX kernel structures; must match the C-side layouts
; declared by the RTX library (rtx_lib).
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SM_OFS EQU 48 ; TCB.stack_mem offset
TCB_SP_OFS EQU 56 ; TCB.SP offset
TCB_SF_OFS EQU 34 ; TCB.stack_frame offset
TCB_TZM_OFS EQU 64 ; TCB.tz_memory offset
TCB_ZONE_OFS EQU 68 ; TCB.zone offset
; Error codes passed to osRtxKernelErrorNotify.
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
osRtxErrorSVC EQU 6 ; Invalid SVC function called
PRESERVE8
SECTION .rodata:DATA:NOROOT(2)
EXPORT irqRtxLib
irqRtxLib DCB 0 ; Non weak library reference
SECTION .text:CODE:NOROOT(2)
THUMB
;-----------------------------------------------------------------------
; SVC_Handler (ARMv8-M Baseline)
; SVC 0: calls the kernel service function (address passed in R7 by the
; SVC veneer on Baseline), then switches context when
; osRtxInfo.thread.run.curr != osRtxInfo.thread.run.next.
; SVC > 0: dispatched through the user SVC table (osRtxUserSVC).
; Baseline has no IT blocks / no high-register STM, hence the R8..R11
; shuffles through R4..R7 below.
;-----------------------------------------------------------------------
SVC_Handler
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
#ifdef RTX_STACK_CHECK
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
#endif
#ifdef RTX_SVC_PTR_CHECK
IMPORT |Image$$RTX_SVC_VENEERS$$Base|
IMPORT |Image$$RTX_SVC_VENEERS$$Length|
IMPORT osRtxKernelErrorNotify
#endif
#ifdef RTX_EXECUTION_ZONE
IMPORT osZoneSetup_Callback
#endif
#ifdef RTX_TZ_CONTEXT
IMPORT TZ_LoadContext_S
IMPORT TZ_StoreContext_S
#endif
MOV R0,LR
LSRS R0,R0,#3 ; Determine return stack from EXC_RETURN bit 2
BCC SVC_MSP ; Branch if return stack is MSP
MRS R0,PSP ; Get PSP
SVC_Number
LDR R1,[R0,#24] ; Load saved PC from stack
SUBS R1,R1,#2 ; Point to SVC instruction
LDRB R1,[R1] ; Load SVC number
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
#ifdef RTX_SVC_PTR_CHECK
; Validate that the requested service function (in R7) lies inside the
; SVC veneer region before calling it.
SUBS R1,R7,#0x01 ; Clear T-bit of function address
LSLS R2,R1,#29 ; Check if 8-byte aligned
BEQ SVC_PtrBoundsCheck ; Branch if address is aligned
SVC_PtrInvalid
PUSH {R0,LR} ; Save SP and EXC_RETURN
MOVS R0,#osRtxErrorSVC ; Parameter: code
MOV R1,R7 ; Parameter: object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
POP {R2,R3} ; Restore SP and EXC_RETURN
MOV LR,R3 ; Set EXC_RETURN
B SVC_Context ; Branch to context handling
SVC_PtrBoundsCheck
LDR R2,=|Image$$RTX_SVC_VENEERS$$Base|
LDR R3,=|Image$$RTX_SVC_VENEERS$$Length|
SUBS R2,R1,R2 ; Subtract SVC table base address
CMP R2,R3 ; Compare with SVC table boundaries
BHS SVC_PtrInvalid ; Branch if address is out of bounds
#endif
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDMIA R0,{R0-R3} ; Load function parameters from stack
BLX R7 ; Call service function
POP {R2,R3} ; Restore SP and EXC_RETURN
STR R0,[R2] ; Store function return value
MOV LR,R3 ; Set EXC_RETURN
SVC_Context
; Common context-switch entry, also reached from PendSV/SysTick handlers.
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDMIA R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
BEQ SVC_Exit ; Branch when threads are the same
SUBS R3,R3,#8 ; Adjust address (undo LDMIA writeback)
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted
SVC_ContextSave
#ifdef RTX_TZ_CONTEXT
; Save the secure-side context first, if the thread owns one.
MOV R3,LR ; Get EXC_RETURN
LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,SVC_ContextSave_NS ; Branch if there is no secure context
PUSH {R0-R3} ; Save registers
BL TZ_StoreContext_S ; Store secure context
POP {R0-R3} ; Restore registers
MOV LR,R3 ; Set EXC_RETURN
#endif
SVC_ContextSave_NS
MRS R0,PSP ; Get PSP
#if (DOMAIN_NS != 0)
MOV R3,LR ; Get EXC_RETURN
LSLS R3,R3,#25 ; Check domain of interrupted thread (EXC_RETURN bit 6 -> sign)
BMI SVC_ContextSaveSP ; Branch if secure
#endif
#ifdef RTX_STACK_CHECK
; Pre-compute the final SP and check the stack before saving registers.
SUBS R0,R0,#32 ; Calculate SP: space for R4..R11
SVC_ContextSaveSP
STR R0,[R1,#TCB_SP_OFS] ; Store SP
MOV R3,LR ; Get EXC_RETURN
MOVS R0,#TCB_SF_OFS ; Get TCB.stack_frame offset
STRB R3,[R1,R0] ; Store stack frame information
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CBNZ R0,SVC_ContextSaveRegs ; Branch when stack check is ok
; Stack overflow: report it and drop the overflowing thread.
MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
MOVS R1,#0 ; Simulate deleted running thread
B SVC_ContextRestore ; Branch to context restore handling
SVC_ContextSaveRegs
#if (DOMAIN_NS != 0)
MOVS R0,#TCB_SF_OFS ; Get TCB.stack_frame offset
LDRSB R3,[R1,R0] ; Load stack frame information
LSLS R3,R3,#25 ; Check domain of interrupted thread
BMI SVC_ContextRestore ; Branch if secure
#endif
LDR R0,[R1,#TCB_SP_OFS] ; Load SP
STMIA R0!,{R4-R7} ; Save R4..R7
MOV R4,R8
MOV R5,R9
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} ; Save R8..R11
#else
SUBS R0,R0,#32 ; Calculate SP: space for R4..R11
STMIA R0!,{R4-R7} ; Save R4..R7
MOV R4,R8
MOV R5,R9
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} ; Save R8..R11
SUBS R0,R0,#32 ; Adjust address
SVC_ContextSaveSP
STR R0,[R1,#TCB_SP_OFS] ; Store SP
MOV R3,LR ; Get EXC_RETURN
MOVS R0,#TCB_SF_OFS ; Get TCB.stack_frame offset
STRB R3,[R1,R0] ; Store stack frame information
#endif
SVC_ContextRestore
MOVS R4,R2 ; Assign osRtxInfo.thread.run.next to R4
#ifdef RTX_EXECUTION_ZONE
; Invoke the zone-switch callback only when the MPU/zone changes.
MOVS R3,#TCB_ZONE_OFS ; Get TCB.zone offset
LDRB R0,[R2,R3] ; Load osRtxInfo.thread.run.next: zone
CBZ R1,SVC_ZoneSetup ; Branch if running thread is deleted
LDRB R1,[R1,R3] ; Load osRtxInfo.thread.run.curr: zone
CMP R0,R1 ; Check if next:zone == curr:zone
BEQ SVC_ContextRestore_S ; Branch if zone has not changed
SVC_ZoneSetup
BL osZoneSetup_Callback ; Setup zone for next thread
#endif
SVC_ContextRestore_S
#ifdef RTX_TZ_CONTEXT
LDR R0,[R4,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,SVC_ContextRestore_NS ; Branch if there is no secure context
BL TZ_LoadContext_S ; Load secure context
#endif
SVC_ContextRestore_NS
LDR R0,[R4,#TCB_SM_OFS] ; Load stack memory base
MSR PSPLIM,R0 ; Set PSPLIM
MOVS R0,#TCB_SF_OFS ; Get TCB.stack_frame offset
LDRSB R3,[R4,R0] ; Load stack frame information (sign-extended EXC_RETURN)
MOV LR,R3 ; Set EXC_RETURN
LDR R0,[R4,#TCB_SP_OFS] ; Load SP
#if (DOMAIN_NS != 0)
LSLS R3,R3,#25 ; Check domain of interrupted thread
BMI SVC_ContextRestoreSP ; Branch if secure
#endif
; Restore R8..R11 first (stored above R4..R7), then R4..R7.
ADDS R0,R0,#16 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R8..R11
MOV R8,R4
MOV R9,R5
MOV R10,R6
MOV R11,R7
SUBS R0,R0,#32 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R4..R7
ADDS R0,R0,#16 ; Adjust address
SVC_ContextRestoreSP
MSR PSP,R0 ; Set PSP
SVC_Exit
BX LR ; Exit from handler
SVC_MSP
MRS R0,MSP ; Get MSP
B SVC_Number
SVC_User
; Non-zero SVC number: dispatch through the user-provided SVC table.
; osRtxUserSVC[0] holds the highest valid SVC number.
LDR R2,=osRtxUserSVC ; Load address of SVC table
LDR R3,[R2] ; Load SVC maximum number
CMP R1,R3 ; Check SVC number range
BHI SVC_Exit ; Branch if out of range
PUSH {R0,LR} ; Save SP and EXC_RETURN
LSLS R1,R1,#2 ; Scale SVC number to table offset
LDR R3,[R2,R1] ; Load address of SVC function
MOV R12,R3
LDMIA R0,{R0-R3} ; Load function parameters from stack
BLX R12 ; Call service function
POP {R2,R3} ; Restore SP and EXC_RETURN
STR R0,[R2] ; Store function return value
BX R3 ; Return from handler
;-----------------------------------------------------------------------
; PendSV_Handler - runs the RTX PendSV processing, then falls into the
; shared SVC_Context path.
;-----------------------------------------------------------------------
PendSV_Handler
EXPORT PendSV_Handler
IMPORT osRtxPendSV_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B SVC_Context ; Branch to context handling
;-----------------------------------------------------------------------
; SysTick_Handler - runs the RTX tick processing, then falls into the
; shared SVC_Context path.
;-----------------------------------------------------------------------
SysTick_Handler
EXPORT SysTick_Handler
IMPORT osRtxTick_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B SVC_Context ; Branch to context handling
#ifdef RTX_SAFETY_FEATURES
;-----------------------------------------------------------------------
; osFaultResume - resume scheduling after a fault by re-entering the
; shared SVC_Context path.
;-----------------------------------------------------------------------
osFaultResume PROC
EXPORT osFaultResume
B SVC_Context ; Branch to context handling
ALIGN
ENDP
#endif
END

View File

@ -0,0 +1,324 @@
;/*
; * Copyright (c) 2016-2023 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
; * Licensed under the Apache License, Version 2.0 (the License); you may
; * not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; * www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an AS IS BASIS, WITHOUT
; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; *
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: ARMv8-M Mainline Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
; Module header for the IAR-assembled ARMv8-M Mainline RTX handlers.
NAME irq_armv8mml.s
#include "rtx_def.h"
; FPU_USED = 1 when the toolchain targets hardware floating point (__ARMVFP__).
#ifdef __ARMVFP__
FPU_USED EQU 1
#else
FPU_USED EQU 0
#endif
; MVE_USED = 1 when the M-profile Vector Extension (Helium) is enabled.
#if (defined(__ARM_FEATURE_MVE) && (__ARM_FEATURE_MVE > 0))
MVE_USED EQU 1
#else
MVE_USED EQU 0
#endif
; Byte offsets into RTX kernel structures; must match the C-side layouts
; declared by the RTX library (rtx_lib).
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SM_OFS EQU 48 ; TCB.stack_mem offset
TCB_SP_OFS EQU 56 ; TCB.SP offset
TCB_SF_OFS EQU 34 ; TCB.stack_frame offset
TCB_TZM_OFS EQU 64 ; TCB.tz_memory offset
TCB_ZONE_OFS EQU 68 ; TCB.zone offset
FPCCR EQU 0xE000EF34 ; FPCCR Address (Floating-Point Context Control Register)
; Error codes passed to osRtxKernelErrorNotify.
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
osRtxErrorSVC EQU 6 ; Invalid SVC function called
PRESERVE8
SECTION .rodata:DATA:NOROOT(2)
EXPORT irqRtxLib
irqRtxLib DCB 0 ; Non weak library reference
SECTION .text:CODE:NOROOT(2)
THUMB
;-----------------------------------------------------------------------
; SVC_Handler (ARMv8-M Mainline)
; SVC 0: calls the RTX kernel service function whose address/arguments are
; on the exception stack frame, then switches context when
; osRtxInfo.thread.run.curr != osRtxInfo.thread.run.next.
; SVC > 0: dispatched through the user SVC table (osRtxUserSVC).
; R12 carries the thread SP across the context-handling path.
;-----------------------------------------------------------------------
SVC_Handler
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
#ifdef RTX_STACK_CHECK
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
#endif
#ifdef RTX_SVC_PTR_CHECK
IMPORT |Image$$RTX_SVC_VENEERS$$Base|
IMPORT |Image$$RTX_SVC_VENEERS$$Length|
IMPORT osRtxKernelErrorNotify
#endif
#ifdef RTX_EXECUTION_ZONE
IMPORT osZoneSetup_Callback
#endif
#ifdef RTX_TZ_CONTEXT
IMPORT TZ_LoadContext_S
IMPORT TZ_StoreContext_S
#endif
TST LR,#0x04 ; Determine return stack from EXC_RETURN bit 2
ITE EQ
MRSEQ R0,MSP ; Get MSP if return stack is MSP
MRSNE R0,PSP ; Get PSP if return stack is PSP
LDR R1,[R0,#24] ; Load saved PC from stack
LDRB R1,[R1,#-2] ; Load SVC number
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
#ifdef RTX_SVC_PTR_CHECK
; Validate that the requested service function lies inside the SVC veneer
; region before calling it (defense against corrupted stack frames).
LDR R12,[R0,#16] ; Load function address from stack
SUB R1,R12,#1 ; Clear T-bit of function address
LSLS R2,R1,#30 ; Check if 4-byte aligned
BEQ SVC_PtrBoundsCheck ; Branch if address is aligned
SVC_PtrInvalid
PUSH {R0,LR} ; Save SP and EXC_RETURN
MOVS R0,#osRtxErrorSVC ; Parameter: code
MOV R1,R12 ; Parameter: object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
POP {R12,LR} ; Restore SP and EXC_RETURN
B SVC_Context ; Branch to context handling
SVC_PtrBoundsCheck
LDR R2,=|Image$$RTX_SVC_VENEERS$$Base|
LDR R3,=|Image$$RTX_SVC_VENEERS$$Length|
SUBS R2,R1,R2 ; Subtract SVC table base address
CMP R2,R3 ; Compare with SVC table boundaries
BHS SVC_PtrInvalid ; Branch if address is out of bounds
#endif
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDM R0,{R0-R3,R12} ; Load function parameters and address from stack
BLX R12 ; Call service function
POP {R12,LR} ; Restore SP and EXC_RETURN
STR R0,[R12] ; Store function return value
SVC_Context
; Common context-switch entry, also reached from PendSV/SysTick handlers
; with R12 = PSP of the interrupted thread.
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
IT EQ
BXEQ LR ; Exit when threads are the same
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
#if ((FPU_USED != 0) || (MVE_USED != 0))
CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted
SVC_FP_LazyState
; Deleted thread: discard any pending lazy FP state instead of saving it.
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
BNE SVC_ContextRestore ; Branch if not extended stack frame
LDR R3,=FPCCR ; FPCCR Address
LDR R0,[R3] ; Load FPCCR
BIC R0,R0,#1 ; Clear LSPACT (Lazy state preservation)
STR R0,[R3] ; Store FPCCR
B SVC_ContextRestore ; Branch to context restore handling
#else
CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted
#endif
SVC_ContextSave
#ifdef RTX_TZ_CONTEXT
; Save the secure-side context first, if the thread owns one.
LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,SVC_ContextSave_NS ; Branch if there is no secure context
PUSH {R1,R2,R12,LR} ; Save registers and EXC_RETURN
BL TZ_StoreContext_S ; Store secure context
POP {R1,R2,R12,LR} ; Restore registers and EXC_RETURN
#endif
SVC_ContextSave_NS
#if (DOMAIN_NS != 0)
TST LR,#0x40 ; Check domain of interrupted thread
BNE SVC_ContextSaveSP ; Branch if secure
#endif
#ifdef RTX_STACK_CHECK
; Pre-compute the final SP and check the stack before saving registers.
SUB R12,R12,#32 ; Calculate SP: space for R4..R11
#if ((FPU_USED != 0) || (MVE_USED != 0))
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
SUBEQ R12,R12,#64 ; Additional space for S16..S31
#endif
SVC_ContextSaveSP
STR R12,[R1,#TCB_SP_OFS] ; Store SP
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CBNZ R0,SVC_ContextSaveRegs ; Branch when stack check is ok
; Stack overflow: report it and drop the overflowing thread.
#if ((FPU_USED != 0) || (MVE_USED != 0))
MOV R4,R1 ; Assign osRtxInfo.thread.run.curr to R4
#endif
MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
MOVS R1,#0 ; Simulate deleted running thread
#if ((FPU_USED != 0) || (MVE_USED != 0))
LDRSB LR,[R4,#TCB_SF_OFS] ; Load stack frame information
B SVC_FP_LazyState ; Branch to FP lazy state handling
#else
B SVC_ContextRestore ; Branch to context restore handling
#endif
SVC_ContextSaveRegs
LDRSB LR,[R1,#TCB_SF_OFS] ; Load stack frame information
#if (DOMAIN_NS != 0)
TST LR,#0x40 ; Check domain of interrupted thread
BNE SVC_ContextRestore ; Branch if secure
#endif
LDR R12,[R1,#TCB_SP_OFS] ; Load SP
#if ((FPU_USED != 0) || (MVE_USED != 0))
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMIAEQ R12!,{S16-S31} ; Save VFP S16..S31
#endif
STM R12,{R4-R11} ; Save R4..R11
#else
STMDB R12!,{R4-R11} ; Save R4..R11
#if ((FPU_USED != 0) || (MVE_USED != 0))
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMDBEQ R12!,{S16-S31} ; Save VFP S16..S31
#endif
SVC_ContextSaveSP
STR R12,[R1,#TCB_SP_OFS] ; Store SP
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
#endif
SVC_ContextRestore
MOVS R4,R2 ; Assign osRtxInfo.thread.run.next to R4, clear Z flag
#ifdef RTX_EXECUTION_ZONE
; Invoke the zone-switch callback only when the MPU/zone changes.
LDRB R0,[R2,#TCB_ZONE_OFS] ; Load osRtxInfo.thread.run.next: zone
CBZ R1,SVC_ZoneSetup ; Branch if running thread is deleted (Z flag unchanged)
LDRB R1,[R1,#TCB_ZONE_OFS] ; Load osRtxInfo.thread.run.curr: zone
CMP R0,R1 ; Check if next:zone == curr:zone
SVC_ZoneSetup
IT NE ; If zone has changed or running thread is deleted
BLNE osZoneSetup_Callback ; Setup zone for next thread
#endif
#ifdef RTX_TZ_CONTEXT
LDR R0,[R4,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CMP R0,#0
IT NE ; If TrustZone memory allocated
BLNE TZ_LoadContext_S ; Load secure context
#endif
LDR R0,[R4,#TCB_SP_OFS] ; Load SP
LDR R1,[R4,#TCB_SM_OFS] ; Load stack memory base
MSR PSPLIM,R1 ; Set PSPLIM
LDRSB LR,[R4,#TCB_SF_OFS] ; Load stack frame information (sign-extended EXC_RETURN)
#if (DOMAIN_NS != 0)
TST LR,#0x40 ; Check domain of interrupted thread
ITT NE ; If secure
MSRNE PSP,R0 ; Set PSP
BXNE LR ; Exit from handler
#endif
#if ((FPU_USED != 0) || (MVE_USED != 0))
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31
#endif
LDMIA R0!,{R4-R11} ; Restore R4..R11
MSR PSP,R0 ; Set PSP
SVC_Exit
BX LR ; Exit from handler
SVC_User
; Non-zero SVC number: dispatch through the user-provided SVC table.
; osRtxUserSVC[0] holds the highest valid SVC number.
LDR R2,=osRtxUserSVC ; Load address of SVC table
LDR R3,[R2] ; Load SVC maximum number
CMP R1,R3 ; Check SVC number range
BHI SVC_Exit ; Branch if out of range
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDR R12,[R2,R1,LSL #2] ; Load address of SVC function
LDM R0,{R0-R3} ; Load function parameters from stack
BLX R12 ; Call service function
POP {R12,LR} ; Restore SP and EXC_RETURN
STR R0,[R12] ; Store function return value
BX LR ; Return from handler
;-----------------------------------------------------------------------
; PendSV_Handler - runs the RTX PendSV processing, then falls into the
; shared SVC_Context path with R12 = thread PSP.
;-----------------------------------------------------------------------
PendSV_Handler
EXPORT PendSV_Handler
IMPORT osRtxPendSV_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,LR} ; Restore EXC_RETURN
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
;-----------------------------------------------------------------------
; SysTick_Handler - runs the RTX tick processing, then falls into the
; shared SVC_Context path with R12 = thread PSP.
;-----------------------------------------------------------------------
SysTick_Handler
EXPORT SysTick_Handler
IMPORT osRtxTick_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,LR} ; Restore EXC_RETURN
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
#ifdef RTX_SAFETY_FEATURES
;-----------------------------------------------------------------------
; osFaultResume - resume scheduling after a fault by re-entering the
; shared SVC_Context path with R12 = thread PSP.
;-----------------------------------------------------------------------
osFaultResume PROC
EXPORT osFaultResume
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
ALIGN
ENDP
#endif
END

View File

@ -0,0 +1,51 @@
/*
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: Cortex Core definitions
*
* -----------------------------------------------------------------------------
*/
#ifndef RTX_CORE_C_H_
#define RTX_CORE_C_H_
//lint -emacro((923,9078),SCB) "cast from unsigned long to pointer" [MISRA Note 9]
// Pull in the CMSIS device header selected by the RTE configuration.
#ifndef RTE_COMPONENTS_H
#include "RTE_Components.h"
#endif
#include CMSIS_device_header
// Reject builds for architectures RTX does not support.
#if ((!defined(__ARM_ARCH_6M__)) && \
 (!defined(__ARM_ARCH_7A__)) && \
 (!defined(__ARM_ARCH_7M__)) && \
 (!defined(__ARM_ARCH_7EM__)) && \
 (!defined(__ARM_ARCH_8M_BASE__)) && \
 (!defined(__ARM_ARCH_8M_MAIN__)) && \
 (!defined(__ARM_ARCH_8_1M_MAIN__)))
#error "Unknown Arm Architecture!"
#endif
// Dispatch to the Cortex-A or Cortex-M core-specific definitions.
#if (defined(__ARM_ARCH_7A__) && (__ARM_ARCH_7A__ != 0))
#include "rtx_core_ca.h"
#else
#include "rtx_core_cm.h"
#endif
#endif // RTX_CORE_C_H_

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,111 @@
/*
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: Delay functions
*
* -----------------------------------------------------------------------------
*/
#include "rtx_lib.h"
// ==== Service Calls ====
/// Wait for Timeout (Time Delay).
/// \param[in]  ticks  number of kernel ticks to block the running thread.
/// \return osOK on success, osErrorParameter for a zero delay, osError if
///         the thread could not enter the wait state.
/// \note API identical to osDelay
static osStatus_t svcRtxDelay (uint32_t ticks) {

  // A zero tick count is not a valid delay request.
  if (ticks == 0U) {
    EvrRtxDelayError((int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }

  // Put the running thread into the delay wait state for 'ticks' ticks.
  if (!osRtxThreadWaitEnter(osRtxThreadWaitingDelay, ticks)) {
    EvrRtxDelayError((int32_t)osError);
    return osError;
  }

  EvrRtxDelayStarted(ticks);
  return osOK;
}
/// Wait until specified time.
/// \param[in]  ticks  absolute kernel tick value to wait for.
/// \return osOK on success, osErrorParameter if the target tick is now or
///         already in the past, osError if the wait could not be entered.
/// \note API identical to osDelayUntil
static osStatus_t svcRtxDelayUntil (uint32_t ticks) {
  osStatus_t status;
  uint32_t   delay;

  // Convert the absolute target tick into a relative delay.
  // Unsigned wrap-around makes this correct across tick counter overflow.
  delay = ticks - osRtxInfo.kernel.tick;

  // Reject a target that is "now" (delta 0) or in the past (delta > INT32_MAX).
  if ((delay == 0U) || (delay > 0x7FFFFFFFU)) {
    EvrRtxDelayError((int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }

  if (osRtxThreadWaitEnter(osRtxThreadWaitingDelay, delay)) {
    EvrRtxDelayUntilStarted(delay);
    status = osOK;
  } else {
    EvrRtxDelayError((int32_t)osError);
    status = osError;
  }
  return status;
}
// Service Calls definitions
// The SVC0_1 macros generate the __svcDelay/__svcDelayUntil veneers that
// trap into SVC_Handler and invoke the corresponding svcRtx* function.
//lint ++flb "Library Begin" [MISRA Note 11]
SVC0_1(Delay, osStatus_t, uint32_t)
SVC0_1(DelayUntil, osStatus_t, uint32_t)
//lint --flb "Library End"
// ==== Public API ====
/// Wait for Timeout (Time Delay).
/// \param[in]  ticks  number of kernel ticks to wait.
/// \return status code (osOK, osErrorParameter, osError, or osErrorISR
///         when called from interrupt context).
osStatus_t osDelay (uint32_t ticks) {
  EvrRtxDelay(ticks);
  // Blocking calls are forbidden in ISR context or with interrupts masked.
  if (IsException() || IsIrqMasked()) {
    EvrRtxDelayError((int32_t)osErrorISR);
    return osErrorISR;
  }
  // Enter the kernel through the SVC veneer.
  return __svcDelay(ticks);
}
/// Wait until specified time.
/// \param[in]  ticks  absolute kernel tick value to wait for.
/// \return status code (osOK, osErrorParameter, osError, or osErrorISR
///         when called from interrupt context).
osStatus_t osDelayUntil (uint32_t ticks) {
  EvrRtxDelayUntil(ticks);
  // Blocking calls are forbidden in ISR context or with interrupts masked.
  if (IsException() || IsIrqMasked()) {
    EvrRtxDelayError((int32_t)osErrorISR);
    return osErrorISR;
  }
  // Enter the kernel through the SVC veneer.
  return __svcDelayUntil(ticks);
}

View File

@ -0,0 +1,735 @@
/*
* Copyright (c) 2013-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: Event Flags functions
*
* -----------------------------------------------------------------------------
*/
#include "rtx_lib.h"
// OS Runtime Object Memory Usage
#ifdef RTX_OBJ_MEM_USAGE
// Tracks allocation counters for event-flags control blocks
// (used/max/free); placed in its own data section for the kernel.
osRtxObjectMemUsage_t osRtxEventFlagsMemUsage \
__attribute__((section(".data.os.evflags.obj"))) =
{ 0U, 0U, 0U };
#endif
// ==== Helper functions ====
/// Set Event Flags.
/// \param[in]  ef      event flags object.
/// \param[in]  flags   specifies the flags to set.
/// \return event flags after setting.
static uint32_t EventFlagsSet (os_event_flags_t *ef, uint32_t flags) {
#if (EXCLUSIVE_ACCESS == 0)
  // No exclusive-access instructions available: protect the RMW sequence
  // by masking interrupts, restoring them only if they were enabled.
uint32_t primask = __get_PRIMASK();
#endif
uint32_t event_flags;
#if (EXCLUSIVE_ACCESS == 0)
__disable_irq();
ef->event_flags |= flags;
event_flags = ef->event_flags;
if (primask == 0U) {
__enable_irq();
}
#else
  // LDREX/STREX path: atomic OR without disabling interrupts.
event_flags = atomic_set32(&ef->event_flags, flags);
#endif
return event_flags;
}
/// Clear Event Flags.
/// \param[in]  ef      event flags object.
/// \param[in]  flags   specifies the flags to clear.
/// \return event flags before clearing.
static uint32_t EventFlagsClear (os_event_flags_t *ef, uint32_t flags) {
#if (EXCLUSIVE_ACCESS == 0)
  // No exclusive-access instructions available: protect the RMW sequence
  // by masking interrupts, restoring them only if they were enabled.
uint32_t primask = __get_PRIMASK();
#endif
uint32_t event_flags;
#if (EXCLUSIVE_ACCESS == 0)
__disable_irq();
event_flags = ef->event_flags;
ef->event_flags &= ~flags;
if (primask == 0U) {
__enable_irq();
}
#else
  // LDREX/STREX path: atomic AND-NOT without disabling interrupts.
event_flags = atomic_clr32(&ef->event_flags, flags);
#endif
return event_flags;
}
/// Check Event Flags.
/// \param[in]  ef      event flags object.
/// \param[in]  flags   specifies the flags to check.
/// \param[in]  options specifies flags options (osFlagsXxxx).
/// \return event flags before clearing or 0 if specified flags have not been set.
static uint32_t EventFlagsCheck (os_event_flags_t *ef, uint32_t flags, uint32_t options) {
#if (EXCLUSIVE_ACCESS == 0)
uint32_t primask;
#endif
uint32_t event_flags;
if ((options & osFlagsNoClear) == 0U) {
  // Check-and-consume: matched flags are cleared atomically.
#if (EXCLUSIVE_ACCESS == 0)
primask = __get_PRIMASK();
__disable_irq();
event_flags = ef->event_flags;
// osFlagsWaitAll requires every requested flag; otherwise any one suffices.
if ((((options & osFlagsWaitAll) != 0U) && ((event_flags & flags) != flags)) ||
 (((options & osFlagsWaitAll) == 0U) && ((event_flags & flags) == 0U))) {
event_flags = 0U;
} else {
ef->event_flags &= ~flags;
}
if (primask == 0U) {
__enable_irq();
}
#else
if ((options & osFlagsWaitAll) != 0U) {
event_flags = atomic_chk32_all(&ef->event_flags, flags);
} else {
event_flags = atomic_chk32_any(&ef->event_flags, flags);
}
#endif
} else {
  // osFlagsNoClear: read-only check, flags are left untouched.
event_flags = ef->event_flags;
if ((((options & osFlagsWaitAll) != 0U) && ((event_flags & flags) != flags)) ||
 (((options & osFlagsWaitAll) == 0U) && ((event_flags & flags) == 0U))) {
event_flags = 0U;
}
}
return event_flags;
}
/// Verify that Event Flags object pointer is valid.
/// \param[in]  ef  event flags object.
/// \return true - valid, false - invalid.
static bool_t IsEventFlagsPtrValid (const os_event_flags_t *ef) {
#ifdef RTX_OBJ_PTR_CHECK
  // Strict mode: the pointer must land inside the dedicated control-block
  // section AND be aligned to a control-block slot.
//lint --e{923} --e{9078} "cast from pointer to unsigned int" [MISRA Note 7]
uint32_t cb_start = (uint32_t)&__os_evflags_cb_start__;
uint32_t cb_length = (uint32_t)&__os_evflags_cb_length__;
// Check the section boundaries
if (((uint32_t)ef - cb_start) >= cb_length) {
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return FALSE;
}
// Check the object alignment
if ((((uint32_t)ef - cb_start) % sizeof(os_event_flags_t)) != 0U) {
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return FALSE;
}
#else
// Check NULL pointer
if (ef == NULL) {
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return FALSE;
}
#endif
return TRUE;
}
// ==== Library functions ====
/// Destroy an Event Flags object.
/// Marks the control block invalid and releases its memory when it was
/// allocated by the kernel (osRtxFlagSystemObject).
/// \param[in]  ef  event flags object.
static void osRtxEventFlagsDestroy (os_event_flags_t *ef) {
// Mark object as invalid
ef->id = osRtxIdInvalid;
// Free object memory
if ((ef->flags & osRtxFlagSystemObject) != 0U) {
#ifdef RTX_OBJ_PTR_CHECK
(void)osRtxMemoryPoolFree(osRtxInfo.mpi.event_flags, ef);
#else
  // Without pointer checking the object may come from either the
  // dedicated memory pool or the common dynamic memory.
if (osRtxInfo.mpi.event_flags != NULL) {
(void)osRtxMemoryPoolFree(osRtxInfo.mpi.event_flags, ef);
} else {
(void)osRtxMemoryFree(osRtxInfo.mem.common, ef);
}
#endif
#ifdef RTX_OBJ_MEM_USAGE
osRtxEventFlagsMemUsage.cnt_free++;
#endif
}
EvrRtxEventFlagsDestroyed(ef);
}
#ifdef RTX_SAFETY_CLASS
/// Delete an Event Flags safety class.
/// Walks the static event-flags control-block section and destroys every
/// active object whose safety class matches the selection mode, releasing
/// all threads waiting on it with osErrorResource.
/// \param[in]  safety_class  safety class.
/// \param[in]  mode          safety mode (osSafetyWithSameClass and/or
///                           osSafetyWithLowerClass).
void osRtxEventFlagsDeleteClass (uint32_t safety_class, uint32_t mode) {
os_event_flags_t *ef;
os_thread_t *thread;
uint32_t length;
//lint --e{923} --e{9078} "cast from pointer to unsigned int" [MISRA Note 7]
ef = (os_event_flags_t *)(uint32_t)&__os_evflags_cb_start__;
length = (uint32_t)&__os_evflags_cb_length__;
while (length >= sizeof(os_event_flags_t)) {
// Select active objects with the same class, or a lower class,
// depending on the requested mode bits.
if ( (ef->id == osRtxIdEventFlags) &&
 ((((mode & osSafetyWithSameClass) != 0U) &&
 ((ef->attr >> osRtxAttrClass_Pos) == (uint8_t)safety_class)) ||
 (((mode & osSafetyWithLowerClass) != 0U) &&
 ((ef->attr >> osRtxAttrClass_Pos) < (uint8_t)safety_class)))) {
// Wake every waiting thread with an error before destroying.
while (ef->thread_list != NULL) {
thread = osRtxThreadListGet(osRtxObject(ef));
osRtxThreadWaitExit(thread, (uint32_t)osErrorResource, FALSE);
}
osRtxEventFlagsDestroy(ef);
}
length -= sizeof(os_event_flags_t);
ef++;
}
}
#endif
// ==== Post ISR processing ====
/// Event Flags post ISR processing.
/// Completes the wait of every blocked thread whose flag condition is
/// now satisfied (deferred work for flags set from interrupt context).
/// \param[in]  ef  event flags object.
static void osRtxEventFlagsPostProcess (os_event_flags_t *ef) {
  os_thread_t *waiter;
  os_thread_t *next;
  uint32_t     satisfied;

  // Capture the successor before waking a thread, since wake-up
  // unlinks the thread from this wait list.
  for (waiter = ef->thread_list; waiter != NULL; waiter = next) {
    next = waiter->thread_next;
    satisfied = EventFlagsCheck(ef, waiter->wait_flags, waiter->flags_options);
    if (satisfied != 0U) {
      osRtxThreadListRemove(waiter);
      osRtxThreadWaitExit(waiter, satisfied, FALSE);
      EvrRtxEventFlagsWaitCompleted(ef, waiter->wait_flags, waiter->flags_options, satisfied);
    }
  }
}
// ==== Service Calls ====
/// Create and Initialize an Event Flags object.
/// Validates attributes (including safety class), uses a caller-supplied
/// control block when given, otherwise allocates one from kernel memory.
/// \note API identical to osEventFlagsNew
static osEventFlagsId_t svcRtxEventFlagsNew (const osEventFlagsAttr_t *attr) {
  os_event_flags_t *ef;
#ifdef RTX_SAFETY_CLASS
  const os_thread_t *thread = osRtxThreadGetRunning();
  uint32_t attr_bits;
#endif
  uint8_t flags;
  const char *name;

  // Process attributes
  if (attr != NULL) {
    name = attr->name;
#ifdef RTX_SAFETY_CLASS
    attr_bits = attr->attr_bits;
#endif
    //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 6]
    ef = attr->cb_mem;
#ifdef RTX_SAFETY_CLASS
    // A thread may not create objects of a higher safety class than its own
    if ((attr_bits & osSafetyClass_Valid) != 0U) {
      if ((thread != NULL) &&
          ((thread->attr >> osRtxAttrClass_Pos) <
          (uint8_t)((attr_bits & osSafetyClass_Msk) >> osSafetyClass_Pos))) {
        EvrRtxEventFlagsError(NULL, (int32_t)osErrorSafetyClass);
        //lint -e{904} "Return statement before end of function" [MISRA Note 1]
        return NULL;
      }
    }
#endif
    if (ef != NULL) {
      // User-provided control block: must be valid and correctly sized
      if (!IsEventFlagsPtrValid(ef) || (attr->cb_size != sizeof(os_event_flags_t))) {
        EvrRtxEventFlagsError(NULL, osRtxErrorInvalidControlBlock);
        //lint -e{904} "Return statement before end of function" [MISRA Note 1]
        return NULL;
      }
    } else {
      // No memory given: size must be zero (i.e. "allocate for me")
      if (attr->cb_size != 0U) {
        EvrRtxEventFlagsError(NULL, osRtxErrorInvalidControlBlock);
        //lint -e{904} "Return statement before end of function" [MISRA Note 1]
        return NULL;
      }
    }
  } else {
    name = NULL;
#ifdef RTX_SAFETY_CLASS
    attr_bits = 0U;
#endif
    ef = NULL;
  }

  // Allocate object memory if not provided
  if (ef == NULL) {
    if (osRtxInfo.mpi.event_flags != NULL) {
      //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
      ef = osRtxMemoryPoolAlloc(osRtxInfo.mpi.event_flags);
#ifndef RTX_OBJ_PTR_CHECK
    } else {
      // Fall back to the variable-size common memory pool
      //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
      ef = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_event_flags_t), 1U);
#endif
    }
#ifdef RTX_OBJ_MEM_USAGE
    // Track allocation statistics and high-water mark
    if (ef != NULL) {
      uint32_t used;
      osRtxEventFlagsMemUsage.cnt_alloc++;
      used = osRtxEventFlagsMemUsage.cnt_alloc - osRtxEventFlagsMemUsage.cnt_free;
      if (osRtxEventFlagsMemUsage.max_used < used) {
        osRtxEventFlagsMemUsage.max_used = used;
      }
    }
#endif
    flags = osRtxFlagSystemObject;
  } else {
    flags = 0U;
  }

  if (ef != NULL) {
    // Initialize control block
    ef->id          = osRtxIdEventFlags;
    ef->flags       = flags;
    ef->attr        = 0U;
    ef->name        = name;
    ef->thread_list = NULL;
    ef->event_flags = 0U;
#ifdef RTX_SAFETY_CLASS
    if ((attr_bits & osSafetyClass_Valid) != 0U) {
      ef->attr    |= (uint8_t)((attr_bits & osSafetyClass_Msk) >>
                               (osSafetyClass_Pos - osRtxAttrClass_Pos));
    } else {
      // Inherit safety class from the running thread
      if (thread != NULL) {
        ef->attr  |= (uint8_t)(thread->attr & osRtxAttrClass_Msk);
      }
    }
#endif
    // Register post ISR processing function
    osRtxInfo.post_process.event_flags = osRtxEventFlagsPostProcess;
    EvrRtxEventFlagsCreated(ef, ef->name);
  } else {
    EvrRtxEventFlagsError(NULL, (int32_t)osErrorNoMemory);
  }

  return ef;
}
/// Get name of an Event Flags object.
/// \note API identical to osEventFlagsGetName
static const char *svcRtxEventFlagsGetName (osEventFlagsId_t ef_id) {
  os_event_flags_t *ef   = osRtxEventFlagsId(ef_id);
  const char       *name = NULL;

  // Only dereference the name field of a verified, live object
  if (IsEventFlagsPtrValid(ef) && (ef->id == osRtxIdEventFlags)) {
    name = ef->name;
  }
  EvrRtxEventFlagsGetName(ef, name);
  return name;
}
/// Set the specified Event Flags.
/// Sets the flags and wakes every waiting thread whose condition becomes
/// satisfied, honoring each thread's osFlagsNoClear option.
/// \note API identical to osEventFlagsSet
static uint32_t svcRtxEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags) {
  os_event_flags_t *ef = osRtxEventFlagsId(ef_id);
  os_thread_t      *thread;
  os_thread_t      *thread_next;
  uint32_t          event_flags;
  uint32_t          event_flags0;

  // Check parameters: object validity and no flag bits above the limit
  if (!IsEventFlagsPtrValid(ef) || (ef->id != osRtxIdEventFlags) ||
      ((flags & ~(((uint32_t)1U << osRtxEventFlagsLimit) - 1U)) != 0U)) {
    EvrRtxEventFlagsError(ef, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return ((uint32_t)osErrorParameter);
  }

#ifdef RTX_SAFETY_CLASS
  // Check running thread safety class against the object's class
  thread = osRtxThreadGetRunning();
  if ((thread != NULL) &&
      ((thread->attr >> osRtxAttrClass_Pos) < (ef->attr >> osRtxAttrClass_Pos))) {
    EvrRtxEventFlagsError(ef, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return ((uint32_t)osErrorSafetyClass);
  }
#endif

  // Set Event Flags
  event_flags = EventFlagsSet(ef, flags);
  // Check if Threads are waiting for Event Flags
  thread = ef->thread_list;
  while (thread != NULL) {
    // Save successor first: a released thread is unlinked from the list
    thread_next = thread->thread_next;
    event_flags0 = EventFlagsCheck(ef, thread->wait_flags, thread->flags_options);
    if (event_flags0 != 0U) {
      // 'event_flags' tracks the flags remaining after this thread's
      // consumption; the woken thread itself receives 'event_flags0'
      if ((thread->flags_options & osFlagsNoClear) == 0U) {
        event_flags = event_flags0 & ~thread->wait_flags;
      } else {
        event_flags = event_flags0;
      }
      osRtxThreadListRemove(thread);
      osRtxThreadWaitExit(thread, event_flags0, FALSE);
      EvrRtxEventFlagsWaitCompleted(ef, thread->wait_flags, thread->flags_options, event_flags0);
    }
    thread = thread_next;
  }
  osRtxThreadDispatch(NULL);

  EvrRtxEventFlagsSetDone(ef, event_flags);
  return event_flags;
}
/// Clear the specified Event Flags.
/// \note API identical to osEventFlagsClear
static uint32_t svcRtxEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) {
  os_event_flags_t *ef = osRtxEventFlagsId(ef_id);
#ifdef RTX_SAFETY_CLASS
  const os_thread_t *thread;
#endif
  uint32_t event_flags;

  // Check parameters: object validity and no flag bits above the limit
  if (!IsEventFlagsPtrValid(ef) || (ef->id != osRtxIdEventFlags) ||
      ((flags & ~(((uint32_t)1U << osRtxEventFlagsLimit) - 1U)) != 0U)) {
    EvrRtxEventFlagsError(ef, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return ((uint32_t)osErrorParameter);
  }

#ifdef RTX_SAFETY_CLASS
  // Check running thread safety class against the object's class
  thread = osRtxThreadGetRunning();
  if ((thread != NULL) &&
      ((thread->attr >> osRtxAttrClass_Pos) < (ef->attr >> osRtxAttrClass_Pos))) {
    EvrRtxEventFlagsError(ef, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return ((uint32_t)osErrorSafetyClass);
  }
#endif

  // Clear Event Flags
  event_flags = EventFlagsClear(ef, flags);

  EvrRtxEventFlagsClearDone(ef, event_flags);
  return event_flags;
}
/// Get the current Event Flags.
/// \note API identical to osEventFlagsGet
static uint32_t svcRtxEventFlagsGet (osEventFlagsId_t ef_id) {
  os_event_flags_t *ef      = osRtxEventFlagsId(ef_id);
  uint32_t          current = 0U;

  // Invalid handles report zero flags
  if (IsEventFlagsPtrValid(ef) && (ef->id == osRtxIdEventFlags)) {
    current = ef->event_flags;
  }
  EvrRtxEventFlagsGet(ef, current);
  return current;
}
/// Wait for one or more Event Flags to become signaled.
/// Returns the matched flags immediately when already satisfied;
/// otherwise suspends the calling thread (timeout != 0) or fails fast.
/// \note API identical to osEventFlagsWait
static uint32_t svcRtxEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout) {
  os_event_flags_t *ef = osRtxEventFlagsId(ef_id);
  os_thread_t      *thread;
  uint32_t          event_flags;

  // Check parameters: object validity and no flag bits above the limit
  if (!IsEventFlagsPtrValid(ef) || (ef->id != osRtxIdEventFlags) ||
      ((flags & ~(((uint32_t)1U << osRtxEventFlagsLimit) - 1U)) != 0U)) {
    EvrRtxEventFlagsError(ef, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return ((uint32_t)osErrorParameter);
  }

#ifdef RTX_SAFETY_CLASS
  // Check running thread safety class against the object's class
  thread = osRtxThreadGetRunning();
  if ((thread != NULL) &&
      ((thread->attr >> osRtxAttrClass_Pos) < (ef->attr >> osRtxAttrClass_Pos))) {
    EvrRtxEventFlagsError(ef, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return ((uint32_t)osErrorSafetyClass);
  }
#endif

  // Check Event Flags
  event_flags = EventFlagsCheck(ef, flags, options);
  if (event_flags != 0U) {
    // Condition already satisfied - no blocking required
    EvrRtxEventFlagsWaitCompleted(ef, flags, options, event_flags);
  } else {
    // Check if timeout is specified
    if (timeout != 0U) {
      EvrRtxEventFlagsWaitPending(ef, flags, options, timeout);
      // Suspend current Thread
      if (osRtxThreadWaitEnter(osRtxThreadWaitingEventFlags, timeout)) {
        thread = osRtxThreadGetRunning();
        osRtxThreadListPut(osRtxObject(ef), thread);
        // Store waiting flags and options; they are evaluated again by
        // EventFlagsCheck when flags are set later
        thread->wait_flags = flags;
        thread->flags_options = (uint8_t)options;
      } else {
        EvrRtxEventFlagsWaitTimeout(ef);
      }
      // The real result is delivered to the thread when it resumes
      event_flags = (uint32_t)osErrorTimeout;
    } else {
      EvrRtxEventFlagsWaitNotCompleted(ef, flags, options);
      event_flags = (uint32_t)osErrorResource;
    }
  }

  return event_flags;
}
/// Delete an Event Flags object.
/// Wakes all waiters with osErrorResource, then destroys the object.
/// \note API identical to osEventFlagsDelete
static osStatus_t svcRtxEventFlagsDelete (osEventFlagsId_t ef_id) {
  os_event_flags_t *ef = osRtxEventFlagsId(ef_id);
  os_thread_t      *thread;

  // Check parameters
  if (!IsEventFlagsPtrValid(ef) || (ef->id != osRtxIdEventFlags)) {
    EvrRtxEventFlagsError(ef, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }

#ifdef RTX_SAFETY_CLASS
  // Check running thread safety class against the object's class
  thread = osRtxThreadGetRunning();
  if ((thread != NULL) &&
      ((thread->attr >> osRtxAttrClass_Pos) < (ef->attr >> osRtxAttrClass_Pos))) {
    EvrRtxEventFlagsError(ef, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorSafetyClass;
  }
#endif

  // Unblock waiting threads
  if (ef->thread_list != NULL) {
    do {
      thread = osRtxThreadListGet(osRtxObject(ef));
      osRtxThreadWaitExit(thread, (uint32_t)osErrorResource, FALSE);
    } while (ef->thread_list != NULL);
    // Released threads may preempt: run the dispatcher once
    osRtxThreadDispatch(NULL);
  }

  osRtxEventFlagsDestroy(ef);

  return osOK;
}
// Service Calls definitions
// Each SVC0_N macro (N = argument count) generates a __svc<Name> veneer
// that traps into handler mode and dispatches to svcRtx<Name>.
//lint ++flb "Library Begin" [MISRA Note 11]
SVC0_1(EventFlagsNew, osEventFlagsId_t, const osEventFlagsAttr_t *)
SVC0_1(EventFlagsGetName, const char *, osEventFlagsId_t)
SVC0_2(EventFlagsSet, uint32_t, osEventFlagsId_t, uint32_t)
SVC0_2(EventFlagsClear, uint32_t, osEventFlagsId_t, uint32_t)
SVC0_1(EventFlagsGet, uint32_t, osEventFlagsId_t)
SVC0_4(EventFlagsWait, uint32_t, osEventFlagsId_t, uint32_t, uint32_t, uint32_t)
SVC0_1(EventFlagsDelete, osStatus_t, osEventFlagsId_t)
//lint --flb "Library End"
// ==== ISR Calls ====
/// Set the specified Event Flags.
/// ISR variant: flags are set immediately, but waiter wake-up is
/// deferred to post ISR processing.
/// \note API identical to osEventFlagsSet
__STATIC_INLINE
uint32_t isrRtxEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags) {
  os_event_flags_t *ef = osRtxEventFlagsId(ef_id);
  const uint32_t    invalid_bits = ~(((uint32_t)1U << osRtxEventFlagsLimit) - 1U);
  uint32_t          event_flags;

  // Reject invalid objects and flag bits above the configured limit
  if (!IsEventFlagsPtrValid(ef) || (ef->id != osRtxIdEventFlags) ||
      ((flags & invalid_bits) != 0U)) {
    EvrRtxEventFlagsError(ef, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return ((uint32_t)osErrorParameter);
  }

  event_flags = EventFlagsSet(ef, flags);
  // Queue the object for post ISR processing (wakes waiters later)
  osRtxPostProcess(osRtxObject(ef));

  EvrRtxEventFlagsSetDone(ef, event_flags);
  return event_flags;
}
/// Wait for one or more Event Flags to become signaled.
/// ISR variant: must not block, therefore timeout has to be zero.
/// \note API identical to osEventFlagsWait
__STATIC_INLINE
uint32_t isrRtxEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout) {
  os_event_flags_t *ef = osRtxEventFlagsId(ef_id);
  const uint32_t    invalid_bits = ~(((uint32_t)1U << osRtxEventFlagsLimit) - 1U);
  uint32_t          result;

  // Reject invalid objects, out-of-range flag bits and non-zero timeouts
  if (!IsEventFlagsPtrValid(ef) || (ef->id != osRtxIdEventFlags) || (timeout != 0U) ||
      ((flags & invalid_bits) != 0U)) {
    EvrRtxEventFlagsError(ef, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return ((uint32_t)osErrorParameter);
  }

  // Poll once; never suspend from interrupt context
  result = EventFlagsCheck(ef, flags, options);
  if (result != 0U) {
    EvrRtxEventFlagsWaitCompleted(ef, flags, options, result);
  } else {
    EvrRtxEventFlagsWaitNotCompleted(ef, flags, options);
    result = (uint32_t)osErrorResource;
  }
  return result;
}
// ==== Public API ====
/// Create and Initialize an Event Flags object.
osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr) {
  osEventFlagsId_t id;

  EvrRtxEventFlagsNew(attr);
  // Object creation is not permitted from interrupt context
  if (!IsException() && !IsIrqMasked()) {
    id = __svcEventFlagsNew(attr);
  } else {
    EvrRtxEventFlagsError(NULL, (int32_t)osErrorISR);
    id = NULL;
  }
  return id;
}
/// Get name of an Event Flags object.
const char *osEventFlagsGetName (osEventFlagsId_t ef_id) {
  const char *name;

  // Callable from ISR context: bypass the SVC veneer there
  if (!IsException() && !IsIrqMasked()) {
    name = __svcEventFlagsGetName(ef_id);
  } else {
    name = svcRtxEventFlagsGetName(ef_id);
  }
  return name;
}
/// Set the specified Event Flags.
uint32_t osEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags) {
  uint32_t result;

  EvrRtxEventFlagsSet(ef_id, flags);
  // Route to the ISR variant when called from interrupt context
  if (!IsException() && !IsIrqMasked()) {
    result = __svcEventFlagsSet(ef_id, flags);
  } else {
    result = isrRtxEventFlagsSet(ef_id, flags);
  }
  return result;
}
/// Clear the specified Event Flags.
uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) {
  uint32_t result;

  EvrRtxEventFlagsClear(ef_id, flags);
  // Direct call in interrupt context, SVC otherwise
  if (!IsException() && !IsIrqMasked()) {
    result = __svcEventFlagsClear(ef_id, flags);
  } else {
    result = svcRtxEventFlagsClear(ef_id, flags);
  }
  return result;
}
/// Get the current Event Flags.
uint32_t osEventFlagsGet (osEventFlagsId_t ef_id) {
  uint32_t result;

  // Direct call in interrupt context, SVC otherwise
  if (!IsException() && !IsIrqMasked()) {
    result = __svcEventFlagsGet(ef_id);
  } else {
    result = svcRtxEventFlagsGet(ef_id);
  }
  return result;
}
/// Wait for one or more Event Flags to become signaled.
uint32_t osEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout) {
  uint32_t result;

  EvrRtxEventFlagsWait(ef_id, flags, options, timeout);
  // The ISR variant cannot block (it requires timeout == 0)
  if (!IsException() && !IsIrqMasked()) {
    result = __svcEventFlagsWait(ef_id, flags, options, timeout);
  } else {
    result = isrRtxEventFlagsWait(ef_id, flags, options, timeout);
  }
  return result;
}
/// Delete an Event Flags object.
osStatus_t osEventFlagsDelete (osEventFlagsId_t ef_id) {
  osStatus_t status;

  EvrRtxEventFlagsDelete(ef_id);
  // Object deletion is not permitted from interrupt context
  if (!IsException() && !IsIrqMasked()) {
    status = __svcEventFlagsDelete(ef_id);
  } else {
    EvrRtxEventFlagsError(ef_id, (int32_t)osErrorISR);
    status = osErrorISR;
  }
  return status;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,971 @@
/*
* Copyright (c) 2013-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: Kernel functions
*
* -----------------------------------------------------------------------------
*/
#include "rtx_lib.h"
// OS Runtime Information
// Central kernel state singleton, placed in the dedicated ".data.os"
// section. Only the OS id, version and kernel state are initialized
// statically; the rest is set up by svcRtxKernelInitialize.
osRtxInfo_t osRtxInfo __attribute__((section(".data.os"))) =
//lint -e{785} "Initialize only OS ID, OS Version and Kernel State"
{ .os_id = osRtxKernelId, .version = osRtxVersionKernel, .kernel.state = osRtxKernelInactive };
// ==== Helper functions ====
/// Block Kernel (disable: thread switching, time tick, post ISR processing).
static void KernelBlock (void) {

  OS_Tick_Disable();

  osRtxInfo.kernel.blocked = 1U;
  __DSB();                              // ensure 'blocked' is set before sampling PendSV

  // If a context switch is already pended, remember it so that it can
  // be re-pended when the kernel is unblocked again.
  if (GetPendSV() != 0U) {
    ClrPendSV();
    osRtxInfo.kernel.pendSV = 1U;
  }
}
/// Unblock Kernel
/// Re-pends a context switch that was deferred while the kernel was
/// blocked and re-enables the system tick.
static void KernelUnblock (void) {

  osRtxInfo.kernel.blocked = 0U;
  __DSB();                              // ensure 'blocked' is cleared before checking pendSV

  if (osRtxInfo.kernel.pendSV != 0U) {
    osRtxInfo.kernel.pendSV = 0U;
    SetPendSV();
  }

  OS_Tick_Enable();
}
// Get Kernel sleep time
// Returns the number of ticks until the nearest pending kernel event
// (thread delay, thread watchdog, or active timer), or osWaitForever
// when nothing is pending.
static uint32_t GetKernelSleepTime (void) {
  const os_thread_t *thread;
  const os_timer_t  *timer;
  uint32_t           sleep = osWaitForever;

  // Nearest pending thread delay
  thread = osRtxInfo.thread.delay_list;
  if (thread != NULL) {
    sleep = thread->delay;
  }
#ifdef RTX_THREAD_WATCHDOG
  // Nearest pending thread watchdog expiry
  thread = osRtxInfo.thread.wdog_list;
  if ((thread != NULL) && (thread->wdog_tick < sleep)) {
    sleep = thread->wdog_tick;
  }
#endif
  // Nearest pending timer expiry
  timer = osRtxInfo.timer.list;
  if ((timer != NULL) && (timer->tick < sleep)) {
    sleep = timer->tick;
  }
  return sleep;
}
// ==== Service Calls ====
/// Initialize the RTOS Kernel.
/// Sets up the secure context system (optional), runtime information,
/// variable-block and fixed-block memory pools, then moves the kernel
/// to the Ready state. Idempotent when already Ready.
/// \note API identical to osKernelInitialize
static osStatus_t svcRtxKernelInitialize (void) {

  // Already initialized - report success without re-initializing
  if (osRtxInfo.kernel.state == osRtxKernelReady) {
    EvrRtxKernelInitialized();
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osOK;
  }
  // Initialization is only allowed from the Inactive state
  if (osRtxInfo.kernel.state != osRtxKernelInactive) {
    EvrRtxKernelError((int32_t)osError);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osError;
  }

#ifdef RTX_TZ_CONTEXT
  // Initialize Secure Process Stack
  if (TZ_InitContextSystem_S() == 0U) {
    EvrRtxKernelError(osRtxErrorTZ_InitContext_S);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osError;
  }
#endif

  // Initialize osRtxInfo: zero everything from member 'kernel' onwards
  // (preserves os_id/version initialized statically)
  (void)memset(&osRtxInfo.kernel, 0, sizeof(osRtxInfo) - offsetof(osRtxInfo_t, kernel));

  osRtxInfo.isr_queue.data = osRtxConfig.isr_queue.data;
  osRtxInfo.isr_queue.max  = osRtxConfig.isr_queue.max;

  osRtxInfo.thread.robin.timeout = osRtxConfig.robin_timeout;

  // Initialize Memory Pools (Variable Block Size); dedicated regions
  // fall back to the common pool when not configured/usable
  if (osRtxMemoryInit(osRtxConfig.mem.common_addr, osRtxConfig.mem.common_size) != 0U) {
    osRtxInfo.mem.common = osRtxConfig.mem.common_addr;
  }
  if (osRtxMemoryInit(osRtxConfig.mem.stack_addr, osRtxConfig.mem.stack_size) != 0U) {
    osRtxInfo.mem.stack = osRtxConfig.mem.stack_addr;
  } else {
    osRtxInfo.mem.stack = osRtxInfo.mem.common;
  }
  if (osRtxMemoryInit(osRtxConfig.mem.mp_data_addr, osRtxConfig.mem.mp_data_size) != 0U) {
    osRtxInfo.mem.mp_data = osRtxConfig.mem.mp_data_addr;
  } else {
    osRtxInfo.mem.mp_data = osRtxInfo.mem.common;
  }
  if (osRtxMemoryInit(osRtxConfig.mem.mq_data_addr, osRtxConfig.mem.mq_data_size) != 0U) {
    osRtxInfo.mem.mq_data = osRtxConfig.mem.mq_data_addr;
  } else {
    osRtxInfo.mem.mq_data = osRtxInfo.mem.common;
  }

  // Initialize Memory Pools (Fixed Block Size) - one optional pool per
  // object type, configured at build time
  if (osRtxConfig.mpi.stack != NULL) {
    (void)osRtxMemoryPoolInit(osRtxConfig.mpi.stack,
                              osRtxConfig.mpi.stack->max_blocks,
                              osRtxConfig.mpi.stack->block_size,
                              osRtxConfig.mpi.stack->block_base);
    osRtxInfo.mpi.stack = osRtxConfig.mpi.stack;
  }
  if (osRtxConfig.mpi.thread != NULL) {
    (void)osRtxMemoryPoolInit(osRtxConfig.mpi.thread,
                              osRtxConfig.mpi.thread->max_blocks,
                              osRtxConfig.mpi.thread->block_size,
                              osRtxConfig.mpi.thread->block_base);
    osRtxInfo.mpi.thread = osRtxConfig.mpi.thread;
  }
  if (osRtxConfig.mpi.timer != NULL) {
    (void)osRtxMemoryPoolInit(osRtxConfig.mpi.timer,
                              osRtxConfig.mpi.timer->max_blocks,
                              osRtxConfig.mpi.timer->block_size,
                              osRtxConfig.mpi.timer->block_base);
    osRtxInfo.mpi.timer = osRtxConfig.mpi.timer;
  }
  if (osRtxConfig.mpi.event_flags != NULL) {
    (void)osRtxMemoryPoolInit(osRtxConfig.mpi.event_flags,
                              osRtxConfig.mpi.event_flags->max_blocks,
                              osRtxConfig.mpi.event_flags->block_size,
                              osRtxConfig.mpi.event_flags->block_base);
    osRtxInfo.mpi.event_flags = osRtxConfig.mpi.event_flags;
  }
  if (osRtxConfig.mpi.mutex != NULL) {
    (void)osRtxMemoryPoolInit(osRtxConfig.mpi.mutex,
                              osRtxConfig.mpi.mutex->max_blocks,
                              osRtxConfig.mpi.mutex->block_size,
                              osRtxConfig.mpi.mutex->block_base);
    osRtxInfo.mpi.mutex = osRtxConfig.mpi.mutex;
  }
  if (osRtxConfig.mpi.semaphore != NULL) {
    (void)osRtxMemoryPoolInit(osRtxConfig.mpi.semaphore,
                              osRtxConfig.mpi.semaphore->max_blocks,
                              osRtxConfig.mpi.semaphore->block_size,
                              osRtxConfig.mpi.semaphore->block_base);
    osRtxInfo.mpi.semaphore = osRtxConfig.mpi.semaphore;
  }
  if (osRtxConfig.mpi.memory_pool != NULL) {
    (void)osRtxMemoryPoolInit(osRtxConfig.mpi.memory_pool,
                              osRtxConfig.mpi.memory_pool->max_blocks,
                              osRtxConfig.mpi.memory_pool->block_size,
                              osRtxConfig.mpi.memory_pool->block_base);
    osRtxInfo.mpi.memory_pool = osRtxConfig.mpi.memory_pool;
  }
  if (osRtxConfig.mpi.message_queue != NULL) {
    (void)osRtxMemoryPoolInit(osRtxConfig.mpi.message_queue,
                              osRtxConfig.mpi.message_queue->max_blocks,
                              osRtxConfig.mpi.message_queue->block_size,
                              osRtxConfig.mpi.message_queue->block_base);
    osRtxInfo.mpi.message_queue = osRtxConfig.mpi.message_queue;
  }

  osRtxInfo.kernel.state = osRtxKernelReady;

  EvrRtxKernelInitialized();
  return osOK;
}
/// Get RTOS Kernel Information.
/// \note API identical to osKernelGetInfo
static osStatus_t svcRtxKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size) {
  uint32_t copy_len;

  // Fill in version information when a destination is provided
  if (version != NULL) {
    version->api    = osRtxVersionAPI;
    version->kernel = osRtxVersionKernel;
  }

  // Copy the kernel identification string, clamped to the buffer size
  if ((id_buf != NULL) && (id_size != 0U)) {
    copy_len = id_size;
    if (copy_len > sizeof(osRtxKernelId)) {
      copy_len = sizeof(osRtxKernelId);
    }
    (void)memcpy(id_buf, osRtxKernelId, copy_len);
  }

  EvrRtxKernelInfoRetrieved(version, id_buf, id_size);
  return osOK;
}
/// Get the current RTOS Kernel state.
/// \note API identical to osKernelGetState
static osKernelState_t svcRtxKernelGetState (void) {
  osKernelState_t current;

  // Snapshot the kernel state once, report and return the same value
  current = osRtxKernelState();
  EvrRtxKernelGetState(current);
  return current;
}
/// Start the RTOS Kernel scheduler.
/// Requires the kernel in Ready state; starts system threads, the SVC
/// mechanism and the RTOS tick, then switches to the highest-priority
/// ready thread.
/// \note API identical to osKernelStart
static osStatus_t svcRtxKernelStart (void) {
  os_thread_t *thread;

  if (osRtxInfo.kernel.state != osRtxKernelReady) {
    EvrRtxKernelError(osRtxErrorKernelNotReady);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osError;
  }

  // Thread startup (Idle and Timer Thread)
  if (!osRtxThreadStartup()) {
    EvrRtxKernelError((int32_t)osError);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osError;
  }

  // Setup SVC and PendSV System Service Calls
  SVC_Setup();

  // Setup RTOS Tick
  if (OS_Tick_Setup(osRtxConfig.tick_freq, OS_TICK_HANDLER) != 0) {
    EvrRtxKernelError((int32_t)osError);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osError;
  }
  osRtxInfo.tick_irqn = OS_Tick_GetIRQn();

  // Enable RTOS Tick
  OS_Tick_Enable();

  // Switch to Ready Thread with highest Priority
  thread = osRtxThreadListGet(&osRtxInfo.thread.ready);
  osRtxThreadSwitch(thread);

  osRtxInfo.kernel.state = osRtxKernelRunning;

  EvrRtxKernelStarted();
  return osOK;
}
/// Lock the RTOS Kernel scheduler.
/// \note API identical to osKernelLock
/// \return previous lock state (0 = unlocked, 1 = already locked) or an
///         error code cast to int32_t.
static int32_t svcRtxKernelLock (void) {
  int32_t lock;

  switch (osRtxInfo.kernel.state) {
    case osRtxKernelRunning:
#ifdef RTX_SAFETY_CLASS
      // Check the safety class: a thread below the kernel protect class
      // may not lock the scheduler
      if ((osRtxThreadGetRunning()->attr >> osRtxAttrClass_Pos) <
          (osRtxInfo.kernel.protect >> osRtxKernelProtectClass_Pos)) {
        EvrRtxKernelError((int32_t)osErrorSafetyClass);
        lock = (int32_t)osErrorSafetyClass;
        break;
      }
#endif
      osRtxInfo.kernel.state = osRtxKernelLocked;
      EvrRtxKernelLocked(0);
      lock = 0;
      break;
    case osRtxKernelLocked:
      // Already locked - report previous state without change
      EvrRtxKernelLocked(1);
      lock = 1;
      break;
    default:
      // Locking is only meaningful while Running or Locked
      EvrRtxKernelError((int32_t)osError);
      lock = (int32_t)osError;
      break;
  }
  return lock;
}
/// Unlock the RTOS Kernel scheduler.
/// \note API identical to osKernelUnlock
/// \return previous lock state (0 = was unlocked, 1 = was locked) or an
///         error code cast to int32_t.
static int32_t svcRtxKernelUnlock (void) {
  int32_t previous;

  if (osRtxInfo.kernel.state == osRtxKernelRunning) {
    // Scheduler was not locked - nothing to do
    EvrRtxKernelUnlocked(0);
    previous = 0;
  } else if (osRtxInfo.kernel.state == osRtxKernelLocked) {
    // Release the lock and resume normal scheduling
    osRtxInfo.kernel.state = osRtxKernelRunning;
    EvrRtxKernelUnlocked(1);
    previous = 1;
  } else {
    // Unlocking is only meaningful while Running or Locked
    EvrRtxKernelError((int32_t)osError);
    previous = (int32_t)osError;
  }
  return previous;
}
/// Restore the RTOS Kernel scheduler lock state.
/// \note API identical to osKernelRestoreLock
/// \param[in]  lock  lock state to restore (0 = unlocked, 1 = locked),
///                   typically obtained from osKernelLock/osKernelUnlock.
static int32_t svcRtxKernelRestoreLock (int32_t lock) {
  int32_t lock_new;

  switch (osRtxInfo.kernel.state) {
    case osRtxKernelRunning:
    case osRtxKernelLocked:
#ifdef RTX_SAFETY_CLASS
      // Check the safety class: a thread below the kernel protect class
      // may not manipulate the scheduler lock
      if ((osRtxThreadGetRunning()->attr >> osRtxAttrClass_Pos) <
          (osRtxInfo.kernel.protect >> osRtxKernelProtectClass_Pos)) {
        EvrRtxKernelError((int32_t)osErrorSafetyClass);
        lock_new = (int32_t)osErrorSafetyClass;
        break;
      }
#endif
      switch (lock) {
        case 0:
          osRtxInfo.kernel.state = osRtxKernelRunning;
          EvrRtxKernelLockRestored(0);
          lock_new = 0;
          break;
        case 1:
          osRtxInfo.kernel.state = osRtxKernelLocked;
          EvrRtxKernelLockRestored(1);
          lock_new = 1;
          break;
        default:
          // Only 0 and 1 are valid lock states
          EvrRtxKernelError((int32_t)osError);
          lock_new = (int32_t)osError;
          break;
      }
      break;
    default:
      // Restoring is only meaningful while Running or Locked
      EvrRtxKernelError((int32_t)osError);
      lock_new = (int32_t)osError;
      break;
  }
  return lock_new;
}
/// Suspend the RTOS Kernel scheduler.
/// Blocks the kernel (tick off, switching deferred) and reports how long
/// the system may sleep until the next kernel event.
/// \note API identical to osKernelSuspend
/// \return suggested sleep time in ticks (0 on error).
static uint32_t svcRtxKernelSuspend (void) {
  uint32_t delay;

  if (osRtxInfo.kernel.state != osRtxKernelRunning) {
    EvrRtxKernelError(osRtxErrorKernelNotRunning);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return 0U;
  }

#ifdef RTX_SAFETY_CLASS
  // Check the safety class of the calling thread
  if ((osRtxThreadGetRunning()->attr >> osRtxAttrClass_Pos) <
      (osRtxInfo.kernel.protect >> osRtxKernelProtectClass_Pos)) {
    EvrRtxKernelError((int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return 0U;
  }
#endif

  KernelBlock();

  osRtxInfo.kernel.state = osRtxKernelSuspended;

  // Sleep until the nearest pending delay/watchdog/timer event
  delay = GetKernelSleepTime();

  EvrRtxKernelSuspended(delay);
  return delay;
}
/// Resume the RTOS Kernel scheduler.
/// Advances delay/timer/watchdog bookkeeping by the slept ticks, then
/// replays any remaining ticks one by one so that every expiry is
/// processed, and finally unblocks the kernel.
/// \note API identical to osKernelResume
/// \param[in]  sleep_ticks  number of ticks actually slept.
static void svcRtxKernelResume (uint32_t sleep_ticks) {
  os_thread_t *thread;
  os_timer_t  *timer;
  uint32_t     delay;
  uint32_t     ticks, kernel_tick;

  if (osRtxInfo.kernel.state != osRtxKernelSuspended) {
    EvrRtxKernelResumed();
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return;
  }

  delay = GetKernelSleepTime();

  // Fast-forward at most up to (but not including) the nearest expiry;
  // the remaining ticks are replayed individually below
  if (sleep_ticks >= delay) {
    ticks = delay - 1U;
  } else {
    ticks = sleep_ticks;
  }

  // Update Thread Delay sleep ticks
  thread = osRtxInfo.thread.delay_list;
  if (thread != NULL) {
    thread->delay -= ticks;
  }

  // Update Timer sleep ticks
  timer = osRtxInfo.timer.list;
  if (timer != NULL) {
    timer->tick -= ticks;
  }

#ifdef RTX_THREAD_WATCHDOG
  // Update Thread Watchdog sleep ticks
  thread = osRtxInfo.thread.wdog_list;
  if (thread != NULL) {
    thread->wdog_tick -= ticks;
  }
#endif

  // Replay the remaining ticks so each pending expiry is handled
  kernel_tick = osRtxInfo.kernel.tick + sleep_ticks;
  osRtxInfo.kernel.tick += ticks;
  while (osRtxInfo.kernel.tick != kernel_tick) {
    osRtxInfo.kernel.tick++;

    // Process Thread Delays
    osRtxThreadDelayTick();
    // Process Timers
    if (osRtxInfo.timer.tick != NULL) {
      osRtxInfo.timer.tick();
    }
#ifdef RTX_THREAD_WATCHDOG
    // Process Watchdog Timers
    osRtxThreadWatchdogTick();
#endif
  }

  osRtxInfo.kernel.state = osRtxKernelRunning;

  osRtxThreadDispatch(NULL);

  KernelUnblock();

  EvrRtxKernelResumed();
}
#ifdef RTX_SAFETY_CLASS
/// Protect the RTOS Kernel scheduler access.
/// Raises the minimum safety class required for scheduler operations.
/// \note API identical to osKernelProtect
/// \param[in]  safety_class  minimum safety class (0..15).
static osStatus_t svcRtxKernelProtect (uint32_t safety_class) {
  uint32_t   thread_class;
  osStatus_t status;

  // Check parameters: safety class is a 4-bit value
  if (safety_class > 0x0FU) {
    EvrRtxKernelError((int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }

  switch (osRtxInfo.kernel.state) {
    case osRtxKernelInactive:
      EvrRtxKernelError(osRtxErrorKernelNotReady);
      status = osError;
      break;
    case osRtxKernelReady:
      // Before start: set the protect class unconditionally
      osRtxInfo.kernel.protect &= (uint8_t)~osRtxKernelProtectClass_Msk;
      osRtxInfo.kernel.protect |= (uint8_t)(safety_class << osRtxKernelProtectClass_Pos);
      EvrRtxKernelProtected();
      status = osOK;
      break;
    case osRtxKernelRunning:
      // Check the safety class: the caller may neither raise protection
      // above its own class nor act while below the current protect class
      thread_class = (uint32_t)osRtxThreadGetRunning()->attr >> osRtxAttrClass_Pos;
      if ((safety_class > thread_class) ||
          (thread_class < ((uint32_t)osRtxInfo.kernel.protect >> osRtxKernelProtectClass_Pos))) {
        EvrRtxKernelError((int32_t)osErrorSafetyClass);
        status = osErrorSafetyClass;
        break;
      }
      osRtxInfo.kernel.protect &= (uint8_t)~osRtxKernelProtectClass_Msk;
      osRtxInfo.kernel.protect |= (uint8_t)(safety_class << osRtxKernelProtectClass_Pos);
      EvrRtxKernelProtected();
      status = osOK;
      break;
    case osRtxKernelLocked:
    case osRtxKernelSuspended:
      EvrRtxKernelError(osRtxErrorKernelNotRunning);
      status = osError;
      break;
    default:
      // Should never come here
      status = osError;
      break;
  }

  return status;
}
/// Destroy objects for specified safety classes.
/// Deletes all RTOS objects (mutexes, semaphores, pools, queues, event
/// flags, timers) and threads whose safety class is selected by \a mode,
/// including - as the very last step - the running thread itself.
/// \note API identical to osKernelDestroyClass
/// \param[in]  safety_class  safety class (0..15).
/// \param[in]  mode          osSafetyWithSameClass / osSafetyWithLowerClass bits.
static osStatus_t svcRtxKernelDestroyClass (uint32_t safety_class, uint32_t mode) {
  os_thread_t *thread;
  os_thread_t *thread_next;

  // Check parameters: safety class is a 4-bit value
  if (safety_class > 0x0FU) {
    EvrRtxKernelError((int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }

  // Check running thread safety class (when called from thread):
  // a thread may not destroy a class above its own privileges
  thread = osRtxThreadGetRunning();
  if ((thread != NULL) && IsSVCallIrq()) {
    if ((((mode & osSafetyWithSameClass)  != 0U) &&
         ((thread->attr >> osRtxAttrClass_Pos) < (uint8_t)safety_class)) ||
        (((mode & osSafetyWithLowerClass) != 0U) &&
         (((thread->attr >> osRtxAttrClass_Pos) + 1U) < (uint8_t)safety_class))) {
      EvrRtxKernelError((int32_t)osErrorSafetyClass);
      //lint -e{904} "Return statement before end of function" [MISRA Note 1]
      return osErrorSafetyClass;
    }
  }

  // Delete RTOS objects for safety class
  osRtxMutexDeleteClass(safety_class, mode);
  osRtxSemaphoreDeleteClass(safety_class, mode);
  osRtxMemoryPoolDeleteClass(safety_class, mode);
  osRtxMessageQueueDeleteClass(safety_class, mode);
  osRtxEventFlagsDeleteClass(safety_class, mode);
  osRtxTimerDeleteClass(safety_class, mode);

  // Threads in Wait List
  thread = osRtxInfo.thread.wait_list;
  while (thread != NULL) {
    // Save successor first: destruction unlinks 'thread'
    thread_next = thread->delay_next;
    if ((((mode & osSafetyWithSameClass)  != 0U) &&
         ((thread->attr >> osRtxAttrClass_Pos) == (uint8_t)safety_class)) ||
        (((mode & osSafetyWithLowerClass) != 0U) &&
         ((thread->attr >> osRtxAttrClass_Pos) <  (uint8_t)safety_class))) {
      osRtxThreadListRemove(thread);
      osRtxThreadDelayRemove(thread);
#ifdef RTX_THREAD_WATCHDOG
      osRtxThreadWatchdogRemove(thread);
#endif
      // Release owned mutexes and wake potential joiners before destroying
      osRtxMutexOwnerRelease(thread->mutex_list);
      osRtxThreadJoinWakeup(thread);
      osRtxThreadDestroy(thread);
    }
    thread = thread_next;
  }

  // Threads in Delay List
  thread = osRtxInfo.thread.delay_list;
  while (thread != NULL) {
    thread_next = thread->delay_next;
    if ((((mode & osSafetyWithSameClass)  != 0U) &&
         ((thread->attr >> osRtxAttrClass_Pos) == (uint8_t)safety_class)) ||
        (((mode & osSafetyWithLowerClass) != 0U) &&
         ((thread->attr >> osRtxAttrClass_Pos) <  (uint8_t)safety_class))) {
      osRtxThreadListRemove(thread);
      osRtxThreadDelayRemove(thread);
#ifdef RTX_THREAD_WATCHDOG
      osRtxThreadWatchdogRemove(thread);
#endif
      osRtxMutexOwnerRelease(thread->mutex_list);
      osRtxThreadJoinWakeup(thread);
      osRtxThreadDestroy(thread);
    }
    thread = thread_next;
  }

  // Threads in Ready List
  thread = osRtxInfo.thread.ready.thread_list;
  while (thread != NULL) {
    thread_next = thread->thread_next;
    if ((((mode & osSafetyWithSameClass)  != 0U) &&
         ((thread->attr >> osRtxAttrClass_Pos) == (uint8_t)safety_class)) ||
        (((mode & osSafetyWithLowerClass) != 0U) &&
         ((thread->attr >> osRtxAttrClass_Pos) <  (uint8_t)safety_class))) {
      osRtxThreadListRemove(thread);
#ifdef RTX_THREAD_WATCHDOG
      osRtxThreadWatchdogRemove(thread);
#endif
      osRtxMutexOwnerRelease(thread->mutex_list);
      osRtxThreadJoinWakeup(thread);
      osRtxThreadDestroy(thread);
    }
    thread = thread_next;
  }

  // Running Thread - handled last because it requires a context switch
  thread = osRtxThreadGetRunning();
  if ((thread != NULL) &&
      ((((mode & osSafetyWithSameClass)  != 0U) &&
        ((thread->attr >> osRtxAttrClass_Pos) == (uint8_t)safety_class)) ||
       (((mode & osSafetyWithLowerClass) != 0U) &&
        ((thread->attr >> osRtxAttrClass_Pos) <  (uint8_t)safety_class)))) {
    // A replacement ready thread must exist and the kernel must be running
    if ((osRtxKernelGetState() != osRtxKernelRunning) ||
        (osRtxInfo.thread.ready.thread_list == NULL)) {
      osRtxThreadDispatch(NULL);
      EvrRtxKernelError((int32_t)osErrorResource);
      //lint -e{904} "Return statement before end of function" [MISRA Note 1]
      return osErrorResource;
    }
#ifdef RTX_THREAD_WATCHDOG
    osRtxThreadWatchdogRemove(thread);
#endif
    osRtxMutexOwnerRelease(thread->mutex_list);
    osRtxThreadJoinWakeup(thread);
    // Switch to next Ready Thread
    osRtxThreadSwitch(osRtxThreadListGet(&osRtxInfo.thread.ready));
    // Update Stack Pointer
    thread->sp = __get_PSP();
#ifdef RTX_STACK_CHECK
    // Check Stack usage
    if (!osRtxThreadStackCheck(thread)) {
      osRtxThreadSetRunning(osRtxInfo.thread.run.next);
      (void)osRtxKernelErrorNotify(osRtxErrorStackOverflow, thread);
    }
#endif
    // Mark running thread as deleted
    osRtxThreadSetRunning(NULL);
    // Destroy Thread
    osRtxThreadDestroy(thread);
  } else {
    osRtxThreadDispatch(NULL);
  }

  return osOK;
}
#endif
/// Get the RTOS kernel tick count.
/// \return current kernel tick counter value.
/// \note API identical to osKernelGetTickCount
static uint32_t svcRtxKernelGetTickCount (void) {
  const uint32_t tick = osRtxInfo.kernel.tick;
  EvrRtxKernelGetTickCount(tick);
  return tick;
}
/// Get the RTOS kernel tick frequency.
/// \return configured kernel tick frequency (osRtxConfig.tick_freq).
/// \note API identical to osKernelGetTickFreq
static uint32_t svcRtxKernelGetTickFreq (void) {
  const uint32_t tick_freq = osRtxConfig.tick_freq;
  EvrRtxKernelGetTickFreq(tick_freq);
  return tick_freq;
}
/// Get the RTOS kernel system timer count.
/// \return system timer count combining elapsed ticks and the current
///         hardware timer counter value.
/// \note API identical to osKernelGetSysTimerCount
static uint32_t svcRtxKernelGetSysTimerCount (void) {
  // Sample the kernel tick first, then the hardware counter.
  uint32_t ticks = (uint32_t)osRtxInfo.kernel.tick;
  uint32_t cnt   = OS_Tick_GetCount();
  // If the timer wrapped between the two samples, re-read the counter and
  // account for the tick that has not been processed yet.
  if (OS_Tick_GetOverflow() != 0U) {
    cnt = OS_Tick_GetCount();
    ticks++;
  }
  cnt += ticks * OS_Tick_GetInterval();
  EvrRtxKernelGetSysTimerCount(cnt);
  return cnt;
}
/// Get the RTOS kernel system timer frequency.
/// \return system timer clock frequency reported by the OS tick driver.
/// \note API identical to osKernelGetSysTimerFreq
static uint32_t svcRtxKernelGetSysTimerFreq (void) {
  const uint32_t timer_clock = OS_Tick_GetClock();
  EvrRtxKernelGetSysTimerFreq(timer_clock);
  return timer_clock;
}
// Service Calls definitions
//lint ++flb "Library Begin" [MISRA Note 11]
// The SVC0_<n> macros generate the __svc<Name> wrapper functions (with <n>
// arguments) that the public osKernel* API below uses to enter the kernel
// service implementations via the SVC exception.
SVC0_0 (KernelInitialize, osStatus_t)
SVC0_3 (KernelGetInfo, osStatus_t, osVersion_t *, char *, uint32_t)
SVC0_0 (KernelStart, osStatus_t)
SVC0_0 (KernelLock, int32_t)
SVC0_0 (KernelUnlock, int32_t)
SVC0_1 (KernelRestoreLock, int32_t, int32_t)
SVC0_0 (KernelSuspend, uint32_t)
SVC0_1N(KernelResume, void, uint32_t)
// Safety-class services are only present when the safety extensions are built in.
#ifdef RTX_SAFETY_CLASS
SVC0_1 (KernelProtect, osStatus_t, uint32_t)
SVC0_2 (KernelDestroyClass, osStatus_t, uint32_t, uint32_t)
#endif
SVC0_0 (KernelGetState, osKernelState_t)
SVC0_0 (KernelGetTickCount, uint32_t)
SVC0_0 (KernelGetTickFreq, uint32_t)
SVC0_0 (KernelGetSysTimerCount, uint32_t)
SVC0_0 (KernelGetSysTimerFreq, uint32_t)
//lint --flb "Library End"
// ==== Library functions ====
/// RTOS Kernel Pre-Initialization Hook
/// Weak default with an intentionally empty body; the library configuration
/// overrides it when Event Recorder initialization is enabled (OS_EVR_INIT).
//lint -esym(759,osRtxKernelBeforeInit) "Prototype in header"
//lint -esym(765,osRtxKernelBeforeInit) "Global scope (can be overridden)"
//lint -esym(522,osRtxKernelBeforeInit) "Can be overridden (do not lack side-effects)"
__WEAK void osRtxKernelBeforeInit (void) {
  // Intentionally empty default implementation.
}
/// RTOS Kernel Error Notification Handler
/// Records the error event, then forwards the error to the application
/// notification handler and propagates its result.
/// \note API identical to osRtxErrorNotify
uint32_t osRtxKernelErrorNotify (uint32_t code, void *object_id) {
  uint32_t ret;
  EvrRtxKernelErrorNotify(code, object_id);
  ret = osRtxErrorNotify(code, object_id);
  return ret;
}
// ==== Public API ====
/// Initialize the RTOS Kernel.
/// \return osOK on success, osErrorISR when called from ISR context.
osStatus_t osKernelInitialize (void) {
  osRtxKernelBeforeInit();
  EvrRtxKernelInitialize();
  // Not callable from handler mode or with interrupts masked.
  if (IsException() || IsIrqMasked()) {
    EvrRtxKernelError((int32_t)osErrorISR);
    return osErrorISR;
  }
  return __svcKernelInitialize();
}
/// Get RTOS Kernel Information.
/// \param[out] version  destination for kernel version (may be NULL).
/// \param[out] id_buf   destination buffer for kernel identification string.
/// \param[in]  id_size  size of id_buf in bytes.
osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size) {
  EvrRtxKernelGetInfo(version, id_buf, id_size);
  // Call the service directly when already privileged or in handler mode;
  // otherwise go through the SVC wrapper.
  if (IsException() || IsIrqMasked() || IsPrivileged()) {
    return svcRtxKernelGetInfo(version, id_buf, id_size);
  }
  return __svcKernelGetInfo(version, id_buf, id_size);
}
/// Get the current RTOS Kernel state.
/// \return current kernel state (osKernelState_t).
osKernelState_t osKernelGetState (void) {
  // Direct call when privileged or in handler mode; SVC otherwise.
  if (IsException() || IsIrqMasked() || IsPrivileged()) {
    return svcRtxKernelGetState();
  }
  return __svcKernelGetState();
}
/// Start the RTOS Kernel scheduler.
/// \return osOK on success, osErrorISR when called from ISR context.
osStatus_t osKernelStart (void) {
  EvrRtxKernelStart();
  // The scheduler cannot be started from handler mode or with IRQs masked.
  if (IsException() || IsIrqMasked()) {
    EvrRtxKernelError((int32_t)osErrorISR);
    return osErrorISR;
  }
  return __svcKernelStart();
}
/// Lock the RTOS Kernel scheduler.
/// \return previous lock state, or osErrorISR when called from ISR context.
int32_t osKernelLock (void) {
  EvrRtxKernelLock();
  if (IsException() || IsIrqMasked()) {
    EvrRtxKernelError((int32_t)osErrorISR);
    return (int32_t)osErrorISR;
  }
  return __svcKernelLock();
}
/// Unlock the RTOS Kernel scheduler.
/// \return previous lock state, or osErrorISR when called from ISR context.
int32_t osKernelUnlock (void) {
  EvrRtxKernelUnlock();
  if (IsException() || IsIrqMasked()) {
    EvrRtxKernelError((int32_t)osErrorISR);
    return (int32_t)osErrorISR;
  }
  return __svcKernelUnlock();
}
/// Restore the RTOS Kernel scheduler lock state.
/// \param[in] lock  lock state obtained from osKernelLock or osKernelSuspend.
/// \return new lock state, or osErrorISR when called from a disallowed context.
int32_t osKernelRestoreLock (int32_t lock) {
  EvrRtxKernelRestoreLock(lock);
  if (!(IsException() || IsIrqMasked())) {
    // Normal thread context: restore through the SVC wrapper.
    return __svcKernelRestoreLock(lock);
  }
  // In exception context only fault, SVCall, PendSV and the kernel tick IRQ
  // are allowed to restore the lock state directly.
  if (IsFault() || IsSVCallIrq() || IsPendSvIrq() || IsTickIrq(osRtxInfo.tick_irqn)) {
    return svcRtxKernelRestoreLock(lock);
  }
  EvrRtxKernelError((int32_t)osErrorISR);
  return (int32_t)osErrorISR;
}
/// Suspend the RTOS Kernel scheduler.
/// \return number of ticks the system may sleep, or 0 on error (ISR context).
uint32_t osKernelSuspend (void) {
  EvrRtxKernelSuspend();
  if (IsException() || IsIrqMasked()) {
    EvrRtxKernelError((int32_t)osErrorISR);
    return 0U;
  }
  return __svcKernelSuspend();
}
/// Resume the RTOS Kernel scheduler.
/// \param[in] sleep_ticks  number of ticks that elapsed while suspended.
void osKernelResume (uint32_t sleep_ticks) {
  EvrRtxKernelResume(sleep_ticks);
  if (IsException() || IsIrqMasked()) {
    // Not callable from ISR context: report and ignore.
    EvrRtxKernelError((int32_t)osErrorISR);
    return;
  }
  __svcKernelResume(sleep_ticks);
}
#ifdef RTX_SAFETY_CLASS
/// Protect the RTOS Kernel scheduler access.
/// \param[in] safety_class  safety class required for kernel access.
/// \return osOK on success, osErrorISR when called from ISR context.
osStatus_t osKernelProtect (uint32_t safety_class) {
  EvrRtxKernelProtect(safety_class);
  if (IsException() || IsIrqMasked()) {
    EvrRtxKernelError((int32_t)osErrorISR);
    return osErrorISR;
  }
  return __svcKernelProtect(safety_class);
}
/// Destroy RTOS objects for specified safety classes.
/// \param[in] safety_class  safety class of objects to destroy.
/// \param[in] mode          osSafetyWithSameClass / osSafetyWithLowerClass mask.
/// \return osOK on success, osErrorISR when called from a disallowed context.
osStatus_t osKernelDestroyClass (uint32_t safety_class, uint32_t mode) {
  EvrRtxKernelDestroyClass(safety_class, mode);
  if (!(IsException() || IsIrqMasked())) {
    return __svcKernelDestroyClass(safety_class, mode);
  }
  // From exception context only the kernel tick IRQ may destroy directly.
  if (IsTickIrq(osRtxInfo.tick_irqn)) {
    return svcRtxKernelDestroyClass(safety_class, mode);
  }
  EvrRtxKernelError((int32_t)osErrorISR);
  return osErrorISR;
}
#endif
/// Get the RTOS kernel tick count.
/// \return current kernel tick counter value.
uint32_t osKernelGetTickCount (void) {
  // ISR context (or masked IRQs): read the value directly, no SVC.
  if (IsException() || IsIrqMasked()) {
    return svcRtxKernelGetTickCount();
  }
  return __svcKernelGetTickCount();
}
/// Get the RTOS kernel tick frequency.
/// \return configured kernel tick frequency.
uint32_t osKernelGetTickFreq (void) {
  // ISR context (or masked IRQs): read the value directly, no SVC.
  if (IsException() || IsIrqMasked()) {
    return svcRtxKernelGetTickFreq();
  }
  return __svcKernelGetTickFreq();
}
/// Get the RTOS kernel system timer count.
/// \return current system timer count.
uint32_t osKernelGetSysTimerCount (void) {
  // ISR context (or masked IRQs): read the value directly, no SVC.
  if (IsException() || IsIrqMasked()) {
    return svcRtxKernelGetSysTimerCount();
  }
  return __svcKernelGetSysTimerCount();
}
/// Get the RTOS kernel system timer frequency.
/// \return system timer frequency.
uint32_t osKernelGetSysTimerFreq (void) {
  // ISR context (or masked IRQs): read the value directly, no SVC.
  if (IsException() || IsIrqMasked()) {
    return svcRtxKernelGetSysTimerFreq();
  }
  return __svcKernelGetSysTimerFreq();
}

View File

@ -0,0 +1,879 @@
/*
* Copyright (c) 2013-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: RTX Library Configuration
*
* -----------------------------------------------------------------------------
*/
#include "rtx_os.h"
#ifdef CMSIS_device_header
#include CMSIS_device_header
#else
#include "cmsis_compiler.h"
#endif
#ifdef RTE_Compiler_EventRecorder
#include "EventRecorder.h"
#include "EventRecorderConf.h"
#endif
#include "rtx_evr.h"
// System Configuration
// ====================
// Dynamic Memory
#if (OS_DYNAMIC_MEM_SIZE != 0)
#if ((OS_DYNAMIC_MEM_SIZE % 8) != 0)
#error "Invalid Dynamic Memory size!"
#endif
static uint64_t os_mem[OS_DYNAMIC_MEM_SIZE/8] \
__attribute__((section(".bss.os")));
#endif
// Kernel Tick Frequency
#if (OS_TICK_FREQ < 1)
#error "Invalid Kernel Tick Frequency!"
#endif
// ISR FIFO Queue
#if (OS_ISR_FIFO_QUEUE < 4)
#error "Invalid ISR FIFO Queue size!"
#endif
static void *os_isr_queue[OS_ISR_FIFO_QUEUE] \
__attribute__((section(".bss.os")));
// Thread Configuration
// ====================
#if (((OS_STACK_SIZE % 8) != 0) || (OS_STACK_SIZE < 72))
#error "Invalid default Thread Stack size!"
#endif
#if (((OS_IDLE_THREAD_STACK_SIZE % 8) != 0) || (OS_IDLE_THREAD_STACK_SIZE < 72))
#error "Invalid Idle Thread Stack size!"
#endif
#if (OS_THREAD_OBJ_MEM != 0)
#if (OS_THREAD_NUM == 0)
#error "Invalid number of user Threads!"
#endif
#if ((OS_THREAD_USER_STACK_SIZE != 0) && ((OS_THREAD_USER_STACK_SIZE % 8) != 0))
#error "Invalid total Stack size!"
#endif
// Thread Control Blocks
static osRtxThread_t os_thread_cb[OS_THREAD_NUM] \
__attribute__((section(".bss.os.thread.cb")));
// Thread Default Stack
#if (OS_THREAD_DEF_STACK_NUM != 0)
static uint64_t os_thread_def_stack[(OS_THREAD_DEF_STACK_NUM*OS_STACK_SIZE)/8] \
__attribute__((section(".bss.os.thread.stack")));
#endif
// Memory Pool for Thread Control Blocks
static osRtxMpInfo_t os_mpi_thread \
__attribute__((section(".data.os.thread.mpi"))) =
{ (uint32_t)OS_THREAD_NUM, 0U, (uint32_t)osRtxThreadCbSize, &os_thread_cb[0], NULL, NULL };
// Memory Pool for Thread Default Stack
#if (OS_THREAD_DEF_STACK_NUM != 0)
static osRtxMpInfo_t os_mpi_def_stack \
__attribute__((section(".data.os.thread.mpi"))) =
{ (uint32_t)OS_THREAD_DEF_STACK_NUM, 0U, (uint32_t)OS_STACK_SIZE, &os_thread_def_stack[0], NULL, NULL };
#endif
// Memory Pool for Thread Stack
#if (OS_THREAD_USER_STACK_SIZE != 0)
static uint64_t os_thread_stack[(16 + (8*OS_THREAD_NUM) + OS_THREAD_USER_STACK_SIZE)/8] \
__attribute__((section(".bss.os.thread.stack")));
#endif
#endif // (OS_THREAD_OBJ_MEM != 0)
// Idle Thread Control Block
static osRtxThread_t os_idle_thread_cb \
__attribute__((section(".bss.os.thread.cb")));
// Idle Thread Stack
static uint64_t os_idle_thread_stack[OS_IDLE_THREAD_STACK_SIZE/8] \
__attribute__((section(".bss.os.thread.idle.stack")));
// Idle Thread Attributes
// Field order follows osThreadAttr_t: name, attr_bits, cb_mem, cb_size,
// stack_mem, stack_size, priority, tz_module, reserved.
static const osThreadAttr_t os_idle_thread_attr = {
  //lint -e{835} -e{845} "Zero argument to operator"
#if defined(OS_IDLE_THREAD_NAME)
  OS_IDLE_THREAD_NAME,                          // name (configured)
#else
  NULL,                                         // name (none)
#endif
#ifdef RTX_SAFETY_CLASS
  osSafetyClass((uint32_t)OS_IDLE_THREAD_CLASS) |
#endif
#ifdef RTX_EXECUTION_ZONE
  osThreadZone((uint32_t)OS_IDLE_THREAD_ZONE) |
#endif
  osThreadDetached,                             // attr_bits
  &os_idle_thread_cb,                           // cb_mem: static control block
  (uint32_t)sizeof(os_idle_thread_cb),          // cb_size
  &os_idle_thread_stack[0],                     // stack_mem: static stack
  (uint32_t)sizeof(os_idle_thread_stack),       // stack_size
  osPriorityIdle,                               // priority: lowest
#if defined(OS_IDLE_THREAD_TZ_MOD_ID)
  (uint32_t)OS_IDLE_THREAD_TZ_MOD_ID,           // tz_module (TrustZone)
#else
  0U,                                           // tz_module: none
#endif
  0U                                            // reserved
};
// Timer Configuration
// ===================
#if (OS_TIMER_OBJ_MEM != 0)
#if (OS_TIMER_NUM == 0)
#error "Invalid number of Timer objects!"
#endif
// Timer Control Blocks
static osRtxTimer_t os_timer_cb[OS_TIMER_NUM] \
__attribute__((section(".bss.os.timer.cb")));
// Memory Pool for Timer Control Blocks
static osRtxMpInfo_t os_mpi_timer \
__attribute__((section(".data.os.timer.mpi"))) =
{ (uint32_t)OS_TIMER_NUM, 0U, (uint32_t)osRtxTimerCbSize, &os_timer_cb[0], NULL, NULL };
#endif // (OS_TIMER_OBJ_MEM != 0)
#if ((OS_TIMER_THREAD_STACK_SIZE != 0) && (OS_TIMER_CB_QUEUE != 0))
#if (((OS_TIMER_THREAD_STACK_SIZE % 8) != 0) || (OS_TIMER_THREAD_STACK_SIZE < 96))
#error "Invalid Timer Thread Stack size!"
#endif
// Timer Thread Control Block
static osRtxThread_t os_timer_thread_cb \
__attribute__((section(".bss.os.thread.cb")));
// Timer Thread Stack
static uint64_t os_timer_thread_stack[OS_TIMER_THREAD_STACK_SIZE/8] \
__attribute__((section(".bss.os.thread.timer.stack")));
// Timer Thread Attributes
// Field order follows osThreadAttr_t: name, attr_bits, cb_mem, cb_size,
// stack_mem, stack_size, priority, tz_module, reserved.
static const osThreadAttr_t os_timer_thread_attr = {
  //lint -e{835} -e{845} "Zero argument to operator"
#if defined(OS_TIMER_THREAD_NAME)
  OS_TIMER_THREAD_NAME,                         // name (configured)
#else
  NULL,                                         // name (none)
#endif
#ifdef RTX_SAFETY_CLASS
  osSafetyClass((uint32_t)OS_TIMER_THREAD_CLASS) |
#endif
#ifdef RTX_EXECUTION_ZONE
  osThreadZone((uint32_t)OS_TIMER_THREAD_ZONE) |
#endif
  osThreadDetached,                             // attr_bits
  &os_timer_thread_cb,                          // cb_mem: static control block
  (uint32_t)sizeof(os_timer_thread_cb),         // cb_size
  &os_timer_thread_stack[0],                    // stack_mem: static stack
  (uint32_t)sizeof(os_timer_thread_stack),      // stack_size
  //lint -e{9030} -e{9034} "cast from signed to enum"
  (osPriority_t)OS_TIMER_THREAD_PRIO,           // priority (configured)
#if defined(OS_TIMER_THREAD_TZ_MOD_ID)
  (uint32_t)OS_TIMER_THREAD_TZ_MOD_ID,          // tz_module (TrustZone)
#else
  0U,                                           // tz_module: none
#endif
  0U                                            // reserved
};
// Timer Message Queue Control Block
static osRtxMessageQueue_t os_timer_mq_cb \
__attribute__((section(".bss.os.msgqueue.cb")));
// Timer Message Queue Data
static uint32_t os_timer_mq_data[osRtxMessageQueueMemSize(OS_TIMER_CB_QUEUE,8)/4] \
__attribute__((section(".bss.os.msgqueue.mem")));
// Timer Message Queue Attributes
// Field order follows osMessageQueueAttr_t: name, attr_bits, cb_mem,
// cb_size, mq_mem, mq_size.
static const osMessageQueueAttr_t os_timer_mq_attr = {
  //lint -e{835} -e{845} "Zero argument to operator"
  NULL,                                         // name (none)
#ifdef RTX_SAFETY_CLASS
  osSafetyClass((uint32_t)OS_TIMER_THREAD_CLASS) |
#endif
  0U,                                           // attr_bits
  &os_timer_mq_cb,                              // cb_mem: static control block
  (uint32_t)sizeof(os_timer_mq_cb),             // cb_size
  &os_timer_mq_data[0],                         // mq_mem: static queue storage
  (uint32_t)sizeof(os_timer_mq_data)            // mq_size
};
extern int32_t osRtxTimerSetup (void);
extern void osRtxTimerThread (void *argument);
#endif // ((OS_TIMER_THREAD_STACK_SIZE != 0) && (OS_TIMER_CB_QUEUE != 0))
// Event Flags Configuration
// =========================
#if (OS_EVFLAGS_OBJ_MEM != 0)
#if (OS_EVFLAGS_NUM == 0)
#error "Invalid number of Event Flags objects!"
#endif
// Event Flags Control Blocks
static osRtxEventFlags_t os_ef_cb[OS_EVFLAGS_NUM] \
__attribute__((section(".bss.os.evflags.cb")));
// Memory Pool for Event Flags Control Blocks
static osRtxMpInfo_t os_mpi_ef \
__attribute__((section(".data.os.evflags.mpi"))) =
{ (uint32_t)OS_EVFLAGS_NUM, 0U, (uint32_t)osRtxEventFlagsCbSize, &os_ef_cb[0], NULL, NULL };
#endif // (OS_EVFLAGS_OBJ_MEM != 0)
// Mutex Configuration
// ===================
#if (OS_MUTEX_OBJ_MEM != 0)
#if (OS_MUTEX_NUM == 0)
#error "Invalid number of Mutex objects!"
#endif
// Mutex Control Blocks
static osRtxMutex_t os_mutex_cb[OS_MUTEX_NUM] \
__attribute__((section(".bss.os.mutex.cb")));
// Memory Pool for Mutex Control Blocks
static osRtxMpInfo_t os_mpi_mutex \
__attribute__((section(".data.os.mutex.mpi"))) =
{ (uint32_t)OS_MUTEX_NUM, 0U, (uint32_t)osRtxMutexCbSize, &os_mutex_cb[0], NULL, NULL };
#endif // (OS_MUTEX_OBJ_MEM != 0)
// Semaphore Configuration
// =======================
#if (OS_SEMAPHORE_OBJ_MEM != 0)
#if (OS_SEMAPHORE_NUM == 0)
#error "Invalid number of Semaphore objects!"
#endif
// Semaphore Control Blocks
static osRtxSemaphore_t os_semaphore_cb[OS_SEMAPHORE_NUM] \
__attribute__((section(".bss.os.semaphore.cb")));
// Memory Pool for Semaphore Control Blocks
static osRtxMpInfo_t os_mpi_semaphore \
__attribute__((section(".data.os.semaphore.mpi"))) =
{ (uint32_t)OS_SEMAPHORE_NUM, 0U, (uint32_t)osRtxSemaphoreCbSize, &os_semaphore_cb[0], NULL, NULL };
#endif // (OS_SEMAPHORE_OBJ_MEM != 0)
// Memory Pool Configuration
// =========================
#if (OS_MEMPOOL_OBJ_MEM != 0)
#if (OS_MEMPOOL_NUM == 0)
#error "Invalid number of Memory Pool objects!"
#endif
// Memory Pool Control Blocks
static osRtxMemoryPool_t os_mp_cb[OS_MEMPOOL_NUM] \
__attribute__((section(".bss.os.mempool.cb")));
// Memory Pool for Memory Pool Control Blocks
static osRtxMpInfo_t os_mpi_mp \
__attribute__((section(".data.os.mempool.mpi"))) =
{ (uint32_t)OS_MEMPOOL_NUM, 0U, (uint32_t)osRtxMemoryPoolCbSize, &os_mp_cb[0], NULL, NULL };
// Memory Pool for Memory Pool Data Storage
#if (OS_MEMPOOL_DATA_SIZE != 0)
#if ((OS_MEMPOOL_DATA_SIZE % 8) != 0)
#error "Invalid Data Memory size for Memory Pools!"
#endif
static uint64_t os_mp_data[(16 + (8*OS_MEMPOOL_NUM) + OS_MEMPOOL_DATA_SIZE)/8] \
__attribute__((section(".bss.os.mempool.mem")));
#endif
#endif // (OS_MEMPOOL_OBJ_MEM != 0)
// Message Queue Configuration
// ===========================
#if (OS_MSGQUEUE_OBJ_MEM != 0)
#if (OS_MSGQUEUE_NUM == 0)
#error "Invalid number of Message Queue objects!"
#endif
// Message Queue Control Blocks
static osRtxMessageQueue_t os_mq_cb[OS_MSGQUEUE_NUM] \
__attribute__((section(".bss.os.msgqueue.cb")));
// Memory Pool for Message Queue Control Blocks
static osRtxMpInfo_t os_mpi_mq \
__attribute__((section(".data.os.msgqueue.mpi"))) =
{ (uint32_t)OS_MSGQUEUE_NUM, 0U, (uint32_t)osRtxMessageQueueCbSize, &os_mq_cb[0], NULL, NULL };
// Memory Pool for Message Queue Data Storage
#if (OS_MSGQUEUE_DATA_SIZE != 0)
#if ((OS_MSGQUEUE_DATA_SIZE % 8) != 0)
#error "Invalid Data Memory size for Message Queues!"
#endif
static uint64_t os_mq_data[(16 + ((8+12)*OS_MSGQUEUE_NUM) + OS_MSGQUEUE_DATA_SIZE + 7)/8] \
__attribute__((section(".bss.os.msgqueue.mem")));
#endif
#endif // (OS_MSGQUEUE_OBJ_MEM != 0)
// Event Recorder Configuration
// ============================
#if (defined(OS_EVR_INIT) && (OS_EVR_INIT != 0))
#ifdef RTE_Compiler_EventRecorder
// Event Recorder Initialize
// Initializes the Event Recorder and applies the per-component recording
// configuration: for each RTX component group, bit 7 of OS_EVR_<GROUP>_LEVEL
// selects whether the group is configured here at all; the low nibble gives
// the event levels to enable, and its complement is explicitly disabled.
__STATIC_INLINE void evr_initialize (void) {
  (void)EventRecorderInitialize(OS_EVR_LEVEL, (uint32_t)OS_EVR_START);
#if ((OS_EVR_MEMORY_LEVEL & 0x80U) != 0U)
  (void)EventRecorderEnable( OS_EVR_MEMORY_LEVEL & 0x0FU, EvtRtxMemoryNo, EvtRtxMemoryNo);
  (void)EventRecorderDisable(~OS_EVR_MEMORY_LEVEL & 0x0FU, EvtRtxMemoryNo, EvtRtxMemoryNo);
#endif
#if ((OS_EVR_KERNEL_LEVEL & 0x80U) != 0U)
  (void)EventRecorderEnable( OS_EVR_KERNEL_LEVEL & 0x0FU, EvtRtxKernelNo, EvtRtxKernelNo);
  (void)EventRecorderDisable(~OS_EVR_KERNEL_LEVEL & 0x0FU, EvtRtxKernelNo, EvtRtxKernelNo);
#endif
#if ((OS_EVR_THREAD_LEVEL & 0x80U) != 0U)
  (void)EventRecorderEnable( OS_EVR_THREAD_LEVEL & 0x0FU, EvtRtxThreadNo, EvtRtxThreadNo);
  (void)EventRecorderDisable(~OS_EVR_THREAD_LEVEL & 0x0FU, EvtRtxThreadNo, EvtRtxThreadNo);
#endif
#if ((OS_EVR_WAIT_LEVEL & 0x80U) != 0U)
  (void)EventRecorderEnable( OS_EVR_WAIT_LEVEL & 0x0FU, EvtRtxWaitNo, EvtRtxWaitNo);
  (void)EventRecorderDisable(~OS_EVR_WAIT_LEVEL & 0x0FU, EvtRtxWaitNo, EvtRtxWaitNo);
#endif
#if ((OS_EVR_THFLAGS_LEVEL & 0x80U) != 0U)
  (void)EventRecorderEnable( OS_EVR_THFLAGS_LEVEL & 0x0FU, EvtRtxThreadFlagsNo, EvtRtxThreadFlagsNo);
  (void)EventRecorderDisable(~OS_EVR_THFLAGS_LEVEL & 0x0FU, EvtRtxThreadFlagsNo, EvtRtxThreadFlagsNo);
#endif
#if ((OS_EVR_EVFLAGS_LEVEL & 0x80U) != 0U)
  (void)EventRecorderEnable( OS_EVR_EVFLAGS_LEVEL & 0x0FU, EvtRtxEventFlagsNo, EvtRtxEventFlagsNo);
  (void)EventRecorderDisable(~OS_EVR_EVFLAGS_LEVEL & 0x0FU, EvtRtxEventFlagsNo, EvtRtxEventFlagsNo);
#endif
#if ((OS_EVR_TIMER_LEVEL & 0x80U) != 0U)
  (void)EventRecorderEnable( OS_EVR_TIMER_LEVEL & 0x0FU, EvtRtxTimerNo, EvtRtxTimerNo);
  (void)EventRecorderDisable(~OS_EVR_TIMER_LEVEL & 0x0FU, EvtRtxTimerNo, EvtRtxTimerNo);
#endif
#if ((OS_EVR_MUTEX_LEVEL & 0x80U) != 0U)
  (void)EventRecorderEnable( OS_EVR_MUTEX_LEVEL & 0x0FU, EvtRtxMutexNo, EvtRtxMutexNo);
  (void)EventRecorderDisable(~OS_EVR_MUTEX_LEVEL & 0x0FU, EvtRtxMutexNo, EvtRtxMutexNo);
#endif
#if ((OS_EVR_SEMAPHORE_LEVEL & 0x80U) != 0U)
  (void)EventRecorderEnable( OS_EVR_SEMAPHORE_LEVEL & 0x0FU, EvtRtxSemaphoreNo, EvtRtxSemaphoreNo);
  (void)EventRecorderDisable(~OS_EVR_SEMAPHORE_LEVEL & 0x0FU, EvtRtxSemaphoreNo, EvtRtxSemaphoreNo);
#endif
#if ((OS_EVR_MEMPOOL_LEVEL & 0x80U) != 0U)
  (void)EventRecorderEnable( OS_EVR_MEMPOOL_LEVEL & 0x0FU, EvtRtxMemoryPoolNo, EvtRtxMemoryPoolNo);
  (void)EventRecorderDisable(~OS_EVR_MEMPOOL_LEVEL & 0x0FU, EvtRtxMemoryPoolNo, EvtRtxMemoryPoolNo);
#endif
#if ((OS_EVR_MSGQUEUE_LEVEL & 0x80U) != 0U)
  (void)EventRecorderEnable( OS_EVR_MSGQUEUE_LEVEL & 0x0FU, EvtRtxMessageQueueNo, EvtRtxMessageQueueNo);
  (void)EventRecorderDisable(~OS_EVR_MSGQUEUE_LEVEL & 0x0FU, EvtRtxMessageQueueNo, EvtRtxMessageQueueNo);
#endif
}
#else
#warning "Event Recorder cannot be initialized (Event Recorder component is not selected)!"
#define evr_initialize()
#endif
#endif // (OS_EVR_INIT != 0)
// OS Configuration
// ================
// Aggregates the compile-time configuration consumed by the RTX kernel:
// feature flags, tick/robin settings, ISR FIFO queue, memory pools and the
// idle/timer thread attributes.
const osRtxConfig_t osRtxConfig \
__USED \
__attribute__((section(".rodata"))) =
{
  //lint -e{835} "Zero argument to operator"
  // Feature flags, OR-ed from the enabled configuration options.
  0U   // Flags
#if (OS_PRIVILEGE_MODE != 0)
  | osRtxConfigPrivilegedMode
#endif
#if (OS_STACK_CHECK != 0)
  | osRtxConfigStackCheck
#endif
#if (OS_STACK_WATERMARK != 0)
  | osRtxConfigStackWatermark
#endif
#ifdef RTX_SAFETY_FEATURES
  | osRtxConfigSafetyFeatures
#ifdef RTX_SAFETY_CLASS
  | osRtxConfigSafetyClass
#endif
#ifdef RTX_EXECUTION_ZONE
  | osRtxConfigExecutionZone
#endif
#ifdef RTX_THREAD_WATCHDOG
  | osRtxConfigThreadWatchdog
#endif
#ifdef RTX_OBJ_PTR_CHECK
  | osRtxConfigObjPtrCheck
#endif
#ifdef RTX_SVC_PTR_CHECK
  | osRtxConfigSVCPtrCheck
#endif
#endif
  ,
  // Kernel tick frequency and round-robin timeout (0 = round-robin disabled).
  (uint32_t)OS_TICK_FREQ,
#if (OS_ROBIN_ENABLE != 0)
  (uint32_t)OS_ROBIN_TIMEOUT,
#else
  0U,
#endif
  // ISR FIFO queue: data, max entries, padding.
  { &os_isr_queue[0], (uint16_t)(sizeof(os_isr_queue)/sizeof(void *)), 0U },
  {
    // Memory Pools (Variable Block Size)
#if ((OS_THREAD_OBJ_MEM != 0) && (OS_THREAD_USER_STACK_SIZE != 0))
    &os_thread_stack[0], sizeof(os_thread_stack),
#else
    NULL, 0U,
#endif
#if ((OS_MEMPOOL_OBJ_MEM != 0) && (OS_MEMPOOL_DATA_SIZE != 0))
    &os_mp_data[0], sizeof(os_mp_data),
#else
    NULL, 0U,
#endif
#if ((OS_MSGQUEUE_OBJ_MEM != 0) && (OS_MSGQUEUE_DATA_SIZE != 0))
    &os_mq_data[0], sizeof(os_mq_data),
#else
    NULL, 0U,
#endif
#if (OS_DYNAMIC_MEM_SIZE != 0)
    &os_mem[0], (uint32_t)OS_DYNAMIC_MEM_SIZE,
#else
    NULL, 0U
#endif
  },
  {
    // Memory Pools (Fixed Block Size)
#if (OS_THREAD_OBJ_MEM != 0)
#if (OS_THREAD_DEF_STACK_NUM != 0)
    &os_mpi_def_stack,
#else
    NULL,
#endif
    &os_mpi_thread,
#else
    NULL,
    NULL,
#endif
#if (OS_TIMER_OBJ_MEM != 0)
    &os_mpi_timer,
#else
    NULL,
#endif
#if (OS_EVFLAGS_OBJ_MEM != 0)
    &os_mpi_ef,
#else
    NULL,
#endif
#if (OS_MUTEX_OBJ_MEM != 0)
    &os_mpi_mutex,
#else
    NULL,
#endif
#if (OS_SEMAPHORE_OBJ_MEM != 0)
    &os_mpi_semaphore,
#else
    NULL,
#endif
#if (OS_MEMPOOL_OBJ_MEM != 0)
    &os_mpi_mp,
#else
    NULL,
#endif
#if (OS_MSGQUEUE_OBJ_MEM != 0)
    &os_mpi_mq,
#else
    NULL,
#endif
  },
  // Default thread stack size and idle thread attributes.
  (uint32_t)OS_STACK_SIZE,
  &os_idle_thread_attr,
  // Timer thread/queue configuration (all NULL/0 when timers are disabled).
#if ((OS_TIMER_THREAD_STACK_SIZE != 0) && (OS_TIMER_CB_QUEUE != 0))
  &os_timer_thread_attr,
  osRtxTimerThread,
  osRtxTimerSetup,
  &os_timer_mq_attr,
  (uint32_t)OS_TIMER_CB_QUEUE
#else
  NULL,
  NULL,
  NULL,
  NULL,
  0U
#endif
};
// Non weak reference to library irq module
//lint -esym(526,irqRtxLib) "Defined by Exception handlers"
//lint -esym(714,irqRtxLibRef) "Non weak reference"
//lint -esym(765,irqRtxLibRef) "Global scope"
extern const uint8_t irqRtxLib;
extern const uint8_t * const irqRtxLibRef;
const uint8_t * const irqRtxLibRef = &irqRtxLib;
// Default User SVC Table
//lint -esym(714,osRtxUserSVC) "Referenced by Exception handlers"
//lint -esym(765,osRtxUserSVC) "Global scope"
//lint -e{9067} "extern array declared without size"
extern void * const osRtxUserSVC[];
__WEAK void * const osRtxUserSVC[1] = { (void *)0 };
#if (defined(RTX_SAFETY_CLASS) && defined(RTX_OBJ_PTR_CHECK) && \
!((OS_TIMER_THREAD_STACK_SIZE != 0) && (OS_TIMER_CB_QUEUE != 0)))
extern void osRtxTimerDeleteClass(uint32_t safety_class, uint32_t mode);
// Default Timer Delete Class Function.
// Weak empty stub used when the timer thread is not configured: there are no
// timer objects, so deleting timers for a safety class is a no-op.
__WEAK void osRtxTimerDeleteClass(uint32_t safety_class, uint32_t mode) {
  (void)safety_class;
  (void)mode;
}
#endif
#ifdef RTX_THREAD_WATCHDOG
// Default Watchdog Alarm Handler.
// Weak default returning 0U; applications override this to handle a thread
// watchdog expiry (the return value semantics are defined by the RTX
// watchdog API — see the RTX5 documentation).
__WEAK uint32_t osWatchdogAlarm_Handler (osThreadId_t thread_id) {
  (void)thread_id;
  return 0U;
}
#endif
#ifdef RTX_EXECUTION_ZONE
// Default Zone Setup Function.
// Weak empty stub; applications override it to configure MPU/peripheral
// protection when switching to the given execution zone.
__WEAK void osZoneSetup_Callback (uint32_t zone) {
  (void)zone;
}
#endif
// OS Sections
// ===========
#if defined(__CC_ARM) || \
(defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050))
// Initialized through linker
//lint -esym(728, __os_thread_cb_start__, __os_thread_cb_end__)
//lint -esym(728, __os_timer_cb_start__, __os_timer_cb_end__)
//lint -esym(728, __os_evflags_cb_start__, __os_evflags_cb_end__)
//lint -esym(728, __os_mutex_cb_start__, __os_mutex_cb_end__)
//lint -esym(728, __os_semaphore_cb_start__, __os_semaphore_cb_end__)
//lint -esym(728, __os_mempool_cb_start__, __os_mempool_cb_end__)
//lint -esym(728, __os_msgqueue_cb_start__, __os_msgqueue_cb_end__)
static const uint32_t __os_thread_cb_start__ __attribute__((weakref(".bss.os.thread.cb$$Base")));
static const uint32_t __os_thread_cb_end__ __attribute__((weakref(".bss.os.thread.cb$$Limit")));
static const uint32_t __os_timer_cb_start__ __attribute__((weakref(".bss.os.timer.cb$$Base")));
static const uint32_t __os_timer_cb_end__ __attribute__((weakref(".bss.os.timer.cb$$Limit")));
static const uint32_t __os_evflags_cb_start__ __attribute__((weakref(".bss.os.evflags.cb$$Base")));
static const uint32_t __os_evflags_cb_end__ __attribute__((weakref(".bss.os.evflags.cb$$Limit")));
static const uint32_t __os_mutex_cb_start__ __attribute__((weakref(".bss.os.mutex.cb$$Base")));
static const uint32_t __os_mutex_cb_end__ __attribute__((weakref(".bss.os.mutex.cb$$Limit")));
static const uint32_t __os_semaphore_cb_start__ __attribute__((weakref(".bss.os.semaphore.cb$$Base")));
static const uint32_t __os_semaphore_cb_end__ __attribute__((weakref(".bss.os.semaphore.cb$$Limit")));
static const uint32_t __os_mempool_cb_start__ __attribute__((weakref(".bss.os.mempool.cb$$Base")));
static const uint32_t __os_mempool_cb_end__ __attribute__((weakref(".bss.os.mempool.cb$$Limit")));
static const uint32_t __os_msgqueue_cb_start__ __attribute__((weakref(".bss.os.msgqueue.cb$$Base")));
static const uint32_t __os_msgqueue_cb_end__ __attribute__((weakref(".bss.os.msgqueue.cb$$Limit")));
#else
extern const uint32_t __os_thread_cb_start__ __attribute__((weak));
extern const uint32_t __os_thread_cb_end__ __attribute__((weak));
extern const uint32_t __os_timer_cb_start__ __attribute__((weak));
extern const uint32_t __os_timer_cb_end__ __attribute__((weak));
extern const uint32_t __os_evflags_cb_start__ __attribute__((weak));
extern const uint32_t __os_evflags_cb_end__ __attribute__((weak));
extern const uint32_t __os_mutex_cb_start__ __attribute__((weak));
extern const uint32_t __os_mutex_cb_end__ __attribute__((weak));
extern const uint32_t __os_semaphore_cb_start__ __attribute__((weak));
extern const uint32_t __os_semaphore_cb_end__ __attribute__((weak));
extern const uint32_t __os_mempool_cb_start__ __attribute__((weak));
extern const uint32_t __os_mempool_cb_end__ __attribute__((weak));
extern const uint32_t __os_msgqueue_cb_start__ __attribute__((weak));
extern const uint32_t __os_msgqueue_cb_end__ __attribute__((weak));
#endif
//lint -e{9067} "extern array declared without size"
extern const uint32_t * const os_cb_sections[];
//lint -esym(714,os_cb_sections) "Referenced by debugger"
//lint -esym(765,os_cb_sections) "Global scope"
// Start/end address pairs of the RTX control-block sections; referenced by
// the debugger (see lint note above its declaration) to enumerate kernel
// objects for RTOS awareness.
const uint32_t * const os_cb_sections[] \
__USED \
__attribute__((section(".rodata"))) =
{
  &__os_thread_cb_start__,
  &__os_thread_cb_end__,
  &__os_timer_cb_start__,
  &__os_timer_cb_end__,
  &__os_evflags_cb_start__,
  &__os_evflags_cb_end__,
  &__os_mutex_cb_start__,
  &__os_mutex_cb_end__,
  &__os_semaphore_cb_start__,
  &__os_semaphore_cb_end__,
  &__os_mempool_cb_start__,
  &__os_mempool_cb_end__,
  &__os_msgqueue_cb_start__,
  &__os_msgqueue_cb_end__
};
// OS Initialization
// =================
#if defined(__CC_ARM) || \
(defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050))
#ifndef __MICROLIB
//lint -esym(714,_platform_post_stackheap_init) "Referenced by C library"
//lint -esym(765,_platform_post_stackheap_init) "Global scope"
extern void _platform_post_stackheap_init (void);
// Arm C library startup hook (non-MicroLib): initializes the kernel early
// during C library startup, right after stack/heap setup.
__WEAK void _platform_post_stackheap_init (void) {
  (void)osKernelInitialize();
}
#endif
#elif defined(__GNUC__)
extern void software_init_hook (void);
// GNU toolchain runtime startup hook: initializes the kernel during C
// runtime startup (weak, can be overridden by the application).
__WEAK void software_init_hook (void) {
  (void)osKernelInitialize();
}
#elif defined(__ICCARM__)
extern void $Super$$__iar_data_init3 (void);
// IAR $Sub$/$Super$ wrapper: run the original IAR data initialization first,
// then initialize the kernel.
void $Sub$$__iar_data_init3 (void) {
  $Super$$__iar_data_init3();
  (void)osKernelInitialize();
}
#endif
// OS Hooks
// ========
// RTOS Kernel Pre-Initialization Hook
#if (defined(OS_EVR_INIT) && (OS_EVR_INIT != 0))
void osRtxKernelBeforeInit (void);
// Overrides the weak kernel pre-initialization hook: initializes the Event
// Recorder exactly once, while the kernel is still in the Inactive state.
void osRtxKernelBeforeInit (void) {
  if (osKernelGetState() != osKernelInactive) {
    return;
  }
  evr_initialize();
}
#endif
// C/C++ Standard Library Floating-point Initialization
// ====================================================
#if ( !defined(RTX_NO_FP_INIT_CLIB) && \
( defined(__CC_ARM) || \
(defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050))) && \
!defined(__MICROLIB))
#if (!defined(__ARM_ARCH_7A__) && \
(defined(__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined(__FPU_USED ) && (__FPU_USED == 1U)))
extern void $Super$$_fp_init (void);
void $Sub$$_fp_init (void);
// Arm C library $Sub$/$Super$ wrapper around _fp_init: after the library's
// FP initialization, copy the resulting FPSCR into FPDSCR (the default
// floating-point status register used for newly created FP contexts).
void $Sub$$_fp_init (void) {
  $Super$$_fp_init();
  FPU->FPDSCR = __get_FPSCR();
}
#endif
#endif
// C/C++ Standard Library Multithreading Interface
// ===============================================
#if ( !defined(RTX_NO_MULTITHREAD_CLIB) && \
( defined(__CC_ARM) || \
(defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050))) && \
!defined(__MICROLIB))
#define LIBSPACE_SIZE 96
//lint -esym(714,__user_perthread_libspace,_mutex_*) "Referenced by C library"
//lint -esym(765,__user_perthread_libspace,_mutex_*) "Global scope"
//lint -esym(9003, os_libspace*) "variables 'os_libspace*' defined at module scope"
// Memory for libspace
static uint32_t os_libspace[OS_THREAD_LIBSPACE_NUM+1][LIBSPACE_SIZE/4] \
__attribute__((section(".bss.os.libspace")));
// Thread IDs for libspace
static osThreadId_t os_libspace_id[OS_THREAD_LIBSPACE_NUM] \
__attribute__((section(".bss.os.libspace")));
// Check if Kernel has been started
// Latches to 1 the first time the kernel is observed past the Ready state
// and stays 1 from then on.
static uint32_t os_kernel_is_active (void) {
  static uint8_t kernel_started = 0U;
  if ((kernel_started == 0U) && (osKernelGetState() > osKernelReady)) {
    kernel_started = 1U;
  }
  return (uint32_t)kernel_started;
}
// Provide libspace for current thread
// Returns the per-thread C library storage slot for the calling thread,
// claiming a free slot on first use. Before the kernel runs (or when all
// slots are exhausted) the shared extra slot [OS_THREAD_LIBSPACE_NUM] is used.
void *__user_perthread_libspace (void);
void *__user_perthread_libspace (void) {
  uint32_t slot = (uint32_t)OS_THREAD_LIBSPACE_NUM;   // fallback: shared slot
  if (os_kernel_is_active() != 0U) {
    osThreadId_t tid = osThreadGetId();
    // Find this thread's slot, claiming the first empty one along the way.
    for (slot = 0U; slot < (uint32_t)OS_THREAD_LIBSPACE_NUM; slot++) {
      if (os_libspace_id[slot] == NULL) {
        os_libspace_id[slot] = tid;
      }
      if (os_libspace_id[slot] == tid) {
        break;
      }
    }
    if (slot == (uint32_t)OS_THREAD_LIBSPACE_NUM) {
      // All slots taken: report exhaustion; the shared slot is returned.
      (void)osRtxKernelErrorNotify(osRtxErrorClibSpace, tid);
    }
  }
  //lint -e{9087} "cast between pointers to different object types"
  return (void *)&os_libspace[slot][0];
}
// Free libspace for specified thread
// Releases the libspace slot owned by the given thread, if any.
static void user_perthread_libspace_free (osThreadId_t id) {
  uint32_t idx;
  for (idx = 0U; idx < (uint32_t)OS_THREAD_LIBSPACE_NUM; idx++) {
    if (os_libspace_id[idx] == id) {
      os_libspace_id[idx] = NULL;
      break;
    }
  }
}
/// RTOS Thread Before Free Hook
/// Releases the C library per-thread storage (libspace) slot owned by the
/// thread identified by \p id before its resources are freed.
void osRtxThreadBeforeFree (osThreadId_t id);
void osRtxThreadBeforeFree (osThreadId_t id) {
  user_perthread_libspace_free(id);
}
// Mutex identifier
typedef void *mutex;
//lint -save "Function prototypes defined in C library"
//lint -e970 "Use of 'int' outside of a typedef"
//lint -e818 "Pointer 'm' could be declared as pointing to const"
// Initialize a C library mutex.
// Creates an RTOS mutex with default attributes and stores its handle
// in *m. Failure is reported to the kernel error hook.
// \return 1 on success, 0 on failure (C library convention).
__USED
int _mutex_initialize(mutex *m);
int _mutex_initialize(mutex *m) {
  *m = osMutexNew(NULL);
  if (*m == NULL) {
    (void)osRtxKernelErrorNotify(osRtxErrorClibMutex, m);
    return 0;
  }
  return 1;
}
// Acquire a C library mutex.
// Before the kernel is running there is only one execution context, so
// locking is skipped entirely.
__USED
void _mutex_acquire(mutex *m);
void _mutex_acquire(mutex *m) {
  if (os_kernel_is_active() == 0U) {
    return;
  }
  (void)osMutexAcquire(*m, osWaitForever);
}
// Release a C library mutex.
// Mirrors _mutex_acquire: a no-op until the kernel is running.
__USED
void _mutex_release(mutex *m);
void _mutex_release(mutex *m) {
  if (os_kernel_is_active() == 0U) {
    return;
  }
  (void)osMutexRelease(*m);
}
// Free mutex
// Deletes the RTOS mutex backing a C library mutex.
// NOTE(review): unlike _mutex_acquire/_mutex_release this runs without
// an os_kernel_is_active() guard - presumably the C library only frees
// mutexes it successfully created; confirm against the Arm C library
// retargeting documentation.
__USED
void _mutex_free(mutex *m);
void _mutex_free(mutex *m) {
(void)osMutexDelete(*m);
}
//lint -restore
#endif

View File

@ -0,0 +1,329 @@
/*
* Copyright (c) 2013-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: RTX Library definitions
*
* -----------------------------------------------------------------------------
*/
#ifndef RTX_LIB_H_
#define RTX_LIB_H_
#include <string.h>
#include "rtx_def.h" // RTX Configuration definitions
#include "rtx_core_c.h" // Cortex core definitions
#ifdef RTX_TZ_CONTEXT
#include "tz_context.h" // TrustZone Context API
#endif
#include "os_tick.h" // CMSIS OS Tick API
#include "cmsis_os2.h" // CMSIS RTOS API
#include "rtx_os.h" // RTX OS definitions
#include "rtx_evr.h" // RTX Event Recorder definitions
// ==== Library defines ====
// Short internal aliases: the RTX sources use the os_*_t names, which
// map one-to-one onto the public osRtx*_t control-block types.
#define os_thread_t osRtxThread_t
#define os_timer_t osRtxTimer_t
#define os_timer_finfo_t osRtxTimerFinfo_t
#define os_event_flags_t osRtxEventFlags_t
#define os_mutex_t osRtxMutex_t
#define os_semaphore_t osRtxSemaphore_t
#define os_mp_info_t osRtxMpInfo_t
#define os_memory_pool_t osRtxMemoryPool_t
#define os_message_t osRtxMessage_t
#define os_message_queue_t osRtxMessageQueue_t
#define os_object_t osRtxObject_t
// ==== Library sections ====
// The symbols below resolve (via the linker) to the base address and
// length of the sections holding statically allocated object control
// blocks (.bss.os.<object>.cb). They are declared weak so that images
// which do not place such sections still link.
#if defined(__CC_ARM) || \
(defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050))
// Referenced through linker
//lint -esym(528, __os_thread_cb_start__, __os_thread_cb_length__)
//lint -esym(528, __os_timer_cb_start__, __os_timer_cb_length__)
//lint -esym(528, __os_evflags_cb_start__, __os_evflags_cb_length__)
//lint -esym(528, __os_mutex_cb_start__, __os_mutex_cb_length__)
//lint -esym(528, __os_semaphore_cb_start__, __os_semaphore_cb_length__)
//lint -esym(528, __os_mempool_cb_start__, __os_mempool_cb_length__)
//lint -esym(528, __os_msgqueue_cb_start__, __os_msgqueue_cb_length__)
// Accessed through linker
//lint -esym(551, __os_thread_cb_start__, __os_thread_cb_length__)
//lint -esym(551, __os_timer_cb_start__, __os_timer_cb_length__)
//lint -esym(551, __os_evflags_cb_start__, __os_evflags_cb_length__)
//lint -esym(551, __os_mutex_cb_start__, __os_mutex_cb_length__)
//lint -esym(551, __os_semaphore_cb_start__, __os_semaphore_cb_length__)
//lint -esym(551, __os_mempool_cb_start__, __os_mempool_cb_length__)
//lint -esym(551, __os_msgqueue_cb_start__, __os_msgqueue_cb_length__)
// Initialized through linker
//lint -esym(728, __os_thread_cb_start__, __os_thread_cb_length__)
//lint -esym(728, __os_timer_cb_start__, __os_timer_cb_length__)
//lint -esym(728, __os_evflags_cb_start__, __os_evflags_cb_length__)
//lint -esym(728, __os_mutex_cb_start__, __os_mutex_cb_length__)
//lint -esym(728, __os_semaphore_cb_start__, __os_semaphore_cb_length__)
//lint -esym(728, __os_mempool_cb_start__, __os_mempool_cb_length__)
//lint -esym(728, __os_msgqueue_cb_start__, __os_msgqueue_cb_length__)
// Global scope
//lint -esym(9003, __os_thread_cb_start__, __os_thread_cb_length__)
//lint -esym(9003, __os_timer_cb_start__, __os_timer_cb_length__)
//lint -esym(9003, __os_evflags_cb_start__, __os_evflags_cb_length__)
//lint -esym(9003, __os_mutex_cb_start__, __os_mutex_cb_length__)
//lint -esym(9003, __os_semaphore_cb_start__, __os_semaphore_cb_length__)
//lint -esym(9003, __os_mempool_cb_start__, __os_mempool_cb_length__)
//lint -esym(9003, __os_msgqueue_cb_start__, __os_msgqueue_cb_length__)
// Arm Compiler: use weakref aliases onto the linker-generated
// region symbols ($$Base / $$Length).
static const uint32_t __os_thread_cb_start__ __attribute__((weakref(".bss.os.thread.cb$$Base")));
static const uint32_t __os_thread_cb_length__ __attribute__((weakref(".bss.os.thread.cb$$Length")));
static const uint32_t __os_timer_cb_start__ __attribute__((weakref(".bss.os.timer.cb$$Base")));
static const uint32_t __os_timer_cb_length__ __attribute__((weakref(".bss.os.timer.cb$$Length")));
static const uint32_t __os_evflags_cb_start__ __attribute__((weakref(".bss.os.evflags.cb$$Base")));
static const uint32_t __os_evflags_cb_length__ __attribute__((weakref(".bss.os.evflags.cb$$Length")));
static const uint32_t __os_mutex_cb_start__ __attribute__((weakref(".bss.os.mutex.cb$$Base")));
static const uint32_t __os_mutex_cb_length__ __attribute__((weakref(".bss.os.mutex.cb$$Length")));
static const uint32_t __os_semaphore_cb_start__ __attribute__((weakref(".bss.os.semaphore.cb$$Base")));
static const uint32_t __os_semaphore_cb_length__ __attribute__((weakref(".bss.os.semaphore.cb$$Length")));
static const uint32_t __os_mempool_cb_start__ __attribute__((weakref(".bss.os.mempool.cb$$Base")));
static const uint32_t __os_mempool_cb_length__ __attribute__((weakref(".bss.os.mempool.cb$$Length")));
static const uint32_t __os_msgqueue_cb_start__ __attribute__((weakref(".bss.os.msgqueue.cb$$Base")));
static const uint32_t __os_msgqueue_cb_length__ __attribute__((weakref(".bss.os.msgqueue.cb$$Length")));
#else
// Other toolchains: plain weak externals provided by the linker script.
extern const uint32_t __os_thread_cb_start__ __attribute__((weak));
extern const uint32_t __os_thread_cb_length__ __attribute__((weak));
extern const uint32_t __os_timer_cb_start__ __attribute__((weak));
extern const uint32_t __os_timer_cb_length__ __attribute__((weak));
extern const uint32_t __os_evflags_cb_start__ __attribute__((weak));
extern const uint32_t __os_evflags_cb_length__ __attribute__((weak));
extern const uint32_t __os_mutex_cb_start__ __attribute__((weak));
extern const uint32_t __os_mutex_cb_length__ __attribute__((weak));
extern const uint32_t __os_semaphore_cb_start__ __attribute__((weak));
extern const uint32_t __os_semaphore_cb_length__ __attribute__((weak));
extern const uint32_t __os_mempool_cb_start__ __attribute__((weak));
extern const uint32_t __os_mempool_cb_length__ __attribute__((weak));
extern const uint32_t __os_msgqueue_cb_start__ __attribute__((weak));
extern const uint32_t __os_msgqueue_cb_length__ __attribute__((weak));
#endif
// ==== Inline functions ====
// Thin cast helpers converting the public CMSIS-RTOS2 identifier types
// (opaque pointers) into the internal RTX control-block types. They
// concentrate the required pointer casts - and their lint suppressions -
// in one place.
// Thread ID
__STATIC_INLINE os_thread_t *osRtxThreadId (osThreadId_t thread_id) {
//lint -e{9079} -e{9087} "cast from pointer to void to pointer to object type" [MISRA Note 2]
return ((os_thread_t *)thread_id);
}
// Timer ID
__STATIC_INLINE os_timer_t *osRtxTimerId (osTimerId_t timer_id) {
//lint -e{9079} -e{9087} "cast from pointer to void to pointer to object type" [MISRA Note 2]
return ((os_timer_t *)timer_id);
}
// Event Flags ID
__STATIC_INLINE os_event_flags_t *osRtxEventFlagsId (osEventFlagsId_t ef_id) {
//lint -e{9079} -e{9087} "cast from pointer to void to pointer to object type" [MISRA Note 2]
return ((os_event_flags_t *)ef_id);
}
// Mutex ID
__STATIC_INLINE os_mutex_t *osRtxMutexId (osMutexId_t mutex_id) {
//lint -e{9079} -e{9087} "cast from pointer to void to pointer to object type" [MISRA Note 2]
return ((os_mutex_t *)mutex_id);
}
// Semaphore ID
__STATIC_INLINE os_semaphore_t *osRtxSemaphoreId (osSemaphoreId_t semaphore_id) {
//lint -e{9079} -e{9087} "cast from pointer to void to pointer to object type" [MISRA Note 2]
return ((os_semaphore_t *)semaphore_id);
}
// Memory Pool ID
__STATIC_INLINE os_memory_pool_t *osRtxMemoryPoolId (osMemoryPoolId_t mp_id) {
//lint -e{9079} -e{9087} "cast from pointer to void to pointer to object type" [MISRA Note 2]
return ((os_memory_pool_t *)mp_id);
}
// Message Queue ID
__STATIC_INLINE os_message_queue_t *osRtxMessageQueueId (osMessageQueueId_t mq_id) {
//lint -e{9079} -e{9087} "cast from pointer to void to pointer to object type" [MISRA Note 2]
return ((os_message_queue_t *)mq_id);
}
// Generic Object
// View any control block through the generic object header.
__STATIC_INLINE os_object_t *osRtxObject (void *object) {
//lint -e{9079} -e{9087} "cast from pointer to void to pointer to object type" [MISRA Note 3]
return ((os_object_t *)object);
}
// Converters from the generic os_object_t view of a control block back
// to the concrete object types (inverse of osRtxObject above).
// Thread Object
__STATIC_INLINE os_thread_t *osRtxThreadObject (os_object_t *object) {
//lint -e{740} -e{826} -e{9087} "cast from pointer to generic object to specific object" [MISRA Note 4]
return ((os_thread_t *)object);
}
// Timer Object
__STATIC_INLINE os_timer_t *osRtxTimerObject (os_object_t *object) {
//lint -e{740} -e{826} -e{9087} "cast from pointer to generic object to specific object" [MISRA Note 4]
return ((os_timer_t *)object);
}
// Event Flags Object
__STATIC_INLINE os_event_flags_t *osRtxEventFlagsObject (os_object_t *object) {
//lint -e{740} -e{826} -e{9087} "cast from pointer to generic object to specific object" [MISRA Note 4]
return ((os_event_flags_t *)object);
}
// Mutex Object
__STATIC_INLINE os_mutex_t *osRtxMutexObject (os_object_t *object) {
//lint -e{740} -e{826} -e{9087} "cast from pointer to generic object to specific object" [MISRA Note 4]
return ((os_mutex_t *)object);
}
// Semaphore Object
__STATIC_INLINE os_semaphore_t *osRtxSemaphoreObject (os_object_t *object) {
//lint -e{740} -e{826} -e{9087} "cast from pointer to generic object to specific object" [MISRA Note 4]
return ((os_semaphore_t *)object);
}
// Memory Pool Object
__STATIC_INLINE os_memory_pool_t *osRtxMemoryPoolObject (os_object_t *object) {
//lint -e{740} -e{826} -e{9087} "cast from pointer to generic object to specific object" [MISRA Note 4]
return ((os_memory_pool_t *)object);
}
// Message Queue Object
__STATIC_INLINE os_message_queue_t *osRtxMessageQueueObject (os_object_t *object) {
//lint -e{740} -e{826} -e{9087} "cast from pointer to generic object to specific object" [MISRA Note 4]
return ((os_message_queue_t *)object);
}
// Message Object
__STATIC_INLINE os_message_t *osRtxMessageObject (os_object_t *object) {
//lint -e{740} -e{826} -e{9087} "cast from pointer to generic object to specific object" [MISRA Note 4]
return ((os_message_t *)object);
}
// Kernel State
// Current kernel state as the public enum value.
__STATIC_INLINE osKernelState_t osRtxKernelState (void) {
//lint -e{9030} -e{9034} "cast to enum"
return ((osKernelState_t)(osRtxInfo.kernel.state));
}
// Thread State
// The raw state byte is masked with osRtxThreadStateMask before the
// enum cast, stripping the internal qualifier bits.
__STATIC_INLINE osThreadState_t osRtxThreadState (const os_thread_t *thread) {
uint8_t state = thread->state & osRtxThreadStateMask;
//lint -e{9030} -e{9034} "cast to enum"
return ((osThreadState_t)state);
}
// Thread Priority
__STATIC_INLINE osPriority_t osRtxThreadPriority (const os_thread_t *thread) {
//lint -e{9030} -e{9034} "cast to enum"
return ((osPriority_t)thread->priority);
}
// Kernel Get State
// Raw (unmasked, un-cast) kernel state byte.
__STATIC_INLINE uint8_t osRtxKernelGetState (void) {
return osRtxInfo.kernel.state;
}
// Thread Get/Set Running
// Accessors for the currently running thread (osRtxInfo.thread.run.curr).
__STATIC_INLINE os_thread_t *osRtxThreadGetRunning (void) {
return osRtxInfo.thread.run.curr;
}
__STATIC_INLINE void osRtxThreadSetRunning (os_thread_t *thread) {
osRtxInfo.thread.run.curr = thread;
}
// ==== Library functions ====
// Prototypes of the internal interfaces shared between the RTX library
// modules (not part of the public CMSIS-RTOS2 API).
// Kernel Library functions
extern void osRtxKernelBeforeInit (void);
// Thread Library functions
extern void osRtxThreadListPut (os_object_t *object, os_thread_t *thread);
extern os_thread_t *osRtxThreadListGet (os_object_t *object);
extern void osRtxThreadListSort (os_thread_t *thread);
extern void osRtxThreadListRemove (os_thread_t *thread);
extern void osRtxThreadReadyPut (os_thread_t *thread);
//lint -esym(759,osRtxThreadDelayRemove) "Prototype in header"
//lint -esym(765,osRtxThreadDelayRemove) "Global scope"
extern void osRtxThreadDelayRemove (os_thread_t *thread);
extern void osRtxThreadDelayTick (void);
extern uint32_t *osRtxThreadRegPtr (const os_thread_t *thread);
extern void osRtxThreadSwitch (os_thread_t *thread);
extern void osRtxThreadDispatch (os_thread_t *thread);
extern void osRtxThreadWaitExit (os_thread_t *thread, uint32_t ret_val, bool_t dispatch);
extern bool_t osRtxThreadWaitEnter (uint8_t state, uint32_t timeout);
#ifdef RTX_STACK_CHECK
extern bool_t osRtxThreadStackCheck (const os_thread_t *thread);
#endif
#ifdef RTX_THREAD_WATCHDOG
//lint -esym(759,osRtxThreadWatchdogRemove) "Prototype in header"
//lint -esym(765,osRtxThreadWatchdogRemove) "Global scope"
extern void osRtxThreadWatchdogRemove(const os_thread_t *thread);
extern void osRtxThreadWatchdogTick (void);
#endif
//lint -esym(759,osRtxThreadJoinWakeup) "Prototype in header"
//lint -esym(765,osRtxThreadJoinWakeup) "Global scope"
extern void osRtxThreadJoinWakeup (const os_thread_t *thread);
//lint -esym(759,osRtxThreadDestroy) "Prototype in header"
//lint -esym(765,osRtxThreadDestroy) "Global scope"
extern void osRtxThreadDestroy (os_thread_t *thread);
// NOTE(review): the libspace hook in rtx_lib.c defines this with an
// osThreadId_t parameter - confirm the two declarations agree.
extern void osRtxThreadBeforeFree (os_thread_t *thread);
extern bool_t osRtxThreadStartup (void);
// Timer Library functions
extern int32_t osRtxTimerSetup (void);
extern void osRtxTimerThread (void *argument);
#ifdef RTX_SAFETY_CLASS
extern void osRtxTimerDeleteClass (uint32_t safety_class, uint32_t mode);
#endif
// Mutex Library functions
extern void osRtxMutexOwnerRelease (os_mutex_t *mutex_list);
extern void osRtxMutexOwnerRestore (const os_mutex_t *mutex, const os_thread_t *thread_wakeup);
#ifdef RTX_SAFETY_CLASS
extern void osRtxMutexDeleteClass (uint32_t safety_class, uint32_t mode);
#endif
// Semaphore Library functions
#ifdef RTX_SAFETY_CLASS
extern void osRtxSemaphoreDeleteClass (uint32_t safety_class, uint32_t mode);
#endif
// Event Flags Library functions
#ifdef RTX_SAFETY_CLASS
extern void osRtxEventFlagsDeleteClass(uint32_t safety_class, uint32_t mode);
#endif
// Memory Heap Library functions
extern uint32_t osRtxMemoryInit (void *mem, uint32_t size);
extern void *osRtxMemoryAlloc(void *mem, uint32_t size, uint32_t type);
extern uint32_t osRtxMemoryFree (void *mem, void *block);
// Memory Pool Library functions
extern uint32_t osRtxMemoryPoolInit (os_mp_info_t *mp_info, uint32_t block_count, uint32_t block_size, void *block_mem);
extern void *osRtxMemoryPoolAlloc (os_mp_info_t *mp_info);
extern osStatus_t osRtxMemoryPoolFree (os_mp_info_t *mp_info, void *block);
#ifdef RTX_SAFETY_CLASS
extern void osRtxMemoryPoolDeleteClass(uint32_t safety_class, uint32_t mode);
#endif
// Message Queue Library functions
extern int32_t osRtxMessageQueueTimerSetup (void);
#ifdef RTX_SAFETY_CLASS
extern void osRtxMessageQueueDeleteClass(uint32_t safety_class, uint32_t mode);
#endif
// System Library functions
extern void osRtxTick_Handler (void);
extern void osRtxPendSV_Handler (void);
extern void osRtxPostProcess (os_object_t *object);
#endif // RTX_LIB_H_

View File

@ -0,0 +1,218 @@
/*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: Memory functions
*
* -----------------------------------------------------------------------------
*/
#include "rtx_lib.h"
// Memory Pool Header structure
// Located at the very start of the pool memory; 'used' counts bytes in
// use including the pool and block headers (see osRtxMemoryInit/Alloc/Free).
typedef struct {
uint32_t size; // Memory Pool size
uint32_t used; // Used Memory
} mem_head_t;
// Memory Block Header structure
// Allocated blocks form a singly linked, address-ordered list. The
// terminating block at the end of the pool has next == NULL and its
// 'info' field records the maximum 'used' value seen so far.
typedef struct mem_block_s {
struct mem_block_s *next; // Next Memory Block in list
uint32_t info; // Block Info or max used Memory (in last block)
} mem_block_t;
// Memory Block Info: Length = <31:2>:'00', Type = <1:0>
// Block lengths are always multiples of 8, so the low two bits are free
// to carry the block type.
#define MB_INFO_LEN_MASK 0xFFFFFFFCU // Length mask
#define MB_INFO_TYPE_MASK 0x00000003U // Type mask
// Memory Head Pointer
// Reinterpret the pool base address as the pool header.
__STATIC_INLINE mem_head_t *MemHeadPtr (void *mem) {
//lint -e{9079} -e{9087} "conversion from pointer to void to pointer to other type" [MISRA Note 6]
return ((mem_head_t *)mem);
}
// Memory Block Pointer
// Compute the block header located 'offset' bytes into 'mem'.
// NOTE(review): the pointer round-trips through uint32_t, so this
// assumes 32-bit addressing (true on Cortex-M targets).
__STATIC_INLINE mem_block_t *MemBlockPtr (void *mem, uint32_t offset) {
uint32_t addr;
mem_block_t *ptr;
//lint --e{923} --e{9078} "cast between pointer and unsigned int" [MISRA Note 8]
addr = (uint32_t)mem + offset;
ptr = (mem_block_t *)addr;
return ptr;
}
// ==== Library functions ====
/// Initialize Memory Pool with variable block size.
/// Lays down the pool header followed by two sentinel block headers:
/// one at the start of the usable area and a terminating one at the
/// very end of the pool (its 'info' later tracks peak usage).
/// \param[in] mem pointer to memory pool.
/// \param[in] size size of a memory pool in bytes.
/// \return 1 - success, 0 - failure.
__WEAK uint32_t osRtxMemoryInit (void *mem, uint32_t size) {
  mem_head_t  *pool;
  mem_block_t *first;
  mem_block_t *last;
  uint32_t     ok;

  // Pool must be non-NULL, 8-byte aligned, a multiple of 8 bytes in
  // length, and large enough for the header plus both sentinel blocks.
  //lint -e{923} "cast from pointer to unsigned int" [MISRA Note 7]
  ok = ((mem != NULL) &&
        (((uint32_t)mem & 7U) == 0U) &&
        ((size & 7U) == 0U) &&
        (size >= (sizeof(mem_head_t) + (2U*sizeof(mem_block_t))))) ? 1U : 0U;
  if (ok == 0U) {
    EvrRtxMemoryInit(mem, size, 0U);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return 0U;
  }

  // Pool header: total size and initial usage (pool + one block header).
  pool = MemHeadPtr(mem);
  pool->size = size;
  pool->used = sizeof(mem_head_t) + sizeof(mem_block_t);

  // Sentinel block headers at the start and at the end of the pool.
  first = MemBlockPtr(mem, sizeof(mem_head_t));
  last  = MemBlockPtr(mem, size - sizeof(mem_block_t));
  first->next = last;
  first->info = 0U;
  last->next  = NULL;
  last->info  = sizeof(mem_head_t) + sizeof(mem_block_t);

  EvrRtxMemoryInit(mem, size, 1U);
  return 1U;
}
/// Allocate a memory block from a Memory Pool.
/// First-fit allocator: walks the address-ordered block list and carves
/// the new block out of the first gap large enough to hold it.
/// \param[in] mem pointer to memory pool.
/// \param[in] size size of a memory block in bytes.
/// \param[in] type memory block type: 0 - generic, 1 - control block
/// \return allocated memory block or NULL in case of no memory is available.
__WEAK void *osRtxMemoryAlloc (void *mem, uint32_t size, uint32_t type) {
mem_block_t *ptr;
mem_block_t *p, *p_new;
uint32_t block_size;
uint32_t hole_size;
// Check parameters
if ((mem == NULL) || (size == 0U) || ((type & ~MB_INFO_TYPE_MASK) != 0U)) {
EvrRtxMemoryAlloc(mem, size, type, NULL);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
}
// Add block header to size
block_size = size + sizeof(mem_block_t);
// Make sure that block is 8-byte aligned
block_size = (block_size + 7U) & ~((uint32_t)7U);
// Search for hole big enough
p = MemBlockPtr(mem, sizeof(mem_head_t));
for (;;) {
// Free gap = distance to the next header minus this block's own length
//lint -e{923} -e{9078} "cast from pointer to unsigned int"
hole_size = (uint32_t)p->next - (uint32_t)p;
hole_size -= p->info & MB_INFO_LEN_MASK;
if (hole_size >= block_size) {
// Hole found
break;
}
p = p->next;
if (p->next == NULL) {
// Failed (end of list): p is the terminating sentinel block
EvrRtxMemoryAlloc(mem, size, type, NULL);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
}
}
// Update used memory
(MemHeadPtr(mem))->used += block_size;
// Update max used memory (high-water mark kept in the last block's info)
p_new = MemBlockPtr(mem, (MemHeadPtr(mem))->size - sizeof(mem_block_t));
if (p_new->info < (MemHeadPtr(mem))->used) {
p_new->info = (MemHeadPtr(mem))->used;
}
// Allocate block
if (p->info == 0U) {
// No block allocated, set info of first element (reuse its static header)
p->info = block_size | type;
ptr = MemBlockPtr(p, sizeof(mem_block_t));
} else {
// Insert new element into the list, immediately after block p
p_new = MemBlockPtr(p, p->info & MB_INFO_LEN_MASK);
p_new->next = p->next;
p_new->info = block_size | type;
p->next = p_new;
ptr = MemBlockPtr(p_new, sizeof(mem_block_t));
}
// Return the payload area immediately after the block header
EvrRtxMemoryAlloc(mem, size, type, ptr);
return ptr;
}
/// Return an allocated memory block back to a Memory Pool.
/// The block list is scanned to verify that 'block' really is a live
/// allocation from this pool before it is released.
/// \param[in] mem pointer to memory pool.
/// \param[in] block memory block to be returned to the memory pool.
/// \return 1 - success, 0 - failure.
__WEAK uint32_t osRtxMemoryFree (void *mem, void *block) {
const mem_block_t *ptr;
mem_block_t *p, *p_prev;
// Check parameters
if ((mem == NULL) || (block == NULL)) {
EvrRtxMemoryFree(mem, block, 0U);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return 0U;
}
// Memory block header immediately precedes the user payload
ptr = MemBlockPtr(block, 0U);
ptr--;
// Search for block header
p_prev = NULL;
p = MemBlockPtr(mem, sizeof(mem_head_t));
while (p != ptr) {
p_prev = p;
p = p->next;
if (p == NULL) {
// Not found: 'block' is not a valid allocation from this pool
EvrRtxMemoryFree(mem, block, 0U);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return 0U;
}
}
// Update used memory
(MemHeadPtr(mem))->used -= p->info & MB_INFO_LEN_MASK;
// Free block
if (p_prev == NULL) {
// Release first block, only set info to 0 (its header is the static sentinel)
p->info = 0U;
} else {
// Discard block from chained list
p_prev->next = p->next;
}
EvrRtxMemoryFree(mem, block, 1U);
return 1U;
}

View File

@ -0,0 +1,847 @@
/*
* Copyright (c) 2013-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: Memory Pool functions
*
* -----------------------------------------------------------------------------
*/
#include "rtx_lib.h"
// OS Runtime Object Memory Usage
// Counters tracking dynamic allocation of memory-pool control blocks
// (see cnt_alloc/cnt_free/max_used usage in this file); zero-initialized.
#ifdef RTX_OBJ_MEM_USAGE
osRtxObjectMemUsage_t osRtxMemoryPoolMemUsage \
__attribute__((section(".data.os.mempool.obj"))) =
{ 0U, 0U, 0U };
#endif
// ==== Helper functions ====
/// Verify that Memory Pool object pointer is valid.
/// With RTX_OBJ_PTR_CHECK the pointer must land exactly on a
/// control-block slot inside the .bss.os.mempool.cb section; otherwise
/// only a NULL check is performed.
/// \param[in] mp memory pool object.
/// \return true - valid, false - invalid.
static bool_t IsMemoryPoolPtrValid (const os_memory_pool_t *mp) {
#ifdef RTX_OBJ_PTR_CHECK
//lint --e{923} --e{9078} "cast from pointer to unsigned int" [MISRA Note 7]
uint32_t cb_start = (uint32_t)&__os_mempool_cb_start__;
uint32_t cb_length = (uint32_t)&__os_mempool_cb_length__;
// Check the section boundaries
// (unsigned subtraction also rejects pointers below cb_start)
if (((uint32_t)mp - cb_start) >= cb_length) {
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return FALSE;
}
// Check the object alignment
if ((((uint32_t)mp - cb_start) % sizeof(os_memory_pool_t)) != 0U) {
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return FALSE;
}
#else
// Check NULL pointer
if (mp == NULL) {
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return FALSE;
}
#endif
return TRUE;
}
// ==== Library functions ====
/// Initialize Memory Pool.
/// Fills in the pool bookkeeping structure and threads all blocks into
/// a singly linked free list (each free block begins with a pointer to
/// the next free block; the last one terminates with NULL).
/// \param[in] mp_info memory pool info.
/// \param[in] block_count maximum number of memory blocks in memory pool.
/// \param[in] block_size size of a memory block in bytes.
/// \param[in] block_mem pointer to memory for block storage.
/// \return 1 - success, 0 - failure.
uint32_t osRtxMemoryPoolInit (os_mp_info_t *mp_info, uint32_t block_count, uint32_t block_size, void *block_mem) {
  //lint --e{9079} --e{9087} "conversion from pointer to void to pointer to other type" [MISRA Note 6]
  uint8_t *cur;
  uint32_t n;

  // Reject a missing info structure, missing storage, or a zero-sized pool.
  if ((mp_info == NULL) || (block_count == 0U) || (block_size == 0U) || (block_mem == NULL)) {
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return 0U;
  }

  // Fill in the pool bookkeeping structure.
  mp_info->max_blocks  = block_count;
  mp_info->used_blocks = 0U;
  mp_info->block_size  = block_size;
  mp_info->block_base  = block_mem;
  mp_info->block_free  = block_mem;
  mp_info->block_lim   = &(((uint8_t *)block_mem)[block_count * block_size]);

  EvrRtxMemoryBlockInit(mp_info, block_count, block_size, block_mem);

  // Chain the free list: block n points at block n+1.
  cur = (uint8_t *)block_mem;
  for (n = 1U; n < block_count; n++) {
    *((void **)cur) = &cur[block_size];
    cur = &cur[block_size];
  }
  *((void **)cur) = NULL;

  return 1U;
}
/// Allocate a memory block from a Memory Pool.
/// Pops the head of the free-block list; made interrupt-safe either by
/// briefly masking IRQs or by using exclusive-access atomics.
/// \param[in] mp_info memory pool info.
/// \return address of the allocated memory block or NULL in case of no memory is available.
void *osRtxMemoryPoolAlloc (os_mp_info_t *mp_info) {
#if (EXCLUSIVE_ACCESS == 0)
uint32_t primask = __get_PRIMASK();
#endif
void *block;
if (mp_info == NULL) {
EvrRtxMemoryBlockAlloc(NULL, NULL);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
}
#if (EXCLUSIVE_ACCESS == 0)
__disable_irq();
// Unlink the first free block (if any)
block = mp_info->block_free;
if (block != NULL) {
//lint --e{9079} --e{9087} "conversion from pointer to void to pointer to other type"
mp_info->block_free = *((void **)block);
mp_info->used_blocks++;
}
// Re-enable interrupts only if they were enabled on entry
if (primask == 0U) {
__enable_irq();
}
#else
block = atomic_link_get(&mp_info->block_free);
if (block != NULL) {
(void)atomic_inc32(&mp_info->used_blocks);
}
#endif
EvrRtxMemoryBlockAlloc(mp_info, block);
return block;
}
/// Return an allocated memory block back to a Memory Pool.
/// Pushes the block onto the head of the free list.
/// NOTE(review): only a range check is performed on 'block'; a double
/// free or a pointer into the middle of a block is not detected here.
/// \param[in] mp_info memory pool info.
/// \param[in] block address of the allocated memory block to be returned to the memory pool.
/// \return status code that indicates the execution status of the function.
osStatus_t osRtxMemoryPoolFree (os_mp_info_t *mp_info, void *block) {
#if (EXCLUSIVE_ACCESS == 0)
uint32_t primask = __get_PRIMASK();
#endif
// Reject blocks that do not lie inside this pool's storage area
//lint -e{946} "Relational operator applied to pointers"
if ((mp_info == NULL) || (block < mp_info->block_base) || (block >= mp_info->block_lim)) {
EvrRtxMemoryBlockFree(mp_info, block, (int32_t)osErrorParameter);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return osErrorParameter;
}
#if (EXCLUSIVE_ACCESS == 0)
__disable_irq();
//lint --e{9079} --e{9087} "conversion from pointer to void to pointer to other type"
*((void **)block) = mp_info->block_free;
mp_info->block_free = block;
mp_info->used_blocks--;
// Re-enable interrupts only if they were enabled on entry
if (primask == 0U) {
__enable_irq();
}
#else
atomic_link_put(&mp_info->block_free, block);
(void)atomic_dec32(&mp_info->used_blocks);
#endif
EvrRtxMemoryBlockFree(mp_info, block, (int32_t)osOK);
return osOK;
}
/// Destroy a Memory Pool object.
/// Invalidates the control block, then frees the pool's data storage
/// and/or the control block itself when those were allocated from
/// system memory (per the flags set at creation time).
/// \param[in] mp memory pool object.
static void osRtxMemoryPoolDestroy (os_memory_pool_t *mp) {
// Mark object as invalid
mp->id = osRtxIdInvalid;
// Free data memory
if ((mp->flags & osRtxFlagSystemMemory) != 0U) {
(void)osRtxMemoryFree(osRtxInfo.mem.mp_data, mp->mp_info.block_base);
}
// Free object memory
if ((mp->flags & osRtxFlagSystemObject) != 0U) {
#ifdef RTX_OBJ_PTR_CHECK
(void)osRtxMemoryPoolFree(osRtxInfo.mpi.memory_pool, mp);
#else
if (osRtxInfo.mpi.memory_pool != NULL) {
(void)osRtxMemoryPoolFree(osRtxInfo.mpi.memory_pool, mp);
} else {
(void)osRtxMemoryFree(osRtxInfo.mem.common, mp);
}
#endif
#ifdef RTX_OBJ_MEM_USAGE
// Track released control blocks for object memory usage statistics
osRtxMemoryPoolMemUsage.cnt_free++;
#endif
}
EvrRtxMemoryPoolDestroyed(mp);
}
#ifdef RTX_SAFETY_CLASS
/// Delete a Memory Pool safety class.
/// Walks every control block in the memory-pool section and destroys
/// those whose safety class matches 'mode' (same and/or lower class);
/// threads waiting on a destroyed pool are released with osErrorResource.
/// \param[in] safety_class safety class.
/// \param[in] mode safety mode.
void osRtxMemoryPoolDeleteClass (uint32_t safety_class, uint32_t mode) {
os_memory_pool_t *mp;
os_thread_t *thread;
uint32_t length;
//lint --e{923} --e{9078} "cast from pointer to unsigned int" [MISRA Note 7]
mp = (os_memory_pool_t *)(uint32_t)&__os_mempool_cb_start__;
length = (uint32_t)&__os_mempool_cb_length__;
while (length >= sizeof(os_memory_pool_t)) {
if ( (mp->id == osRtxIdMemoryPool) &&
((((mode & osSafetyWithSameClass) != 0U) &&
((mp->attr >> osRtxAttrClass_Pos) == (uint8_t)safety_class)) ||
(((mode & osSafetyWithLowerClass) != 0U) &&
((mp->attr >> osRtxAttrClass_Pos) < (uint8_t)safety_class)))) {
// Wake up all threads blocked on this pool before destroying it
while (mp->thread_list != NULL) {
thread = osRtxThreadListGet(osRtxObject(mp));
osRtxThreadWaitExit(thread, (uint32_t)osErrorResource, FALSE);
}
osRtxMemoryPoolDestroy(mp);
}
length -= sizeof(os_memory_pool_t);
mp++;
}
}
#endif
// ==== Post ISR processing ====
/// Memory Pool post ISR processing.
/// If a thread is blocked waiting for a block and one has become
/// available, allocate it and hand it to the highest-priority waiter.
/// \param[in] mp memory pool object.
static void osRtxMemoryPoolPostProcess (os_memory_pool_t *mp) {
  os_thread_t *waiter;
  void        *mem;

  // Nothing to do unless a thread is blocked on this pool.
  if (mp->thread_list == NULL) {
    return;
  }
  mem = osRtxMemoryPoolAlloc(&mp->mp_info);
  if (mem == NULL) {
    return;
  }
  // Wake up the waiting thread with the highest priority, returning the
  // block address as its wait result.
  waiter = osRtxThreadListGet(osRtxObject(mp));
  //lint -e{923} "cast from pointer to unsigned int"
  osRtxThreadWaitExit(waiter, (uint32_t)mem, FALSE);
  EvrRtxMemoryPoolAllocated(mp, mem);
}
// ==== Service Calls ====
/// Create and Initialize a Memory Pool object.
/// \note API identical to osMemoryPoolNew
static osMemoryPoolId_t svcRtxMemoryPoolNew (uint32_t block_count, uint32_t block_size, const osMemoryPoolAttr_t *attr) {
os_memory_pool_t *mp;
#ifdef RTX_SAFETY_CLASS
const os_thread_t *thread = osRtxThreadGetRunning();
uint32_t attr_bits;
#endif
void *mp_mem;
uint32_t mp_size;
uint32_t b_count;
uint32_t b_size;
uint32_t size;
uint8_t flags;
const char *name;
// Check parameters
if ((block_count == 0U) || (block_size == 0U) ||
((__CLZ(block_count) + __CLZ(block_size)) < 32U)) {
EvrRtxMemoryPoolError(NULL, (int32_t)osErrorParameter);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
}
b_count = block_count;
b_size = (block_size + 3U) & ~3UL;
size = b_count * b_size;
// Process attributes
if (attr != NULL) {
name = attr->name;
#ifdef RTX_SAFETY_CLASS
attr_bits = attr->attr_bits;
#endif
//lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 6]
mp = attr->cb_mem;
//lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 6]
mp_mem = attr->mp_mem;
mp_size = attr->mp_size;
#ifdef RTX_SAFETY_CLASS
if ((attr_bits & osSafetyClass_Valid) != 0U) {
if ((thread != NULL) &&
((thread->attr >> osRtxAttrClass_Pos) <
(uint8_t)((attr_bits & osSafetyClass_Msk) >> osSafetyClass_Pos))) {
EvrRtxMemoryPoolError(NULL, (int32_t)osErrorSafetyClass);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
}
}
#endif
if (mp != NULL) {
if (!IsMemoryPoolPtrValid(mp) || (attr->cb_size != sizeof(os_memory_pool_t))) {
EvrRtxMemoryPoolError(NULL, osRtxErrorInvalidControlBlock);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
}
} else {
if (attr->cb_size != 0U) {
EvrRtxMemoryPoolError(NULL, osRtxErrorInvalidControlBlock);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
}
}
if (mp_mem != NULL) {
//lint -e{923} "cast from pointer to unsigned int" [MISRA Note 7]
if ((((uint32_t)mp_mem & 3U) != 0U) || (mp_size < size)) {
EvrRtxMemoryPoolError(NULL, osRtxErrorInvalidDataMemory);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
}
} else {
if (mp_size != 0U) {
EvrRtxMemoryPoolError(NULL, osRtxErrorInvalidDataMemory);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
}
}
} else {
name = NULL;
#ifdef RTX_SAFETY_CLASS
attr_bits = 0U;
#endif
mp = NULL;
mp_mem = NULL;
}
// Allocate object memory if not provided
if (mp == NULL) {
if (osRtxInfo.mpi.memory_pool != NULL) {
//lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
mp = osRtxMemoryPoolAlloc(osRtxInfo.mpi.memory_pool);
#ifndef RTX_OBJ_PTR_CHECK
} else {
//lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
mp = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_memory_pool_t), 1U);
#endif
}
#ifdef RTX_OBJ_MEM_USAGE
if (mp != NULL) {
uint32_t used;
osRtxMemoryPoolMemUsage.cnt_alloc++;
used = osRtxMemoryPoolMemUsage.cnt_alloc - osRtxMemoryPoolMemUsage.cnt_free;
if (osRtxMemoryPoolMemUsage.max_used < used) {
osRtxMemoryPoolMemUsage.max_used = used;
}
}
#endif
flags = osRtxFlagSystemObject;
} else {
flags = 0U;
}
// Allocate data memory if not provided
if ((mp != NULL) && (mp_mem == NULL)) {
//lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
mp_mem = osRtxMemoryAlloc(osRtxInfo.mem.mp_data, size, 0U);
if (mp_mem == NULL) {
if ((flags & osRtxFlagSystemObject) != 0U) {
#ifdef RTX_OBJ_PTR_CHECK
(void)osRtxMemoryPoolFree(osRtxInfo.mpi.memory_pool, mp);
#else
if (osRtxInfo.mpi.memory_pool != NULL) {
(void)osRtxMemoryPoolFree(osRtxInfo.mpi.memory_pool, mp);
} else {
(void)osRtxMemoryFree(osRtxInfo.mem.common, mp);
}
#endif
#ifdef RTX_OBJ_MEM_USAGE
osRtxMemoryPoolMemUsage.cnt_free++;
#endif
}
mp = NULL;
} else {
(void)memset(mp_mem, 0, size);
}
flags |= osRtxFlagSystemMemory;
}
if (mp != NULL) {
// Initialize control block
mp->id = osRtxIdMemoryPool;
mp->flags = flags;
mp->attr = 0U;
mp->name = name;
mp->thread_list = NULL;
#ifdef RTX_SAFETY_CLASS
if ((attr_bits & osSafetyClass_Valid) != 0U) {
mp->attr |= (uint8_t)((attr_bits & osSafetyClass_Msk) >>
(osSafetyClass_Pos - osRtxAttrClass_Pos));
} else {
// Inherit safety class from the running thread
if (thread != NULL) {
mp->attr |= (uint8_t)(thread->attr & osRtxAttrClass_Msk);
}
}
#endif
(void)osRtxMemoryPoolInit(&mp->mp_info, b_count, b_size, mp_mem);
// Register post ISR processing function
osRtxInfo.post_process.memory_pool = osRtxMemoryPoolPostProcess;
EvrRtxMemoryPoolCreated(mp, mp->name);
} else {
EvrRtxMemoryPoolError(NULL, (int32_t)osErrorNoMemory);
}
return mp;
}
/// Get name of a Memory Pool object (service call implementation).
/// \note API identical to osMemoryPoolGetName
static const char *svcRtxMemoryPoolGetName (osMemoryPoolId_t mp_id) {
  os_memory_pool_t *mp = osRtxMemoryPoolId(mp_id);
  const char       *name = NULL;

  // A name can only be read from a valid memory pool control block
  if (IsMemoryPoolPtrValid(mp) && (mp->id == osRtxIdMemoryPool)) {
    name = mp->name;
  }
  EvrRtxMemoryPoolGetName(mp, name);
  return name;
}
/// Allocate a memory block from a Memory Pool.
/// \note API identical to osMemoryPoolAlloc
/// \param[in]  mp_id    memory pool ID.
/// \param[in]  timeout  timeout in kernel ticks, or 0 for no blocking.
/// \return address of the allocated block, or NULL (invalid pool, no memory,
///         or the caller was suspended to wait for a block).
static void *svcRtxMemoryPoolAlloc (osMemoryPoolId_t mp_id, uint32_t timeout) {
  os_memory_pool_t *mp = osRtxMemoryPoolId(mp_id);
#ifdef RTX_SAFETY_CLASS
  const os_thread_t *thread;
#endif
  void *block;

  // Check parameters
  if (!IsMemoryPoolPtrValid(mp) || (mp->id != osRtxIdMemoryPool)) {
    EvrRtxMemoryPoolError(mp, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return NULL;
  }
#ifdef RTX_SAFETY_CLASS
  // Check running thread safety class: a thread may only use pools of an
  // equal or lower safety class than its own.
  thread = osRtxThreadGetRunning();
  if ((thread != NULL) &&
      ((thread->attr >> osRtxAttrClass_Pos) < (mp->attr >> osRtxAttrClass_Pos))) {
    EvrRtxMemoryPoolError(mp, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return NULL;
  }
#endif
  // Allocate memory
  block = osRtxMemoryPoolAlloc(&mp->mp_info);
  if (block != NULL) {
    EvrRtxMemoryPoolAllocated(mp, block);
  } else {
    // No memory available
    if (timeout != 0U) {
      EvrRtxMemoryPoolAllocPending(mp, timeout);
      // Suspend current Thread; NULL is returned now, and if a block is freed
      // before the timeout it is delivered to this thread via
      // osRtxThreadWaitExit (see svcRtxMemoryPoolFree).
      if (osRtxThreadWaitEnter(osRtxThreadWaitingMemoryPool, timeout)) {
        osRtxThreadListPut(osRtxObject(mp), osRtxThreadGetRunning());
      } else {
        EvrRtxMemoryPoolAllocTimeout(mp);
      }
    } else {
      EvrRtxMemoryPoolAllocFailed(mp);
    }
  }
  return block;
}
/// Return an allocated memory block back to a Memory Pool.
/// \note API identical to osMemoryPoolFree
/// \param[in]  mp_id  memory pool ID.
/// \param[in]  block  address of the allocated memory block.
/// \return execution status (osOK, osErrorParameter, osErrorSafetyClass, or
///         the status from osRtxMemoryPoolFree).
static osStatus_t svcRtxMemoryPoolFree (osMemoryPoolId_t mp_id, void *block) {
  os_memory_pool_t *mp = osRtxMemoryPoolId(mp_id);
  void *block0;
  os_thread_t *thread;
  osStatus_t status;

  // Check parameters
  if (!IsMemoryPoolPtrValid(mp) || (mp->id != osRtxIdMemoryPool)) {
    EvrRtxMemoryPoolError(mp, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }
#ifdef RTX_SAFETY_CLASS
  // Check running thread safety class
  thread = osRtxThreadGetRunning();
  if ((thread != NULL) &&
      ((thread->attr >> osRtxAttrClass_Pos) < (mp->attr >> osRtxAttrClass_Pos))) {
    EvrRtxMemoryPoolError(mp, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorSafetyClass;
  }
#endif
  // Free memory
  status = osRtxMemoryPoolFree(&mp->mp_info, block);
  if (status == osOK) {
    EvrRtxMemoryPoolDeallocated(mp, block);
    // Check if Thread is waiting to allocate memory
    if (mp->thread_list != NULL) {
      // Allocate memory
      block0 = osRtxMemoryPoolAlloc(&mp->mp_info);
      if (block0 != NULL) {
        // Wakeup waiting Thread with highest Priority; the block address is
        // handed to the woken thread as its osMemoryPoolAlloc return value.
        thread = osRtxThreadListGet(osRtxObject(mp));
        //lint -e{923} "cast from pointer to unsigned int"
        osRtxThreadWaitExit(thread, (uint32_t)block0, TRUE);
        EvrRtxMemoryPoolAllocated(mp, block0);
      }
    }
  } else {
    EvrRtxMemoryPoolFreeFailed(mp, block);
  }
  return status;
}
/// Get maximum number of memory blocks in a Memory Pool (service call).
/// \note API identical to osMemoryPoolGetCapacity
static uint32_t svcRtxMemoryPoolGetCapacity (osMemoryPoolId_t mp_id) {
  os_memory_pool_t *mp = osRtxMemoryPoolId(mp_id);
  uint32_t          capacity = 0U;

  // An invalid control block reports a capacity of zero
  if (IsMemoryPoolPtrValid(mp) && (mp->id == osRtxIdMemoryPool)) {
    capacity = mp->mp_info.max_blocks;
  }
  EvrRtxMemoryPoolGetCapacity(mp, capacity);
  return capacity;
}
/// Get memory block size in a Memory Pool (service call).
/// \note API identical to osMemoryPoolGetBlockSize
static uint32_t svcRtxMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id) {
  os_memory_pool_t *mp = osRtxMemoryPoolId(mp_id);
  uint32_t          bsize = 0U;

  // An invalid control block reports a block size of zero
  if (IsMemoryPoolPtrValid(mp) && (mp->id == osRtxIdMemoryPool)) {
    bsize = mp->mp_info.block_size;
  }
  EvrRtxMemoryPoolGetBlockSize(mp, bsize);
  return bsize;
}
/// Get number of memory blocks used in a Memory Pool (service call).
/// \note API identical to osMemoryPoolGetCount
static uint32_t svcRtxMemoryPoolGetCount (osMemoryPoolId_t mp_id) {
  os_memory_pool_t *mp = osRtxMemoryPoolId(mp_id);
  uint32_t          used = 0U;

  // An invalid control block reports zero used blocks
  if (IsMemoryPoolPtrValid(mp) && (mp->id == osRtxIdMemoryPool)) {
    used = mp->mp_info.used_blocks;
  }
  EvrRtxMemoryPoolGetCount(mp, used);
  return used;
}
/// Get number of memory blocks available in a Memory Pool (service call).
/// \note API identical to osMemoryPoolGetSpace
static uint32_t svcRtxMemoryPoolGetSpace (osMemoryPoolId_t mp_id) {
  os_memory_pool_t *mp = osRtxMemoryPoolId(mp_id);
  uint32_t          space = 0U;

  // Space = capacity minus blocks currently in use; zero for an invalid pool
  if (IsMemoryPoolPtrValid(mp) && (mp->id == osRtxIdMemoryPool)) {
    space = mp->mp_info.max_blocks - mp->mp_info.used_blocks;
  }
  EvrRtxMemoryPoolGetSpace(mp, space);
  return space;
}
/// Delete a Memory Pool object.
/// \note API identical to osMemoryPoolDelete
/// \param[in]  mp_id  memory pool ID.
/// \return osOK, osErrorParameter or osErrorSafetyClass.
static osStatus_t svcRtxMemoryPoolDelete (osMemoryPoolId_t mp_id) {
  os_memory_pool_t *mp = osRtxMemoryPoolId(mp_id);
  os_thread_t *thread;

  // Check parameters
  if (!IsMemoryPoolPtrValid(mp) || (mp->id != osRtxIdMemoryPool)) {
    EvrRtxMemoryPoolError(mp, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }
#ifdef RTX_SAFETY_CLASS
  // Check running thread safety class
  thread = osRtxThreadGetRunning();
  if ((thread != NULL) &&
      ((thread->attr >> osRtxAttrClass_Pos) < (mp->attr >> osRtxAttrClass_Pos))) {
    EvrRtxMemoryPoolError(mp, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorSafetyClass;
  }
#endif
  // Unblock waiting threads: each waiter's alloc returns 0 (NULL block),
  // then run the dispatcher once in case a higher-priority thread woke up.
  if (mp->thread_list != NULL) {
    do {
      thread = osRtxThreadListGet(osRtxObject(mp));
      osRtxThreadWaitExit(thread, 0U, FALSE);
    } while (mp->thread_list != NULL);
    osRtxThreadDispatch(NULL);
  }
  osRtxMemoryPoolDestroy(mp);
  return osOK;
}
// Service Calls definitions
// The SVC0_N macros expand to the __svcXxx veneers that trap into handler
// mode (SVC) and invoke the matching svcRtxXxx implementation above.
//lint ++flb "Library Begin" [MISRA Note 11]
SVC0_3(MemoryPoolNew, osMemoryPoolId_t, uint32_t, uint32_t, const osMemoryPoolAttr_t *)
SVC0_1(MemoryPoolGetName, const char *, osMemoryPoolId_t)
SVC0_2(MemoryPoolAlloc, void *, osMemoryPoolId_t, uint32_t)
SVC0_2(MemoryPoolFree, osStatus_t, osMemoryPoolId_t, void *)
SVC0_1(MemoryPoolGetCapacity, uint32_t, osMemoryPoolId_t)
SVC0_1(MemoryPoolGetBlockSize, uint32_t, osMemoryPoolId_t)
SVC0_1(MemoryPoolGetCount, uint32_t, osMemoryPoolId_t)
SVC0_1(MemoryPoolGetSpace, uint32_t, osMemoryPoolId_t)
SVC0_1(MemoryPoolDelete, osStatus_t, osMemoryPoolId_t)
//lint --flb "Library End"
// ==== ISR Calls ====

/// Allocate a memory block from a Memory Pool (ISR context).
/// \note API identical to osMemoryPoolAlloc
__STATIC_INLINE
void *isrRtxMemoryPoolAlloc (osMemoryPoolId_t mp_id, uint32_t timeout) {
  os_memory_pool_t *mp = osRtxMemoryPoolId(mp_id);
  void             *block;

  // From an ISR a non-zero timeout is invalid (blocking is impossible)
  if (!IsMemoryPoolPtrValid(mp) || (mp->id != osRtxIdMemoryPool) || (timeout != 0U)) {
    EvrRtxMemoryPoolError(mp, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return NULL;
  }
  // Try to take a block; never blocks
  block = osRtxMemoryPoolAlloc(&mp->mp_info);
  if (block != NULL) {
    EvrRtxMemoryPoolAllocated(mp, block);
  } else {
    EvrRtxMemoryPoolAllocFailed(mp);
  }
  return block;
}
/// Return an allocated memory block back to a Memory Pool (ISR context).
/// \note API identical to osMemoryPoolFree
__STATIC_INLINE
osStatus_t isrRtxMemoryPoolFree (osMemoryPoolId_t mp_id, void *block) {
  os_memory_pool_t *mp = osRtxMemoryPoolId(mp_id);
  osStatus_t        status;

  // Validate the control block before touching it
  if (!IsMemoryPoolPtrValid(mp) || (mp->id != osRtxIdMemoryPool)) {
    EvrRtxMemoryPoolError(mp, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }
  // Return the block to the pool
  status = osRtxMemoryPoolFree(&mp->mp_info, block);
  if (status != osOK) {
    EvrRtxMemoryPoolFreeFailed(mp, block);
  } else {
    // Waking a waiting thread cannot be done here; defer to post-ISR processing
    osRtxPostProcess(osRtxObject(mp));
    EvrRtxMemoryPoolDeallocated(mp, block);
  }
  return status;
}
// ==== Public API ====

/// Create and Initialize a Memory Pool object.
osMemoryPoolId_t osMemoryPoolNew (uint32_t block_count, uint32_t block_size, const osMemoryPoolAttr_t *attr) {
  EvrRtxMemoryPoolNew(block_count, block_size, attr);
  // Object creation is not allowed from ISR context or with IRQs masked
  if (IsException() || IsIrqMasked()) {
    EvrRtxMemoryPoolError(NULL, (int32_t)osErrorISR);
    return NULL;
  }
  return __svcMemoryPoolNew(block_count, block_size, attr);
}
/// Get name of a Memory Pool object.
const char *osMemoryPoolGetName (osMemoryPoolId_t mp_id) {
  // ISR context calls the implementation directly; thread context uses the SVC veneer
  if (IsException() || IsIrqMasked()) {
    return svcRtxMemoryPoolGetName(mp_id);
  }
  return __svcMemoryPoolGetName(mp_id);
}
/// Allocate a memory block from a Memory Pool.
void *osMemoryPoolAlloc (osMemoryPoolId_t mp_id, uint32_t timeout) {
  EvrRtxMemoryPoolAlloc(mp_id, timeout);
  // ISR context: non-blocking variant; thread context: SVC veneer
  if (IsException() || IsIrqMasked()) {
    return isrRtxMemoryPoolAlloc(mp_id, timeout);
  }
  return __svcMemoryPoolAlloc(mp_id, timeout);
}
/// Return an allocated memory block back to a Memory Pool.
osStatus_t osMemoryPoolFree (osMemoryPoolId_t mp_id, void *block) {
  EvrRtxMemoryPoolFree(mp_id, block);
  // ISR context: deferred-wakeup variant; thread context: SVC veneer
  if (IsException() || IsIrqMasked()) {
    return isrRtxMemoryPoolFree(mp_id, block);
  }
  return __svcMemoryPoolFree(mp_id, block);
}
/// Get maximum number of memory blocks in a Memory Pool.
uint32_t osMemoryPoolGetCapacity (osMemoryPoolId_t mp_id) {
  // Safe to call from ISR context; only the call path differs
  if (IsException() || IsIrqMasked()) {
    return svcRtxMemoryPoolGetCapacity(mp_id);
  }
  return __svcMemoryPoolGetCapacity(mp_id);
}
/// Get memory block size in a Memory Pool.
uint32_t osMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id) {
  // Safe to call from ISR context; only the call path differs
  if (IsException() || IsIrqMasked()) {
    return svcRtxMemoryPoolGetBlockSize(mp_id);
  }
  return __svcMemoryPoolGetBlockSize(mp_id);
}
/// Get number of memory blocks used in a Memory Pool.
uint32_t osMemoryPoolGetCount (osMemoryPoolId_t mp_id) {
  // Safe to call from ISR context; only the call path differs
  if (IsException() || IsIrqMasked()) {
    return svcRtxMemoryPoolGetCount(mp_id);
  }
  return __svcMemoryPoolGetCount(mp_id);
}
/// Get number of memory blocks available in a Memory Pool.
uint32_t osMemoryPoolGetSpace (osMemoryPoolId_t mp_id) {
  // Safe to call from ISR context; only the call path differs
  if (IsException() || IsIrqMasked()) {
    return svcRtxMemoryPoolGetSpace(mp_id);
  }
  return __svcMemoryPoolGetSpace(mp_id);
}
/// Delete a Memory Pool object.
osStatus_t osMemoryPoolDelete (osMemoryPoolId_t mp_id) {
  EvrRtxMemoryPoolDelete(mp_id);
  // Object deletion is not allowed from ISR context or with IRQs masked
  if (IsException() || IsIrqMasked()) {
    EvrRtxMemoryPoolError(mp_id, (int32_t)osErrorISR);
    return osErrorISR;
  }
  return __svcMemoryPoolDelete(mp_id);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,702 @@
/*
* Copyright (c) 2013-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: Mutex functions
*
* -----------------------------------------------------------------------------
*/
#include "rtx_lib.h"
// OS Runtime Object Memory Usage
// Tracks mutex control-block allocations from system memory:
// cnt_alloc / cnt_free counters and the max_used high-water mark.
#ifdef RTX_OBJ_MEM_USAGE
osRtxObjectMemUsage_t osRtxMutexMemUsage \
__attribute__((section(".data.os.mutex.obj"))) =
{ 0U, 0U, 0U };
#endif
// ==== Helper functions ====

/// Verify that Mutex object pointer is valid.
/// \param[in]  mutex  mutex object.
/// \return true - valid, false - invalid.
static bool_t IsMutexPtrValid (const os_mutex_t *mutex) {
  bool_t valid = TRUE;
#ifdef RTX_OBJ_PTR_CHECK
  //lint --e{923} --e{9078} "cast from pointer to unsigned int" [MISRA Note 7]
  uint32_t cb_start  = (uint32_t)&__os_mutex_cb_start__;
  uint32_t cb_length = (uint32_t)&__os_mutex_cb_length__;
  uint32_t offset    = (uint32_t)mutex - cb_start;

  // The pointer must lie inside the mutex control-block section and be
  // aligned to an os_mutex_t slot within it.
  if ((offset >= cb_length) || ((offset % sizeof(os_mutex_t)) != 0U)) {
    valid = FALSE;
  }
#else
  // Without section checking only a NULL pointer is rejected
  if (mutex == NULL) {
    valid = FALSE;
  }
#endif
  return valid;
}
// ==== Library functions ====

/// Release Mutex list when owner Thread terminates.
/// \param[in]  mutex_list  mutex list.
/// Every robust mutex held by the terminating thread is force-unlocked and,
/// if threads are waiting, handed to the highest-priority waiter.
/// Non-robust mutexes are left untouched.
void osRtxMutexOwnerRelease (os_mutex_t *mutex_list) {
  os_mutex_t *mutex;
  os_mutex_t *mutex_next;
  os_thread_t *thread;

  mutex = mutex_list;
  while (mutex != NULL) {
    // Save the next link first: the node may be re-linked into the new
    // owner's list below.
    mutex_next = mutex->owner_next;
    // Check if Mutex is Robust
    if ((mutex->attr & osMutexRobust) != 0U) {
      // Clear Lock counter
      mutex->lock = 0U;
      EvrRtxMutexReleased(mutex, 0U);
      // Check if Thread is waiting for a Mutex
      if (mutex->thread_list != NULL) {
        // Wakeup waiting Thread with highest Priority
        thread = osRtxThreadListGet(osRtxObject(mutex));
        osRtxThreadWaitExit(thread, (uint32_t)osOK, FALSE);
        // Thread is the new Mutex owner: insert at head of its owner list
        mutex->owner_thread = thread;
        mutex->owner_prev = NULL;
        mutex->owner_next = thread->mutex_list;
        if (thread->mutex_list != NULL) {
          thread->mutex_list->owner_prev = mutex;
        }
        thread->mutex_list = mutex;
        mutex->lock = 1U;
        EvrRtxMutexAcquired(mutex, 1U);
      }
    }
    mutex = mutex_next;
  }
}
/// Restore Mutex owner Thread priority.
/// \param[in]  mutex          mutex object.
/// \param[in]  thread_wakeup  thread wakeup object.
/// Recomputes the owner's effective priority after a waiter leaves:
/// starting from the owner's base priority, raise it to the highest priority
/// among threads still waiting on any priority-inheritance mutex it owns,
/// excluding the thread being woken up.
void osRtxMutexOwnerRestore (const os_mutex_t *mutex, const os_thread_t *thread_wakeup) {
  const os_mutex_t *mutex0;
  os_thread_t *thread;
  const os_thread_t *thread0;
  int8_t priority;

  // Restore owner Thread priority
  if ((mutex->attr & osMutexPrioInherit) != 0U) {
    thread = mutex->owner_thread;
    priority = thread->priority_base;
    mutex0 = thread->mutex_list;
    // Check Mutexes owned by Thread
    do {
      if ((mutex0->attr & osMutexPrioInherit) != 0U) {
        // Check Threads waiting for Mutex (list head is highest priority)
        thread0 = mutex0->thread_list;
        if (thread0 == thread_wakeup) {
          // Skip thread that is waken-up
          thread0 = thread0->thread_next;
        }
        if ((thread0 != NULL) && (thread0->priority > priority)) {
          // Higher priority Thread is waiting for Mutex
          priority = thread0->priority;
        }
      }
      mutex0 = mutex0->owner_next;
    } while (mutex0 != NULL);
    if (thread->priority != priority) {
      // Re-sort the owner in its ready/wait list to match the new priority
      thread->priority = priority;
      osRtxThreadListSort(thread);
    }
  }
}
/// Unlock Mutex owner when mutex is deleted.
/// \param[in]  mutex  mutex object.
/// \return true - successful, false - not locked.
/// Detaches the mutex from its owner's list, recomputes the owner's
/// inherited priority, and wakes all waiters with osErrorResource.
static bool_t osRtxMutexOwnerUnlock (os_mutex_t *mutex) {
  const os_mutex_t *mutex0;
  os_thread_t *thread;
  int8_t priority;

  // Check if Mutex is locked
  if (mutex->lock == 0U) {
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return FALSE;
  }
  thread = mutex->owner_thread;
  // Remove Mutex from Thread owner list (doubly-linked list unlink)
  if (mutex->owner_next != NULL) {
    mutex->owner_next->owner_prev = mutex->owner_prev;
  }
  if (mutex->owner_prev != NULL) {
    mutex->owner_prev->owner_next = mutex->owner_next;
  } else {
    thread->mutex_list = mutex->owner_next;
  }
  // Restore owner Thread priority: base priority raised to the highest
  // priority still waiting on any remaining priority-inheritance mutex.
  priority = thread->priority_base;
  mutex0 = thread->mutex_list;
  // Check Mutexes owned by Thread
  while (mutex0 != NULL) {
    if ((mutex0->attr & osMutexPrioInherit) != 0U) {
      if ((mutex0->thread_list != NULL) && (mutex0->thread_list->priority > priority)) {
        // Higher priority Thread is waiting for Mutex
        priority = mutex0->thread_list->priority;
      }
    }
    mutex0 = mutex0->owner_next;
  }
  if (thread->priority != priority) {
    thread->priority = priority;
    osRtxThreadListSort(thread);
  }
  // Unblock waiting threads: each pending acquire fails with osErrorResource
  while (mutex->thread_list != NULL) {
    thread = osRtxThreadListGet(osRtxObject(mutex));
    osRtxThreadWaitExit(thread, (uint32_t)osErrorResource, FALSE);
  }
  mutex->lock = 0U;
  return TRUE;
}
/// Destroy a Mutex object.
/// \param[in]  mutex  mutex object.
/// Marks the control block invalid and, if it was allocated by the kernel
/// (osRtxFlagSystemObject), returns it to the mutex pool or common memory.
static void osRtxMutexDestroy (os_mutex_t *mutex) {
  // Mark object as invalid
  mutex->id = osRtxIdInvalid;
  // Free object memory
  if ((mutex->flags & osRtxFlagSystemObject) != 0U) {
#ifdef RTX_OBJ_PTR_CHECK
    // With pointer checking all control blocks come from the dedicated pool
    (void)osRtxMemoryPoolFree(osRtxInfo.mpi.mutex, mutex);
#else
    if (osRtxInfo.mpi.mutex != NULL) {
      (void)osRtxMemoryPoolFree(osRtxInfo.mpi.mutex, mutex);
    } else {
      (void)osRtxMemoryFree(osRtxInfo.mem.common, mutex);
    }
#endif
#ifdef RTX_OBJ_MEM_USAGE
    osRtxMutexMemUsage.cnt_free++;
#endif
  }
  EvrRtxMutexDestroyed(mutex);
}
#ifdef RTX_SAFETY_CLASS

/// Delete a Mutex safety class.
/// \param[in]  safety_class  safety class.
/// \param[in]  mode          safety mode.
/// Walks the whole mutex control-block section and destroys every mutex
/// whose safety class matches (same and/or lower, per mode), after
/// force-unlocking its owner.
void osRtxMutexDeleteClass (uint32_t safety_class, uint32_t mode) {
  os_mutex_t *mutex;
  uint32_t length;

  //lint --e{923} --e{9078} "cast from pointer to unsigned int" [MISRA Note 7]
  mutex = (os_mutex_t *)(uint32_t)&__os_mutex_cb_start__;
  length = (uint32_t)&__os_mutex_cb_length__;
  while (length >= sizeof(os_mutex_t)) {
    if ( (mutex->id == osRtxIdMutex) &&
        ((((mode & osSafetyWithSameClass) != 0U) &&
          ((mutex->attr >> osRtxAttrClass_Pos) == (uint8_t)safety_class)) ||
         (((mode & osSafetyWithLowerClass) != 0U) &&
          ((mutex->attr >> osRtxAttrClass_Pos) < (uint8_t)safety_class)))) {
      (void)osRtxMutexOwnerUnlock(mutex);
      osRtxMutexDestroy(mutex);
    }
    length -= sizeof(os_mutex_t);
    mutex++;
  }
}
#endif
// ==== Service Calls ====

/// Create and Initialize a Mutex object.
/// \note API identical to osMutexNew
/// \param[in]  attr  mutex attributes (NULL selects defaults).
/// \return mutex ID for reference by other functions or NULL in case of error.
static osMutexId_t svcRtxMutexNew (const osMutexAttr_t *attr) {
  os_mutex_t *mutex;
#ifdef RTX_SAFETY_CLASS
  const os_thread_t *thread = osRtxThreadGetRunning();
#endif
  uint32_t attr_bits;
  uint8_t flags;
  const char *name;

  // Process attributes
  if (attr != NULL) {
    name = attr->name;
    attr_bits = attr->attr_bits;
    //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 6]
    mutex = attr->cb_mem;
#ifdef RTX_SAFETY_CLASS
    // A thread may only create objects of an equal or lower safety class
    if ((attr_bits & osSafetyClass_Valid) != 0U) {
      if ((thread != NULL) &&
          ((thread->attr >> osRtxAttrClass_Pos) <
          (uint8_t)((attr_bits & osSafetyClass_Msk) >> osSafetyClass_Pos))) {
        EvrRtxMutexError(NULL, (int32_t)osErrorSafetyClass);
        //lint -e{904} "Return statement before end of function" [MISRA Note 1]
        return NULL;
      }
    }
#endif
    if (mutex != NULL) {
      // User-provided control block: must be correctly placed and sized
      if (!IsMutexPtrValid(mutex) || (attr->cb_size != sizeof(os_mutex_t))) {
        EvrRtxMutexError(NULL, osRtxErrorInvalidControlBlock);
        //lint -e{904} "Return statement before end of function" [MISRA Note 1]
        return NULL;
      }
    } else {
      // No control block supplied: size must be zero
      if (attr->cb_size != 0U) {
        EvrRtxMutexError(NULL, osRtxErrorInvalidControlBlock);
        //lint -e{904} "Return statement before end of function" [MISRA Note 1]
        return NULL;
      }
    }
  } else {
    name = NULL;
    attr_bits = 0U;
    mutex = NULL;
  }
  // Allocate object memory if not provided
  if (mutex == NULL) {
    if (osRtxInfo.mpi.mutex != NULL) {
      //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
      mutex = osRtxMemoryPoolAlloc(osRtxInfo.mpi.mutex);
#ifndef RTX_OBJ_PTR_CHECK
    } else {
      //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
      mutex = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_mutex_t), 1U);
#endif
    }
#ifdef RTX_OBJ_MEM_USAGE
    // Update allocation counters and high-water mark
    if (mutex != NULL) {
      uint32_t used;
      osRtxMutexMemUsage.cnt_alloc++;
      used = osRtxMutexMemUsage.cnt_alloc - osRtxMutexMemUsage.cnt_free;
      if (osRtxMutexMemUsage.max_used < used) {
        osRtxMutexMemUsage.max_used = used;
      }
    }
#endif
    flags = osRtxFlagSystemObject;
  } else {
    flags = 0U;
  }
  if (mutex != NULL) {
    // Initialize control block
    mutex->id = osRtxIdMutex;
    mutex->flags = flags;
    mutex->attr = (uint8_t)(attr_bits & ~osRtxAttrClass_Msk);
    mutex->name = name;
    mutex->thread_list = NULL;
    mutex->owner_thread = NULL;
    mutex->owner_prev = NULL;
    mutex->owner_next = NULL;
    mutex->lock = 0U;
#ifdef RTX_SAFETY_CLASS
    if ((attr_bits & osSafetyClass_Valid) != 0U) {
      mutex->attr |= (uint8_t)((attr_bits & osSafetyClass_Msk) >>
                               (osSafetyClass_Pos - osRtxAttrClass_Pos));
    } else {
      // Inherit safety class from the running thread
      if (thread != NULL) {
        mutex->attr |= (uint8_t)(thread->attr & osRtxAttrClass_Msk);
      }
    }
#endif
    EvrRtxMutexCreated(mutex, mutex->name);
  } else {
    EvrRtxMutexError(NULL, (int32_t)osErrorNoMemory);
  }
  return mutex;
}
/// Get name of a Mutex object (service call implementation).
/// \note API identical to osMutexGetName
static const char *svcRtxMutexGetName (osMutexId_t mutex_id) {
  os_mutex_t *mutex = osRtxMutexId(mutex_id);
  const char *name  = NULL;

  // A name can only be read from a valid mutex control block
  if (IsMutexPtrValid(mutex) && (mutex->id == osRtxIdMutex)) {
    name = mutex->name;
  }
  EvrRtxMutexGetName(mutex, name);
  return name;
}
/// Acquire a Mutex or timeout if it is locked.
/// \note API identical to osMutexAcquire
/// \param[in]  mutex_id  mutex ID.
/// \param[in]  timeout   timeout in kernel ticks, or 0 for no blocking.
/// \return osOK, osError, osErrorParameter, osErrorSafetyClass,
///         osErrorResource or osErrorTimeout.
static osStatus_t svcRtxMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) {
  os_mutex_t *mutex = osRtxMutexId(mutex_id);
  os_thread_t *thread;
  osStatus_t status;

  // Check running thread: mutexes can only be acquired in thread context
  thread = osRtxThreadGetRunning();
  if (thread == NULL) {
    EvrRtxMutexError(mutex, osRtxErrorKernelNotRunning);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osError;
  }
  // Check parameters
  if (!IsMutexPtrValid(mutex) || (mutex->id != osRtxIdMutex)) {
    EvrRtxMutexError(mutex, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }
#ifdef RTX_SAFETY_CLASS
  // Check running thread safety class
  if ((thread->attr >> osRtxAttrClass_Pos) < (mutex->attr >> osRtxAttrClass_Pos)) {
    EvrRtxMutexError(mutex, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorSafetyClass;
  }
#endif
  // Check if Mutex is not locked
  if (mutex->lock == 0U) {
    // Acquire Mutex: insert at head of the thread's owned-mutex list
    mutex->owner_thread = thread;
    mutex->owner_prev = NULL;
    mutex->owner_next = thread->mutex_list;
    if (thread->mutex_list != NULL) {
      thread->mutex_list->owner_prev = mutex;
    }
    thread->mutex_list = mutex;
    mutex->lock = 1U;
    EvrRtxMutexAcquired(mutex, mutex->lock);
    status = osOK;
  } else {
    // Check if Mutex is recursive and running Thread is the owner
    if (((mutex->attr & osMutexRecursive) != 0U) && (mutex->owner_thread == thread)) {
      // Try to increment lock counter
      if (mutex->lock == osRtxMutexLockLimit) {
        EvrRtxMutexError(mutex, osRtxErrorMutexLockLimit);
        status = osErrorResource;
      } else {
        mutex->lock++;
        EvrRtxMutexAcquired(mutex, mutex->lock);
        status = osOK;
      }
    } else {
      // Check if timeout is specified
      if (timeout != 0U) {
        // Check if Priority inheritance protocol is enabled
        if ((mutex->attr & osMutexPrioInherit) != 0U) {
          // Raise priority of owner Thread if lower than priority of running Thread
          if (mutex->owner_thread->priority < thread->priority) {
            mutex->owner_thread->priority = thread->priority;
            osRtxThreadListSort(mutex->owner_thread);
          }
        }
        EvrRtxMutexAcquirePending(mutex, timeout);
        // Suspend current Thread; if the mutex is handed over before the
        // timeout, the final status is delivered via osRtxThreadWaitExit
        // (see svcRtxMutexRelease).
        if (osRtxThreadWaitEnter(osRtxThreadWaitingMutex, timeout)) {
          osRtxThreadListPut(osRtxObject(mutex), thread);
        } else {
          EvrRtxMutexAcquireTimeout(mutex);
        }
        status = osErrorTimeout;
      } else {
        EvrRtxMutexNotAcquired(mutex);
        status = osErrorResource;
      }
    }
  }
  return status;
}
/// Release a Mutex that was acquired by osMutexAcquire.
/// \note API identical to osMutexRelease
/// \param[in]  mutex_id  mutex ID.
/// \return osOK, osError, osErrorParameter or osErrorResource.
static osStatus_t svcRtxMutexRelease (osMutexId_t mutex_id) {
  os_mutex_t *mutex = osRtxMutexId(mutex_id);
  const os_mutex_t *mutex0;
  os_thread_t *thread;
  int8_t priority;

  // Check running thread
  thread = osRtxThreadGetRunning();
  if (thread == NULL) {
    EvrRtxMutexError(mutex, osRtxErrorKernelNotRunning);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osError;
  }
  // Check parameters
  if (!IsMutexPtrValid(mutex) || (mutex->id != osRtxIdMutex)) {
    EvrRtxMutexError(mutex, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }
  // Check if Mutex is not locked
  if (mutex->lock == 0U) {
    EvrRtxMutexError(mutex, osRtxErrorMutexNotLocked);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorResource;
  }
  // Check if running Thread is not the owner
  if (mutex->owner_thread != thread) {
    EvrRtxMutexError(mutex, osRtxErrorMutexNotOwned);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorResource;
  }
  // Decrement Lock counter (recursive mutexes may still stay locked)
  mutex->lock--;
  EvrRtxMutexReleased(mutex, mutex->lock);
  // Check Lock counter
  if (mutex->lock == 0U) {
    // Remove Mutex from Thread owner list (doubly-linked list unlink)
    if (mutex->owner_next != NULL) {
      mutex->owner_next->owner_prev = mutex->owner_prev;
    }
    if (mutex->owner_prev != NULL) {
      mutex->owner_prev->owner_next = mutex->owner_next;
    } else {
      thread->mutex_list = mutex->owner_next;
    }
    // Restore running Thread priority: base priority raised to the highest
    // priority still waiting on any remaining priority-inheritance mutex.
    priority = thread->priority_base;
    mutex0 = thread->mutex_list;
    // Check mutexes owned by running Thread
    while (mutex0 != NULL) {
      if ((mutex0->attr & osMutexPrioInherit) != 0U) {
        if ((mutex0->thread_list != NULL) && (mutex0->thread_list->priority > priority)) {
          // Higher priority Thread is waiting for Mutex
          priority = mutex0->thread_list->priority;
        }
      }
      mutex0 = mutex0->owner_next;
    }
    thread->priority = priority;
    // Check if Thread is waiting for a Mutex
    if (mutex->thread_list != NULL) {
      // Wakeup waiting Thread with highest Priority; its pending
      // osMutexAcquire completes with osOK.
      thread = osRtxThreadListGet(osRtxObject(mutex));
      osRtxThreadWaitExit(thread, (uint32_t)osOK, FALSE);
      // Thread is the new Mutex owner
      mutex->owner_thread = thread;
      mutex->owner_prev = NULL;
      mutex->owner_next = thread->mutex_list;
      if (thread->mutex_list != NULL) {
        thread->mutex_list->owner_prev = mutex;
      }
      thread->mutex_list = mutex;
      mutex->lock = 1U;
      EvrRtxMutexAcquired(mutex, 1U);
    }
    osRtxThreadDispatch(NULL);
  }
  return osOK;
}
/// Get Thread which owns a Mutex object (service call implementation).
/// \note API identical to osMutexGetOwner
static osThreadId_t svcRtxMutexGetOwner (osMutexId_t mutex_id) {
  os_mutex_t  *mutex = osRtxMutexId(mutex_id);
  osThreadId_t owner = NULL;

  // Only a valid, currently locked mutex has an owner to report
  if (IsMutexPtrValid(mutex) && (mutex->id == osRtxIdMutex) && (mutex->lock != 0U)) {
    owner = mutex->owner_thread;
  }
  EvrRtxMutexGetOwner(mutex, owner);
  return owner;
}
/// Delete a Mutex object.
/// \note API identical to osMutexDelete
/// \param[in]  mutex_id  mutex ID.
/// \return osOK, osErrorParameter or osErrorSafetyClass.
static osStatus_t svcRtxMutexDelete (osMutexId_t mutex_id) {
  os_mutex_t *mutex = osRtxMutexId(mutex_id);
#ifdef RTX_SAFETY_CLASS
  const os_thread_t *thread;
#endif

  // Check parameters
  if (!IsMutexPtrValid(mutex) || (mutex->id != osRtxIdMutex)) {
    EvrRtxMutexError(mutex, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }
#ifdef RTX_SAFETY_CLASS
  // Check running thread safety class
  thread = osRtxThreadGetRunning();
  if ((thread != NULL) &&
      ((thread->attr >> osRtxAttrClass_Pos) < (mutex->attr >> osRtxAttrClass_Pos))) {
    EvrRtxMutexError(mutex, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorSafetyClass;
  }
#endif
  // Unlock the mutex owner; if the owner's priority changed or waiters were
  // released, run the dispatcher once before destroying the object.
  if (osRtxMutexOwnerUnlock(mutex)) {
    osRtxThreadDispatch(NULL);
  }
  osRtxMutexDestroy(mutex);
  return osOK;
}
// Service Calls definitions
// The SVC0_N macros expand to the __svcXxx veneers that trap into handler
// mode (SVC) and invoke the matching svcRtxXxx implementation above.
//lint ++flb "Library Begin" [MISRA Note 11]
SVC0_1(MutexNew, osMutexId_t, const osMutexAttr_t *)
SVC0_1(MutexGetName, const char *, osMutexId_t)
SVC0_2(MutexAcquire, osStatus_t, osMutexId_t, uint32_t)
SVC0_1(MutexRelease, osStatus_t, osMutexId_t)
SVC0_1(MutexGetOwner, osThreadId_t, osMutexId_t)
SVC0_1(MutexDelete, osStatus_t, osMutexId_t)
//lint --flb "Library End"
// ==== Public API ====

/// Create and Initialize a Mutex object.
osMutexId_t osMutexNew (const osMutexAttr_t *attr) {
  EvrRtxMutexNew(attr);
  // Object creation is not allowed from ISR context or with IRQs masked
  if (IsException() || IsIrqMasked()) {
    EvrRtxMutexError(NULL, (int32_t)osErrorISR);
    return NULL;
  }
  return __svcMutexNew(attr);
}
/// Get name of a Mutex object.
const char *osMutexGetName (osMutexId_t mutex_id) {
  // ISR context calls the implementation directly; thread context uses the SVC veneer
  if (IsException() || IsIrqMasked()) {
    return svcRtxMutexGetName(mutex_id);
  }
  return __svcMutexGetName(mutex_id);
}
/// Acquire a Mutex or timeout if it is locked.
osStatus_t osMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) {
  EvrRtxMutexAcquire(mutex_id, timeout);
  // Mutex operations are not allowed from ISR context
  if (IsException() || IsIrqMasked()) {
    EvrRtxMutexError(mutex_id, (int32_t)osErrorISR);
    return osErrorISR;
  }
  return __svcMutexAcquire(mutex_id, timeout);
}
/// Release a Mutex that was acquired by \ref osMutexAcquire.
osStatus_t osMutexRelease (osMutexId_t mutex_id) {
  EvrRtxMutexRelease(mutex_id);
  // Mutex operations are not allowed from ISR context
  if (IsException() || IsIrqMasked()) {
    EvrRtxMutexError(mutex_id, (int32_t)osErrorISR);
    return osErrorISR;
  }
  return __svcMutexRelease(mutex_id);
}
/// Get Thread which owns a Mutex object.
osThreadId_t osMutexGetOwner (osMutexId_t mutex_id) {
  osThreadId_t owner = NULL;

  if (!IsException() && !IsIrqMasked()) {
    owner = __svcMutexGetOwner(mutex_id);
  } else {
    // Ownership query is not supported from ISR context; report NULL owner.
    EvrRtxMutexGetOwner(mutex_id, NULL);
  }
  return owner;
}
/// Delete a Mutex object.
osStatus_t osMutexDelete (osMutexId_t mutex_id) {
  osStatus_t ret;

  EvrRtxMutexDelete(mutex_id);
  if (!IsException() && !IsIrqMasked()) {
    ret = __svcMutexDelete(mutex_id);
  } else {
    // Object deletion is not allowed from ISR context.
    EvrRtxMutexError(mutex_id, (int32_t)osErrorISR);
    ret = osErrorISR;
  }
  return ret;
}

View File

@ -0,0 +1,648 @@
/*
* Copyright (c) 2013-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: Semaphore functions
*
* -----------------------------------------------------------------------------
*/
#include "rtx_lib.h"
// OS Runtime Object Memory Usage
#ifdef RTX_OBJ_MEM_USAGE
// Allocation counters for semaphore control blocks (cnt_alloc, cnt_free,
// max_used), kept in a dedicated data section for memory-usage instrumentation.
osRtxObjectMemUsage_t osRtxSemaphoreMemUsage \
__attribute__((section(".data.os.semaphore.obj"))) =
{ 0U, 0U, 0U };
#endif
// ==== Helper functions ====
/// Decrement Semaphore tokens.
/// \param[in]  semaphore       semaphore object.
/// \return 1 - success, 0 - failure.
static uint32_t SemaphoreTokenDecrement (os_semaphore_t *semaphore) {
#if (EXCLUSIVE_ACCESS == 0)
  uint32_t primask = __get_PRIMASK();  // remember interrupt-mask state on entry
#endif
  uint32_t ret;

#if (EXCLUSIVE_ACCESS == 0)
  // No exclusive-access instructions available: use a PRIMASK critical section.
  __disable_irq();

  if (semaphore->tokens != 0U) {
    semaphore->tokens--;
    ret = 1U;
  } else {
    // No token available.
    ret = 0U;
  }

  if (primask == 0U) {
    // Restore interrupts only if they were enabled on entry.
    __enable_irq();
  }
#else
  // Lock-free path: atomically decrement only if the count is non-zero.
  if (atomic_dec16_nz(&semaphore->tokens) != 0U) {
    ret = 1U;
  } else {
    ret = 0U;
  }
#endif

  return ret;
}
/// Increment Semaphore tokens.
/// \param[in]  semaphore       semaphore object.
/// \return 1 - success, 0 - failure (max token count already reached).
static uint32_t SemaphoreTokenIncrement (os_semaphore_t *semaphore) {
#if (EXCLUSIVE_ACCESS == 0)
  uint32_t primask = __get_PRIMASK();  // remember interrupt-mask state on entry
#endif
  uint32_t ret;

#if (EXCLUSIVE_ACCESS == 0)
  // No exclusive-access instructions available: use a PRIMASK critical section.
  __disable_irq();

  if (semaphore->tokens < semaphore->max_tokens) {
    semaphore->tokens++;
    ret = 1U;
  } else {
    // Token count is saturated at max_tokens.
    ret = 0U;
  }

  if (primask == 0U) {
    // Restore interrupts only if they were enabled on entry.
    __enable_irq();
  }
#else
  // Lock-free path: atomically increment only while below max_tokens;
  // the returned pre-increment value tells whether the increment happened.
  if (atomic_inc16_lt(&semaphore->tokens, semaphore->max_tokens) < semaphore->max_tokens) {
    ret = 1U;
  } else {
    ret = 0U;
  }
#endif

  return ret;
}
/// Verify that Semaphore object pointer is valid.
/// \param[in]  semaphore       semaphore object.
/// \return true - valid, false - invalid.
static bool_t IsSemaphorePtrValid (const os_semaphore_t *semaphore) {
#ifdef RTX_OBJ_PTR_CHECK
  // With pointer checking enabled, all semaphore control blocks live in a
  // dedicated linker section; validate membership and alignment in it.
  //lint --e{923} --e{9078} "cast from pointer to unsigned int" [MISRA Note 7]
  uint32_t cb_start  = (uint32_t)&__os_semaphore_cb_start__;
  uint32_t cb_length = (uint32_t)&__os_semaphore_cb_length__;

  // Check the section boundaries (unsigned subtraction also rejects
  // pointers below cb_start, including NULL).
  if (((uint32_t)semaphore - cb_start) >= cb_length) {
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return FALSE;
  }
  // Check the object alignment: must sit on a control-block boundary.
  if ((((uint32_t)semaphore - cb_start) % sizeof(os_semaphore_t)) != 0U) {
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return FALSE;
  }
#else
  // Without section checking only a NULL check is possible.
  if (semaphore == NULL) {
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return FALSE;
  }
#endif
  return TRUE;
}
// ==== Library functions ====
/// Destroy a Semaphore object.
/// \param[in]  semaphore       semaphore object.
static void osRtxSemaphoreDestroy (os_semaphore_t *semaphore) {

  // Mark object as invalid so stale ids are rejected by later calls
  semaphore->id = osRtxIdInvalid;

  // Free object memory - only if the kernel allocated the control block
  if ((semaphore->flags & osRtxFlagSystemObject) != 0U) {
#ifdef RTX_OBJ_PTR_CHECK
    // With pointer checking, system objects always come from the object pool.
    (void)osRtxMemoryPoolFree(osRtxInfo.mpi.semaphore, semaphore);
#else
    if (osRtxInfo.mpi.semaphore != NULL) {
      (void)osRtxMemoryPoolFree(osRtxInfo.mpi.semaphore, semaphore);
    } else {
      (void)osRtxMemoryFree(osRtxInfo.mem.common, semaphore);
    }
#endif
#ifdef RTX_OBJ_MEM_USAGE
    osRtxSemaphoreMemUsage.cnt_free++;
#endif
  }

  EvrRtxSemaphoreDestroyed(semaphore);
}
#ifdef RTX_SAFETY_CLASS

/// Delete a Semaphore safety class.
/// Destroys every semaphore whose safety class matches the selection;
/// threads blocked on a destroyed semaphore are released with osErrorResource.
/// \param[in]  safety_class    safety class.
/// \param[in]  mode            safety mode (osSafetyWithSameClass and/or osSafetyWithLowerClass).
void osRtxSemaphoreDeleteClass (uint32_t safety_class, uint32_t mode) {
  os_semaphore_t *semaphore;
  os_thread_t    *thread;
  uint32_t        length;

  // Walk the linker-placed semaphore control-block section object by object.
  //lint --e{923} --e{9078} "cast from pointer to unsigned int" [MISRA Note 7]
  semaphore = (os_semaphore_t *)(uint32_t)&__os_semaphore_cb_start__;
  length    = (uint32_t)&__os_semaphore_cb_length__;
  while (length >= sizeof(os_semaphore_t)) {
    if (   (semaphore->id == osRtxIdSemaphore) &&
        ((((mode & osSafetyWithSameClass)  != 0U) &&
          ((semaphore->attr >> osRtxAttrClass_Pos) == (uint8_t)safety_class)) ||
         (((mode & osSafetyWithLowerClass) != 0U) &&
          ((semaphore->attr >> osRtxAttrClass_Pos) <  (uint8_t)safety_class)))) {
      // Release all waiting threads before destroying the object.
      while (semaphore->thread_list != NULL) {
        thread = osRtxThreadListGet(osRtxObject(semaphore));
        osRtxThreadWaitExit(thread, (uint32_t)osErrorResource, FALSE);
      }
      osRtxSemaphoreDestroy(semaphore);
    }
    length -= sizeof(os_semaphore_t);
    semaphore++;
  }
}
#endif
// ==== Post ISR processing ====
/// Semaphore post ISR processing.
/// Runs from the PendSV handler for tokens released by isrRtxSemaphoreRelease.
/// \param[in]  semaphore       semaphore object.
static void osRtxSemaphorePostProcess (os_semaphore_t *semaphore) {
  os_thread_t *thread;

  // Check if Thread is waiting for a token
  if (semaphore->thread_list != NULL) {
    // Try to acquire token on behalf of the waiter
    if (SemaphoreTokenDecrement(semaphore) != 0U) {
      // Wakeup waiting Thread with highest Priority
      thread = osRtxThreadListGet(osRtxObject(semaphore));
      osRtxThreadWaitExit(thread, (uint32_t)osOK, FALSE);
      EvrRtxSemaphoreAcquired(semaphore, semaphore->tokens);
    }
  }
}
// ==== Service Calls ====
/// Create and Initialize a Semaphore object.
/// Validates parameters and attributes, obtains a control block (user-provided
/// or kernel-allocated) and initializes it.
/// \note API identical to osSemaphoreNew
static osSemaphoreId_t svcRtxSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr) {
  os_semaphore_t *semaphore;
#ifdef RTX_SAFETY_CLASS
  const os_thread_t *thread = osRtxThreadGetRunning();
  uint32_t attr_bits;
#endif
  uint8_t flags;
  const char *name;

  // Check parameters
  if ((max_count == 0U) || (max_count > osRtxSemaphoreTokenLimit) || (initial_count > max_count)) {
    EvrRtxSemaphoreError(NULL, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return NULL;
  }

  // Process attributes
  if (attr != NULL) {
    name = attr->name;
#ifdef RTX_SAFETY_CLASS
    attr_bits = attr->attr_bits;
#endif
    //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 6]
    semaphore = attr->cb_mem;
#ifdef RTX_SAFETY_CLASS
    // A thread may only create objects of its own safety class or lower
    if ((attr_bits & osSafetyClass_Valid) != 0U) {
      if ((thread != NULL) &&
          ((thread->attr >> osRtxAttrClass_Pos) <
          (uint8_t)((attr_bits & osSafetyClass_Msk) >> osSafetyClass_Pos))) {
        EvrRtxSemaphoreError(NULL, (int32_t)osErrorSafetyClass);
        //lint -e{904} "Return statement before end of function" [MISRA Note 1]
        return NULL;
      }
    }
#endif
    if (semaphore != NULL) {
      // User-provided control block: must be valid and exactly sized
      if (!IsSemaphorePtrValid(semaphore) || (attr->cb_size != sizeof(os_semaphore_t))) {
        EvrRtxSemaphoreError(NULL, osRtxErrorInvalidControlBlock);
        //lint -e{904} "Return statement before end of function" [MISRA Note 1]
        return NULL;
      }
    } else {
      // No control block supplied: its size must be zero
      if (attr->cb_size != 0U) {
        EvrRtxSemaphoreError(NULL, osRtxErrorInvalidControlBlock);
        //lint -e{904} "Return statement before end of function" [MISRA Note 1]
        return NULL;
      }
    }
  } else {
    // No attributes: use defaults
    name = NULL;
#ifdef RTX_SAFETY_CLASS
    attr_bits = 0U;
#endif
    semaphore = NULL;
  }

  // Allocate object memory if not provided
  if (semaphore == NULL) {
    if (osRtxInfo.mpi.semaphore != NULL) {
      //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
      semaphore = osRtxMemoryPoolAlloc(osRtxInfo.mpi.semaphore);
#ifndef RTX_OBJ_PTR_CHECK
    } else {
      //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
      semaphore = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_semaphore_t), 1U);
#endif
    }
#ifdef RTX_OBJ_MEM_USAGE
    // Track allocation high-water mark
    if (semaphore != NULL) {
      uint32_t used;
      osRtxSemaphoreMemUsage.cnt_alloc++;
      used = osRtxSemaphoreMemUsage.cnt_alloc - osRtxSemaphoreMemUsage.cnt_free;
      if (osRtxSemaphoreMemUsage.max_used < used) {
        osRtxSemaphoreMemUsage.max_used = used;
      }
    }
#endif
    // Kernel-allocated block: remember to free it on delete
    flags = osRtxFlagSystemObject;
  } else {
    flags = 0U;
  }

  if (semaphore != NULL) {
    // Initialize control block
    semaphore->id          = osRtxIdSemaphore;
    semaphore->flags       = flags;
    semaphore->attr        = 0U;
    semaphore->name        = name;
    semaphore->thread_list = NULL;
    semaphore->tokens      = (uint16_t)initial_count;
    semaphore->max_tokens  = (uint16_t)max_count;
#ifdef RTX_SAFETY_CLASS
    if ((attr_bits & osSafetyClass_Valid) != 0U) {
      // Explicit safety class from the attributes
      semaphore->attr     |= (uint8_t)((attr_bits & osSafetyClass_Msk) >>
                                       (osSafetyClass_Pos - osRtxAttrClass_Pos));
    } else {
      // Inherit safety class from the running thread
      if (thread != NULL) {
        semaphore->attr   |= (uint8_t)(thread->attr & osRtxAttrClass_Msk);
      }
    }
#endif

    // Register post ISR processing function
    osRtxInfo.post_process.semaphore = osRtxSemaphorePostProcess;

    EvrRtxSemaphoreCreated(semaphore, semaphore->name);
  } else {
    EvrRtxSemaphoreError(NULL,(int32_t)osErrorNoMemory);
  }

  return semaphore;
}
/// Get name of a Semaphore object.
/// \note API identical to osSemaphoreGetName
static const char *svcRtxSemaphoreGetName (osSemaphoreId_t semaphore_id) {
  os_semaphore_t *semaphore = osRtxSemaphoreId(semaphore_id);
  const char     *name;

  // A valid object yields its name, anything else yields NULL.
  if (IsSemaphorePtrValid(semaphore) && (semaphore->id == osRtxIdSemaphore)) {
    name = semaphore->name;
  } else {
    name = NULL;
  }
  EvrRtxSemaphoreGetName(semaphore, name);
  return name;
}
/// Acquire a Semaphore token or timeout if no tokens are available.
/// \note API identical to osSemaphoreAcquire
static osStatus_t svcRtxSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout) {
  os_semaphore_t *semaphore = osRtxSemaphoreId(semaphore_id);
#ifdef RTX_SAFETY_CLASS
  const os_thread_t *thread;
#endif
  osStatus_t      status;

  // Check parameters
  if (!IsSemaphorePtrValid(semaphore) || (semaphore->id != osRtxIdSemaphore)) {
    EvrRtxSemaphoreError(semaphore, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }

#ifdef RTX_SAFETY_CLASS
  // Check running thread safety class: a thread may not use a
  // semaphore of a higher safety class
  thread = osRtxThreadGetRunning();
  if ((thread != NULL) &&
      ((thread->attr >> osRtxAttrClass_Pos) < (semaphore->attr >> osRtxAttrClass_Pos))) {
    EvrRtxSemaphoreError(semaphore, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorSafetyClass;
  }
#endif

  // Try to acquire token
  if (SemaphoreTokenDecrement(semaphore) != 0U) {
    EvrRtxSemaphoreAcquired(semaphore, semaphore->tokens);
    status = osOK;
  } else {
    // No token available
    if (timeout != 0U) {
      EvrRtxSemaphoreAcquirePending(semaphore, timeout);
      // Suspend current Thread and queue it on the semaphore's wait list.
      if (osRtxThreadWaitEnter(osRtxThreadWaitingSemaphore, timeout)) {
        osRtxThreadListPut(osRtxObject(semaphore), osRtxThreadGetRunning());
      } else {
        EvrRtxSemaphoreAcquireTimeout(semaphore);
      }
      // Provisional SVC return value; if a token arrives before the timeout,
      // the waking code path delivers osOK to the resumed thread instead.
      status = osErrorTimeout;
    } else {
      // Non-blocking attempt failed
      EvrRtxSemaphoreNotAcquired(semaphore);
      status = osErrorResource;
    }
  }

  return status;
}
/// Release a Semaphore token that was acquired by osSemaphoreAcquire.
/// \note API identical to osSemaphoreRelease
static osStatus_t svcRtxSemaphoreRelease (osSemaphoreId_t semaphore_id) {
  os_semaphore_t *semaphore = osRtxSemaphoreId(semaphore_id);
  os_thread_t    *thread;
  osStatus_t      status;

  // Check parameters
  if (!IsSemaphorePtrValid(semaphore) || (semaphore->id != osRtxIdSemaphore)) {
    EvrRtxSemaphoreError(semaphore, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }

#ifdef RTX_SAFETY_CLASS
  // Check running thread safety class
  thread = osRtxThreadGetRunning();
  if ((thread != NULL) &&
      ((thread->attr >> osRtxAttrClass_Pos) < (semaphore->attr >> osRtxAttrClass_Pos))) {
    EvrRtxSemaphoreError(semaphore, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorSafetyClass;
  }
#endif

  // Check if Thread is waiting for a token
  if (semaphore->thread_list != NULL) {
    // Hand the token directly to a waiter: the count is not incremented.
    EvrRtxSemaphoreReleased(semaphore, semaphore->tokens);
    // Wakeup waiting Thread with highest Priority (dispatch allowed: TRUE)
    thread = osRtxThreadListGet(osRtxObject(semaphore));
    osRtxThreadWaitExit(thread, (uint32_t)osOK, TRUE);
    EvrRtxSemaphoreAcquired(semaphore, semaphore->tokens);
    status = osOK;
  } else {
    // Try to release token
    if (SemaphoreTokenIncrement(semaphore) != 0U) {
      EvrRtxSemaphoreReleased(semaphore, semaphore->tokens);
      status = osOK;
    } else {
      // Count already at max_tokens
      EvrRtxSemaphoreError(semaphore, osRtxErrorSemaphoreCountLimit);
      status = osErrorResource;
    }
  }

  return status;
}
/// Get current Semaphore token count.
/// \note API identical to osSemaphoreGetCount
static uint32_t svcRtxSemaphoreGetCount (osSemaphoreId_t semaphore_id) {
  os_semaphore_t *semaphore = osRtxSemaphoreId(semaphore_id);
  uint32_t        count = 0U;

  // A valid object yields its token count, anything else yields 0.
  if (IsSemaphorePtrValid(semaphore) && (semaphore->id == osRtxIdSemaphore)) {
    count = semaphore->tokens;
  }
  EvrRtxSemaphoreGetCount(semaphore, count);
  return count;
}
/// Delete a Semaphore object.
/// Releases all waiting threads with osErrorResource, then destroys the object.
/// \note API identical to osSemaphoreDelete
static osStatus_t svcRtxSemaphoreDelete (osSemaphoreId_t semaphore_id) {
  os_semaphore_t *semaphore = osRtxSemaphoreId(semaphore_id);
  os_thread_t    *thread;

  // Check parameters
  if (!IsSemaphorePtrValid(semaphore) || (semaphore->id != osRtxIdSemaphore)) {
    EvrRtxSemaphoreError(semaphore, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }

#ifdef RTX_SAFETY_CLASS
  // Check running thread safety class
  thread = osRtxThreadGetRunning();
  if ((thread != NULL) &&
      ((thread->attr >> osRtxAttrClass_Pos) < (semaphore->attr >> osRtxAttrClass_Pos))) {
    EvrRtxSemaphoreError(semaphore, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorSafetyClass;
  }
#endif

  // Unblock waiting threads, then run the dispatcher once
  if (semaphore->thread_list != NULL) {
    do {
      thread = osRtxThreadListGet(osRtxObject(semaphore));
      osRtxThreadWaitExit(thread, (uint32_t)osErrorResource, FALSE);
    } while (semaphore->thread_list != NULL);
    osRtxThreadDispatch(NULL);
  }

  osRtxSemaphoreDestroy(semaphore);

  return osOK;
}
// Service Calls definitions
// Each SVC0_n macro expands to an SVC veneer (__svcSemaphoreXxx) that traps
// into the kernel and dispatches to the matching svcRtxSemaphoreXxx function.
//lint ++flb "Library Begin" [MISRA Note 11]
SVC0_3(SemaphoreNew,      osSemaphoreId_t, uint32_t, uint32_t, const osSemaphoreAttr_t *)
SVC0_1(SemaphoreGetName,  const char *,    osSemaphoreId_t)
SVC0_2(SemaphoreAcquire,  osStatus_t,      osSemaphoreId_t, uint32_t)
SVC0_1(SemaphoreRelease,  osStatus_t,      osSemaphoreId_t)
SVC0_1(SemaphoreGetCount, uint32_t,        osSemaphoreId_t)
SVC0_1(SemaphoreDelete,   osStatus_t,      osSemaphoreId_t)
//lint --flb "Library End"
// ==== ISR Calls ====
/// Acquire a Semaphore token or timeout if no tokens are available.
/// ISR variant: only a non-blocking acquire (timeout == 0) is permitted.
/// \note API identical to osSemaphoreAcquire
__STATIC_INLINE
osStatus_t isrRtxSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout) {
  os_semaphore_t *semaphore = osRtxSemaphoreId(semaphore_id);
  osStatus_t      status;

  // Check parameters (a non-zero timeout is an error in ISR context)
  if (!IsSemaphorePtrValid(semaphore) || (semaphore->id != osRtxIdSemaphore) || (timeout != 0U)) {
    EvrRtxSemaphoreError(semaphore, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }

  // Try to acquire token (atomic/critical-section safe from ISR)
  if (SemaphoreTokenDecrement(semaphore) != 0U) {
    EvrRtxSemaphoreAcquired(semaphore, semaphore->tokens);
    status = osOK;
  } else {
    // No token available
    EvrRtxSemaphoreNotAcquired(semaphore);
    status = osErrorResource;
  }

  return status;
}
/// Release a Semaphore token that was acquired by osSemaphoreAcquire.
/// ISR variant: increments the count and defers waiter wakeup to PendSV.
/// \note API identical to osSemaphoreRelease
__STATIC_INLINE
osStatus_t isrRtxSemaphoreRelease (osSemaphoreId_t semaphore_id) {
  os_semaphore_t *semaphore = osRtxSemaphoreId(semaphore_id);
  osStatus_t      status;

  // Check parameters
  if (!IsSemaphorePtrValid(semaphore) || (semaphore->id != osRtxIdSemaphore)) {
    EvrRtxSemaphoreError(semaphore, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }

  // Try to release token
  if (SemaphoreTokenIncrement(semaphore) != 0U) {
    // Register post ISR processing: waiting threads (if any) are woken
    // later by osRtxSemaphorePostProcess from the PendSV handler.
    osRtxPostProcess(osRtxObject(semaphore));
    EvrRtxSemaphoreReleased(semaphore, semaphore->tokens);
    status = osOK;
  } else {
    // Count already at max_tokens
    EvrRtxSemaphoreError(semaphore, osRtxErrorSemaphoreCountLimit);
    status = osErrorResource;
  }

  return status;
}
// ==== Public API ====
/// Create and Initialize a Semaphore object.
osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr) {
  osSemaphoreId_t id = NULL;

  EvrRtxSemaphoreNew(max_count, initial_count, attr);
  if (!IsException() && !IsIrqMasked()) {
    // Thread context: create the semaphore through the SVC veneer.
    id = __svcSemaphoreNew(max_count, initial_count, attr);
  } else {
    // Object creation is not allowed from ISR context.
    EvrRtxSemaphoreError(NULL, (int32_t)osErrorISR);
  }
  return id;
}
/// Get name of a Semaphore object.
const char *osSemaphoreGetName (osSemaphoreId_t semaphore_id) {
  // In ISR context query the kernel directly, otherwise go through the SVC veneer.
  const bool_t in_irq = IsException() || IsIrqMasked();

  return in_irq ? svcRtxSemaphoreGetName(semaphore_id)
                : __svcSemaphoreGetName(semaphore_id);
}
/// Acquire a Semaphore token or timeout if no tokens are available.
osStatus_t osSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout) {
  osStatus_t ret;

  EvrRtxSemaphoreAcquire(semaphore_id, timeout);
  if (!IsException() && !IsIrqMasked()) {
    ret = __svcSemaphoreAcquire(semaphore_id, timeout);
  } else {
    // ISR context: only the non-blocking path is available.
    ret = isrRtxSemaphoreAcquire(semaphore_id, timeout);
  }
  return ret;
}
/// Release a Semaphore token that was acquired by osSemaphoreAcquire.
osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id) {
  osStatus_t ret;

  EvrRtxSemaphoreRelease(semaphore_id);
  if (!IsException() && !IsIrqMasked()) {
    ret = __svcSemaphoreRelease(semaphore_id);
  } else {
    // ISR context: release directly; waiter wakeup is deferred to PendSV.
    ret = isrRtxSemaphoreRelease(semaphore_id);
  }
  return ret;
}
/// Get current Semaphore token count.
uint32_t osSemaphoreGetCount (osSemaphoreId_t semaphore_id) {
  // In ISR context query the kernel directly, otherwise go through the SVC veneer.
  const bool_t in_irq = IsException() || IsIrqMasked();

  return in_irq ? svcRtxSemaphoreGetCount(semaphore_id)
                : __svcSemaphoreGetCount(semaphore_id);
}
/// Delete a Semaphore object.
osStatus_t osSemaphoreDelete (osSemaphoreId_t semaphore_id) {
  osStatus_t ret;

  EvrRtxSemaphoreDelete(semaphore_id);
  if (!IsException() && !IsIrqMasked()) {
    ret = __svcSemaphoreDelete(semaphore_id);
  } else {
    // Object deletion is not allowed from ISR context.
    EvrRtxSemaphoreError(semaphore_id, (int32_t)osErrorISR);
    ret = osErrorISR;
  }
  return ret;
}

View File

@ -0,0 +1,220 @@
/*
* Copyright (c) 2013-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: System functions
*
* -----------------------------------------------------------------------------
*/
#include "rtx_lib.h"
// ==== Helper functions ====
/// Put Object into ISR Queue.
/// The ISR queue is a fixed-size circular buffer of objects that need
/// post-processing in the PendSV handler.
/// \param[in]  object          object.
/// \return 1 - success, 0 - failure (queue full).
static uint32_t isr_queue_put (os_object_t *object) {
#if (EXCLUSIVE_ACCESS == 0)
  uint32_t primask = __get_PRIMASK();  // remember interrupt-mask state on entry
#else
  uint32_t n;
#endif
  uint16_t max;
  uint32_t ret;

  max = osRtxInfo.isr_queue.max;

#if (EXCLUSIVE_ACCESS == 0)
  // No exclusive-access instructions available: use a PRIMASK critical section.
  __disable_irq();

  if (osRtxInfo.isr_queue.cnt < max) {
    osRtxInfo.isr_queue.cnt++;
    osRtxInfo.isr_queue.data[osRtxInfo.isr_queue.in] = object;
    // Advance and wrap the write index
    if (++osRtxInfo.isr_queue.in == max) {
      osRtxInfo.isr_queue.in = 0U;
    }
    ret = 1U;
  } else {
    // Queue full
    ret = 0U;
  }

  if (primask == 0U) {
    // Restore interrupts only if they were enabled on entry.
    __enable_irq();
  }
#else
  // Lock-free path: reserve a slot by bumping the count, then claim an index.
  if (atomic_inc16_lt(&osRtxInfo.isr_queue.cnt, max) < max) {
    n = atomic_inc16_lim(&osRtxInfo.isr_queue.in, max);
    osRtxInfo.isr_queue.data[n] = object;
    ret = 1U;
  } else {
    ret = 0U;
  }
#endif

  return ret;
}
/// Get Object from ISR Queue.
/// \return object or NULL (queue empty).
static os_object_t *isr_queue_get (void) {
#if (EXCLUSIVE_ACCESS != 0)
  uint32_t n;
#endif
  uint16_t max;
  os_object_t *ret;

  max = osRtxInfo.isr_queue.max;

#if (EXCLUSIVE_ACCESS == 0)
  __disable_irq();

  if (osRtxInfo.isr_queue.cnt != 0U) {
    osRtxInfo.isr_queue.cnt--;
    ret = osRtxObject(osRtxInfo.isr_queue.data[osRtxInfo.isr_queue.out]);
    // Advance and wrap the read index
    if (++osRtxInfo.isr_queue.out == max) {
      osRtxInfo.isr_queue.out = 0U;
    }
  } else {
    ret = NULL;
  }

  // NOTE(review): unlike isr_queue_put, PRIMASK is not saved/restored here -
  // interrupts are re-enabled unconditionally. Presumably this is only called
  // from contexts where interrupts are enabled (PendSV handler); confirm
  // against callers before reusing elsewhere.
  __enable_irq();
#else
  // Lock-free path: release a slot by decrementing the count, then claim
  // the next read index.
  if (atomic_dec16_nz(&osRtxInfo.isr_queue.cnt) != 0U) {
    n = atomic_inc16_lim(&osRtxInfo.isr_queue.out, max);
    ret = osRtxObject(osRtxInfo.isr_queue.data[n]);
  } else {
    ret = NULL;
  }
#endif

  return ret;
}
// ==== Library Functions ====
/// Tick Handler.
/// Runs once per kernel tick: advances the tick count, processes thread
/// delays, drives the timer tick and implements round-robin time slicing.
//lint -esym(714,osRtxTick_Handler) "Referenced by Exception handlers"
//lint -esym(759,osRtxTick_Handler) "Prototype in header"
//lint -esym(765,osRtxTick_Handler) "Global scope"
void osRtxTick_Handler (void) {
  os_thread_t *thread;

  OS_Tick_AcknowledgeIRQ();
  osRtxInfo.kernel.tick++;

  // Process Thread Delays
  osRtxThreadDelayTick();
  osRtxThreadDispatch(NULL);

  // Process Timers (tick hook is installed by osRtxTimerSetup)
  if (osRtxInfo.timer.tick != NULL) {
    osRtxInfo.timer.tick();
  }

#ifdef RTX_THREAD_WATCHDOG
  // Process Watchdog Timers
  osRtxThreadWatchdogTick();
#endif

  // Check Round Robin timeout (0 means round-robin is disabled)
  if (osRtxInfo.thread.robin.timeout != 0U) {
    thread = osRtxInfo.thread.run.next;
    if (thread != osRtxInfo.thread.robin.thread) {
      // A different thread is now current: restart its time slice
      osRtxInfo.thread.robin.thread = thread;
      if (thread->delay == 0U) {
        // Reset Round Robin
        thread->delay = osRtxInfo.thread.robin.timeout;
      }
    }
    if (thread->delay != 0U) {
      thread->delay--;
    }
    if (thread->delay == 0U) {
      // Round Robin Timeout: rotate to a ready thread of the same priority
      if (osRtxKernelGetState() == osRtxKernelRunning) {
        thread = osRtxInfo.thread.ready.thread_list;
        if ((thread != NULL) && (thread->priority == osRtxInfo.thread.robin.thread->priority)) {
          osRtxThreadListRemove(thread);
          osRtxThreadReadyPut(osRtxInfo.thread.robin.thread);
          EvrRtxThreadPreempted(osRtxInfo.thread.robin.thread);
          osRtxThreadSwitch(thread);
          osRtxInfo.thread.robin.thread = thread;
          thread->delay = osRtxInfo.thread.robin.timeout;
        }
      }
    }
  }
}
/// Pending Service Call Handler.
/// Drains the ISR queue and runs the registered post-ISR processing function
/// for each queued object, then dispatches threads once.
//lint -esym(714,osRtxPendSV_Handler) "Referenced by Exception handlers"
//lint -esym(759,osRtxPendSV_Handler) "Prototype in header"
//lint -esym(765,osRtxPendSV_Handler) "Global scope"
void osRtxPendSV_Handler (void) {
  os_object_t *object;

  for (;;) {
    object = isr_queue_get();
    if (object == NULL) {
      // Queue drained
      break;
    }
    // Dispatch on the object type tag to its post-processing hook
    switch (object->id) {
      case osRtxIdThread:
        osRtxInfo.post_process.thread(osRtxThreadObject(object));
        break;
      case osRtxIdEventFlags:
        osRtxInfo.post_process.event_flags(osRtxEventFlagsObject(object));
        break;
      case osRtxIdSemaphore:
        osRtxInfo.post_process.semaphore(osRtxSemaphoreObject(object));
        break;
      case osRtxIdMemoryPool:
        osRtxInfo.post_process.memory_pool(osRtxMemoryPoolObject(object));
        break;
      case osRtxIdMessage:
        osRtxInfo.post_process.message(osRtxMessageObject(object));
        break;
      default:
        // Should never come here
        break;
    }
  }

  osRtxThreadDispatch(NULL);
}
/// Register post ISR processing.
/// Queues the object for deferred handling and triggers PendSV (or records
/// the request while the kernel is blocked/locked).
/// \param[in]  object          generic object.
void osRtxPostProcess (os_object_t *object) {

  if (isr_queue_put(object) != 0U) {
    if (osRtxInfo.kernel.blocked == 0U) {
      SetPendSV();
    } else {
      // Kernel is blocked: remember that PendSV is pending
      osRtxInfo.kernel.pendSV = 1U;
    }
  } else {
    // ISR queue is full - report the overflow to the application
    (void)osRtxKernelErrorNotify(osRtxErrorISRQueueOverflow, object);
  }
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,631 @@
/*
* Copyright (c) 2013-2023 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: Timer functions
*
* -----------------------------------------------------------------------------
*/
#include "rtx_lib.h"
// OS Runtime Object Memory Usage
#ifdef RTX_OBJ_MEM_USAGE
// Allocation counters for timer control blocks (cnt_alloc, cnt_free,
// max_used), kept in a dedicated data section for memory-usage instrumentation.
osRtxObjectMemUsage_t osRtxTimerMemUsage \
__attribute__((section(".data.os.timer.obj"))) =
{ 0U, 0U, 0U };
#endif
// ==== Helper functions ====
/// Insert Timer into the Timer List sorted by Time.
/// The list stores delta ticks: each node's tick is relative to the sum of
/// all preceding nodes' ticks, so only the head is decremented per tick.
/// \param[in]  timer           timer object.
/// \param[in]  tick            timer tick (absolute, converted to a delta here).
static void TimerInsert (os_timer_t *timer, uint32_t tick) {
  os_timer_t *prev, *next;

  // Walk forward, consuming the deltas of nodes that expire earlier or equal
  prev = NULL;
  next = osRtxInfo.timer.list;
  while ((next != NULL) && (next->tick <= tick)) {
    tick -= next->tick;
    prev  = next;
    next  = next->next;
  }
  // Link in with the remaining delta
  timer->tick = tick;
  timer->prev = prev;
  timer->next = next;
  if (next != NULL) {
    // The successor's delta is now relative to the new node
    next->tick -= timer->tick;
    next->prev  = timer;
  }
  if (prev != NULL) {
    prev->next = timer;
  } else {
    // New head of the timer list
    osRtxInfo.timer.list = timer;
  }
}
/// Remove Timer from the Timer List.
/// \param[in]  timer           timer object.
static void TimerRemove (const os_timer_t *timer) {

  if (timer->next != NULL) {
    // Fold this node's delta into the successor to keep absolute times intact
    timer->next->tick += timer->tick;
    timer->next->prev  = timer->prev;
  }
  if (timer->prev != NULL) {
    timer->prev->next = timer->next;
  } else {
    // Removing the list head
    osRtxInfo.timer.list = timer->next;
  }
}
/// Unlink Timer from the Timer List Head.
/// Fast-path removal used by the tick handler: the timer must be the current
/// list head and its delta has already reached zero (no tick folding needed).
/// \param[in]  timer           timer object (the current list head).
static void TimerUnlink (const os_timer_t *timer) {

  if (timer->next != NULL) {
    timer->next->prev = timer->prev;
  }
  osRtxInfo.timer.list = timer->next;
}
/// Verify that Timer object pointer is valid.
/// \param[in]  timer           timer object.
/// \return true - valid, false - invalid.
static bool_t IsTimerPtrValid (const os_timer_t *timer) {
#ifdef RTX_OBJ_PTR_CHECK
  // With pointer checking enabled, all timer control blocks live in a
  // dedicated linker section; validate membership and alignment in it.
  //lint --e{923} --e{9078} "cast from pointer to unsigned int" [MISRA Note 7]
  uint32_t cb_start  = (uint32_t)&__os_timer_cb_start__;
  uint32_t cb_length = (uint32_t)&__os_timer_cb_length__;

  // Check the section boundaries (unsigned subtraction also rejects
  // pointers below cb_start, including NULL).
  if (((uint32_t)timer - cb_start) >= cb_length) {
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return FALSE;
  }
  // Check the object alignment: must sit on a control-block boundary.
  if ((((uint32_t)timer - cb_start) % sizeof(os_timer_t)) != 0U) {
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return FALSE;
  }
#else
  // Without section checking only a NULL check is possible.
  if (timer == NULL) {
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return FALSE;
  }
#endif
  return TRUE;
}
// ==== Library functions ====
/// Timer Tick (called each SysTick).
/// Decrements the delta of the list head and hands every expired timer's
/// callback info to the timer thread's message queue.
static void osRtxTimerTick (void) {
  os_thread_t *thread_running;
  os_timer_t  *timer;
  osStatus_t   status;

  timer = osRtxInfo.timer.list;
  if (timer == NULL) {
    // No active timers
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return;
  }

  // Remember the running thread; it is restored after the queue operations
  // below, which may transiently change the running-thread bookkeeping.
  thread_running = osRtxThreadGetRunning();

  // Delta list: only the head's tick needs decrementing
  timer->tick--;
  while ((timer != NULL) && (timer->tick == 0U)) {
    TimerUnlink(timer);
    // Post the callback info to the timer thread (non-blocking)
    status = osMessageQueuePut(osRtxInfo.timer.mq, &timer->finfo, 0U, 0U);
    if (status != osOK) {
      // Queue overflow: report it; the error notification may terminate
      // the thread recorded as running, in which case do not restore it.
      const os_thread_t *thread = osRtxThreadGetRunning();
      osRtxThreadSetRunning(osRtxInfo.thread.run.next);
      (void)osRtxKernelErrorNotify(osRtxErrorTimerQueueOverflow, timer);
      if (osRtxThreadGetRunning() == NULL) {
        if (thread_running == thread) {
          thread_running = NULL;
        }
      }
    }
    if ((timer->attr & osRtxTimerPeriodic) != 0U) {
      // Periodic timer: re-arm with its reload value
      TimerInsert(timer, timer->load);
    } else {
      // One-shot timer: done
      timer->state = osRtxTimerStopped;
    }
    // Several timers may expire on the same tick
    timer = osRtxInfo.timer.list;
  }

  osRtxThreadSetRunning(thread_running);
}
/// Setup Timer Thread objects.
/// Installs the timer tick hook once the timer message queue is available.
/// \return 0 on success, -1 on failure.
//lint -esym(714,osRtxTimerSetup) "Referenced from library configuration"
//lint -esym(759,osRtxTimerSetup) "Prototype in header"
//lint -esym(765,osRtxTimerSetup) "Global scope"
int32_t osRtxTimerSetup (void) {
  int32_t status;

  if (osRtxMessageQueueTimerSetup() != 0) {
    // Timer message queue could not be set up
    status = -1;
  } else {
    osRtxInfo.timer.tick = osRtxTimerTick;
    status = 0;
  }
  return status;
}
/// Timer Thread
/// Blocks on the timer message queue forever and invokes each delivered
/// timer callback in thread context. Never returns.
/// \param[in]  argument        timer message queue id (osMessageQueueId_t).
//lint -esym(714,osRtxTimerThread) "Referenced from library configuration"
//lint -esym(759,osRtxTimerThread) "Prototype in header"
//lint -esym(765,osRtxTimerThread) "Global scope"
__NO_RETURN void osRtxTimerThread (void *argument) {
  os_timer_finfo_t   finfo;
  osStatus_t         status;
  osMessageQueueId_t mq = (osMessageQueueId_t)argument;

  for (;;) {
    //lint -e{934} "Taking address of near auto variable"
    status = osMessageQueueGet(mq, &finfo, NULL, osWaitForever);
    if (status == osOK) {
      // Run the user timer callback with its registered argument
      EvrRtxTimerCallback(finfo.func, finfo.arg);
      (finfo.func)(finfo.arg);
    }
  }
}
/// Destroy a Timer object.
/// \param[in]  timer           timer object.
static void osRtxTimerDestroy (os_timer_t *timer) {

  // Mark object as inactive and invalid so stale ids are rejected later
  timer->state = osRtxTimerInactive;
  timer->id    = osRtxIdInvalid;

  // Free object memory - only if the kernel allocated the control block
  if ((timer->flags & osRtxFlagSystemObject) != 0U) {
#ifdef RTX_OBJ_PTR_CHECK
    // With pointer checking, system objects always come from the object pool.
    (void)osRtxMemoryPoolFree(osRtxInfo.mpi.timer, timer);
#else
    if (osRtxInfo.mpi.timer != NULL) {
      (void)osRtxMemoryPoolFree(osRtxInfo.mpi.timer, timer);
    } else {
      (void)osRtxMemoryFree(osRtxInfo.mem.common, timer);
    }
#endif
#ifdef RTX_OBJ_MEM_USAGE
    osRtxTimerMemUsage.cnt_free++;
#endif
  }

  EvrRtxTimerDestroyed(timer);
}
#ifdef RTX_SAFETY_CLASS

/// Delete a Timer safety class.
/// Destroys every timer whose safety class matches the selection; running
/// timers are removed from the active timer list first.
/// \param[in]  safety_class    safety class.
/// \param[in]  mode            safety mode (osSafetyWithSameClass and/or osSafetyWithLowerClass).
void osRtxTimerDeleteClass (uint32_t safety_class, uint32_t mode) {
  os_timer_t *timer;
  uint32_t    length;

  // Walk the linker-placed timer control-block section object by object.
  //lint --e{923} --e{9078} "cast from pointer to unsigned int" [MISRA Note 7]
  timer  = (os_timer_t *)(uint32_t)&__os_timer_cb_start__;
  length = (uint32_t)&__os_timer_cb_length__;
  while (length >= sizeof(os_timer_t)) {
    if (   (timer->id == osRtxIdTimer) &&
        ((((mode & osSafetyWithSameClass)  != 0U) &&
          ((timer->attr >> osRtxAttrClass_Pos) == (uint8_t)safety_class)) ||
         (((mode & osSafetyWithLowerClass) != 0U) &&
          ((timer->attr >> osRtxAttrClass_Pos) <  (uint8_t)safety_class)))) {
      // Detach an armed timer from the active list before destroying it
      if (timer->state == osRtxTimerRunning) {
        TimerRemove(timer);
      }
      osRtxTimerDestroy(timer);
    }
    length -= sizeof(os_timer_t);
    timer++;
  }
}
#endif
// ==== Service Calls ====
/// Create and Initialize a timer.
/// Validates parameters and attributes, obtains a control block (either the
/// user-supplied cb_mem or allocated from the timer object pool / common
/// memory), then initializes it in the stopped state.
/// \note API identical to osTimerNew
static osTimerId_t svcRtxTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr) {
  os_timer_t *timer;
#ifdef RTX_SAFETY_CLASS
  const os_thread_t *thread = osRtxThreadGetRunning();
  uint32_t           attr_bits;
#endif
  uint8_t            flags;
  const char        *name;

  // Check parameters: a callback is mandatory and type must be a known value.
  if ((func == NULL) || ((type != osTimerOnce) && (type != osTimerPeriodic))) {
    EvrRtxTimerError(NULL, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return NULL;
  }

  // Process attributes
  if (attr != NULL) {
    name = attr->name;
#ifdef RTX_SAFETY_CLASS
    attr_bits = attr->attr_bits;
#endif
    //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 6]
    timer = attr->cb_mem;
#ifdef RTX_SAFETY_CLASS
    // A thread may only create objects of its own or a lower safety class.
    if ((attr_bits & osSafetyClass_Valid) != 0U) {
      if ((thread != NULL) &&
          ((thread->attr >> osRtxAttrClass_Pos) <
          (uint8_t)((attr_bits & osSafetyClass_Msk) >> osSafetyClass_Pos))) {
        EvrRtxTimerError(NULL, (int32_t)osErrorSafetyClass);
        //lint -e{904} "Return statement before end of function" [MISRA Note 1]
        return NULL;
      }
    }
#endif
    if (timer != NULL) {
      // User-provided control block: pointer and size must both be valid.
      if (!IsTimerPtrValid(timer) || (attr->cb_size != sizeof(os_timer_t))) {
        EvrRtxTimerError(NULL, osRtxErrorInvalidControlBlock);
        //lint -e{904} "Return statement before end of function" [MISRA Note 1]
        return NULL;
      }
    } else {
      // No control block supplied: cb_size must then be zero.
      if (attr->cb_size != 0U) {
        EvrRtxTimerError(NULL, osRtxErrorInvalidControlBlock);
        //lint -e{904} "Return statement before end of function" [MISRA Note 1]
        return NULL;
      }
    }
  } else {
    // No attributes: use defaults throughout.
    name = NULL;
#ifdef RTX_SAFETY_CLASS
    attr_bits = 0U;
#endif
    timer = NULL;
  }

  // Allocate object memory if not provided
  if (timer == NULL) {
    if (osRtxInfo.mpi.timer != NULL) {
      // Dedicated timer object pool.
      //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
      timer = osRtxMemoryPoolAlloc(osRtxInfo.mpi.timer);
#ifndef RTX_OBJ_PTR_CHECK
    } else {
      // Fall back to the common dynamic memory pool.
      //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
      timer = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_timer_t), 1U);
#endif
    }
#ifdef RTX_OBJ_MEM_USAGE
    // Track peak usage of system-allocated timer objects.
    if (timer != NULL) {
      uint32_t used;
      osRtxTimerMemUsage.cnt_alloc++;
      used = osRtxTimerMemUsage.cnt_alloc - osRtxTimerMemUsage.cnt_free;
      if (osRtxTimerMemUsage.max_used < used) {
        osRtxTimerMemUsage.max_used = used;
      }
    }
#endif
    // Mark as system-allocated so deletion frees the memory.
    flags = osRtxFlagSystemObject;
  } else {
    flags = 0U;
  }

  if (timer != NULL) {
    // Initialize control block
    timer->id         = osRtxIdTimer;
    timer->state      = osRtxTimerStopped;
    timer->flags      = flags;
    if (type == osTimerPeriodic) {
      timer->attr     = osRtxTimerPeriodic;
    } else {
      timer->attr     = 0U;
    }
    timer->name       = name;
    timer->prev       = NULL;
    timer->next       = NULL;
    timer->tick       = 0U;
    timer->load       = 0U;
    timer->finfo.func = func;
    timer->finfo.arg  = argument;
#ifdef RTX_SAFETY_CLASS
    if ((attr_bits & osSafetyClass_Valid) != 0U) {
      // Store the explicitly requested safety class in the attribute bits.
      timer->attr    |= (uint8_t)((attr_bits & osSafetyClass_Msk) >>
                                  (osSafetyClass_Pos - osRtxAttrClass_Pos));
    } else {
      // Inherit safety class from the running thread
      if (thread != NULL) {
        timer->attr  |= (uint8_t)(thread->attr & osRtxAttrClass_Msk);
      }
    }
#endif
    EvrRtxTimerCreated(timer, timer->name);
  } else {
    EvrRtxTimerError(NULL, (int32_t)osErrorNoMemory);
  }

  return timer;
}
/// Get name of a timer.
/// \note API identical to osTimerGetName
static const char *svcRtxTimerGetName (osTimerId_t timer_id) {
  os_timer_t *timer = osRtxTimerId(timer_id);
  const char *name;

  // A valid, initialized timer yields its stored name; anything else NULL.
  if (IsTimerPtrValid(timer) && (timer->id == osRtxIdTimer)) {
    name = timer->name;
  } else {
    name = NULL;
  }
  EvrRtxTimerGetName(timer, name);
  return name;
}
/// Start or restart a timer.
/// \note API identical to osTimerStart
static osStatus_t svcRtxTimerStart (osTimerId_t timer_id, uint32_t ticks) {
  os_timer_t *timer = osRtxTimerId(timer_id);
#ifdef RTX_SAFETY_CLASS
  const os_thread_t *thread;
#endif

  // Reject invalid control blocks and a zero tick count.
  if (!IsTimerPtrValid(timer) || (timer->id != osRtxIdTimer) || (ticks == 0U)) {
    EvrRtxTimerError(timer, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }
#ifdef RTX_SAFETY_CLASS
  // The calling thread's safety class must not be below the timer's.
  thread = osRtxThreadGetRunning();
  if ((thread != NULL) &&
      ((thread->attr >> osRtxAttrClass_Pos) < (timer->attr >> osRtxAttrClass_Pos))) {
    EvrRtxTimerError(timer, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorSafetyClass;
  }
#endif
  if (timer->state != osRtxTimerRunning) {
    // First start: the timer tick machinery must be operational.
    if (osRtxInfo.timer.tick == NULL) {
      EvrRtxTimerError(timer, (int32_t)osErrorResource);
      //lint -e{904} "Return statement before end of function" [MISRA Note 1]
      return osErrorResource;
    }
    timer->state = osRtxTimerRunning;
    timer->load  = ticks;
  } else {
    // Restart: record the new reload value and detach from the list.
    timer->load = ticks;
    TimerRemove(timer);
  }
  TimerInsert(timer, ticks);
  EvrRtxTimerStarted(timer);
  return osOK;
}
/// Stop a timer.
/// \note API identical to osTimerStop
static osStatus_t svcRtxTimerStop (osTimerId_t timer_id) {
  os_timer_t *tmr = osRtxTimerId(timer_id);
#ifdef RTX_SAFETY_CLASS
  const os_thread_t *running;
#endif

  // Validate the control block before touching any of its fields.
  if (!IsTimerPtrValid(tmr) || (tmr->id != osRtxIdTimer)) {
    EvrRtxTimerError(tmr, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }
#ifdef RTX_SAFETY_CLASS
  // The calling thread's safety class must not be below the timer's.
  running = osRtxThreadGetRunning();
  if ((running != NULL) &&
      ((running->attr >> osRtxAttrClass_Pos) < (tmr->attr >> osRtxAttrClass_Pos))) {
    EvrRtxTimerError(tmr, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorSafetyClass;
  }
#endif
  // Only a running timer can be stopped.
  if (tmr->state != osRtxTimerRunning) {
    EvrRtxTimerError(tmr, (int32_t)osErrorResource);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorResource;
  }
  tmr->state = osRtxTimerStopped;
  TimerRemove(tmr);
  EvrRtxTimerStopped(tmr);
  return osOK;
}
/// Check if a timer is running.
/// \note API identical to osTimerIsRunning
/// \return 1U if the timer is in the running state, 0U otherwise
///         (including for an invalid timer id).
static uint32_t svcRtxTimerIsRunning (osTimerId_t timer_id) {
  os_timer_t *timer = osRtxTimerId(timer_id);
  uint32_t    is_running;

  // Check parameters
  if (!IsTimerPtrValid(timer) || (timer->id != osRtxIdTimer)) {
    EvrRtxTimerIsRunning(timer, 0U);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return 0U;
  }
  if (timer->state == osRtxTimerRunning) {
    is_running = 1U;
  } else {
    // Fix: use the unsigned-suffixed literal (was plain 0) for
    // consistency with the file's MISRA style (Rule 7.2).
    is_running = 0U;
  }
  // Emit the event once with the computed result (was duplicated per branch).
  EvrRtxTimerIsRunning(timer, is_running);
  return is_running;
}
/// Delete a timer.
/// \note API identical to osTimerDelete
static osStatus_t svcRtxTimerDelete (osTimerId_t timer_id) {
  os_timer_t *tmr = osRtxTimerId(timer_id);
#ifdef RTX_SAFETY_CLASS
  const os_thread_t *running;
#endif

  // Validate the control block before touching any of its fields.
  if (!IsTimerPtrValid(tmr) || (tmr->id != osRtxIdTimer)) {
    EvrRtxTimerError(tmr, (int32_t)osErrorParameter);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorParameter;
  }
#ifdef RTX_SAFETY_CLASS
  // The calling thread's safety class must not be below the timer's.
  running = osRtxThreadGetRunning();
  if ((running != NULL) &&
      ((running->attr >> osRtxAttrClass_Pos) < (tmr->attr >> osRtxAttrClass_Pos))) {
    EvrRtxTimerError(tmr, (int32_t)osErrorSafetyClass);
    //lint -e{904} "Return statement before end of function" [MISRA Note 1]
    return osErrorSafetyClass;
  }
#endif
  // A running timer is detached from the active list before destruction.
  if (tmr->state == osRtxTimerRunning) {
    TimerRemove(tmr);
  }
  osRtxTimerDestroy(tmr);
  return osOK;
}
// Service Calls definitions
// The SVC0_n macros generate the __svcXxx user-mode wrapper functions that
// trap into the SVC handler and dispatch to the matching svcRtxXxx function.
//lint ++flb "Library Begin" [MISRA Note 11]
SVC0_4(TimerNew,       osTimerId_t,  osTimerFunc_t, osTimerType_t, void *, const osTimerAttr_t *)
SVC0_1(TimerGetName,   const char *, osTimerId_t)
SVC0_2(TimerStart,     osStatus_t,   osTimerId_t, uint32_t)
SVC0_1(TimerStop,      osStatus_t,   osTimerId_t)
SVC0_1(TimerIsRunning, uint32_t,     osTimerId_t)
SVC0_1(TimerDelete,    osStatus_t,   osTimerId_t)
//lint --flb "Library End"
// ==== Public API ====

/// Create and Initialize a timer.
osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr) {
  osTimerId_t id;

  EvrRtxTimerNew(func, type, argument, attr);
  if (!IsException() && !IsIrqMasked()) {
    // Thread context: perform the operation through a service call.
    id = __svcTimerNew(func, type, argument, attr);
  } else {
    // Timer objects cannot be created from ISR context.
    EvrRtxTimerError(NULL, (int32_t)osErrorISR);
    id = NULL;
  }
  return id;
}
/// Get name of a timer.
const char *osTimerGetName (osTimerId_t timer_id) {
  const char *result;

  if (!IsException() && !IsIrqMasked()) {
    // Thread context: query through a service call.
    result = __svcTimerGetName(timer_id);
  } else {
    // ISR context: safe to call the service function directly.
    result = svcRtxTimerGetName(timer_id);
  }
  return result;
}
/// Start or restart a timer.
osStatus_t osTimerStart (osTimerId_t timer_id, uint32_t ticks) {
  osStatus_t status;

  EvrRtxTimerStart(timer_id, ticks);
  if (!IsException() && !IsIrqMasked()) {
    // Thread context: perform the operation through a service call.
    status = __svcTimerStart(timer_id, ticks);
  } else {
    // Not callable from ISR context.
    EvrRtxTimerError(timer_id, (int32_t)osErrorISR);
    status = osErrorISR;
  }
  return status;
}
/// Stop a timer.
osStatus_t osTimerStop (osTimerId_t timer_id) {
  osStatus_t status;

  EvrRtxTimerStop(timer_id);
  if (!IsException() && !IsIrqMasked()) {
    // Thread context: perform the operation through a service call.
    status = __svcTimerStop(timer_id);
  } else {
    // Not callable from ISR context.
    EvrRtxTimerError(timer_id, (int32_t)osErrorISR);
    status = osErrorISR;
  }
  return status;
}
/// Check if a timer is running.
uint32_t osTimerIsRunning (osTimerId_t timer_id) {
  uint32_t running;

  if (!IsException() && !IsIrqMasked()) {
    // Thread context: query through a service call.
    running = __svcTimerIsRunning(timer_id);
  } else {
    // Not callable from ISR context: report "not running".
    EvrRtxTimerIsRunning(timer_id, 0U);
    running = 0U;
  }
  return running;
}
/// Delete a timer.
osStatus_t osTimerDelete (osTimerId_t timer_id) {
  osStatus_t status;

  EvrRtxTimerDelete(timer_id);
  if (!IsException() && !IsIrqMasked()) {
    // Thread context: perform the operation through a service call.
    status = __svcTimerDelete(timer_id);
  } else {
    // Not callable from ISR context.
    EvrRtxTimerError(timer_id, (int32_t)osErrorISR);
    status = osErrorISR;
  }
  return status;
}