Skip to content

Commit 3f676a5

Browse files
committed
ARM: Add common Cortex-M mtarch implementation
This includes support for preemption ready to be integrated into exception handlers. Signed-off-by: Benoît Thébaudeau <benoit.thebaudeau.dev@gmail.com>
1 parent 8180512 commit 3f676a5

File tree

2 files changed

+404
-0
lines changed

2 files changed

+404
-0
lines changed

cpu/arm/common/sys/mtarch.c

+285
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,285 @@
1+
/*
2+
* Copyright (c) 2016, Benoît Thébaudeau <benoit.thebaudeau.dev@gmail.com>
3+
* All rights reserved.
4+
*
5+
* Redistribution and use in source and binary forms, with or without
6+
* modification, are permitted provided that the following conditions are met:
7+
*
8+
* 1. Redistributions of source code must retain the above copyright notice,
9+
* this list of conditions and the following disclaimer.
10+
*
11+
* 2. Redistributions in binary form must reproduce the above copyright notice,
12+
* this list of conditions and the following disclaimer in the documentation
13+
* and/or other materials provided with the distribution.
14+
*
15+
* 3. Neither the name of the copyright holder nor the names of its contributors
16+
* may be used to endorse or promote products derived from this software
17+
* without specific prior written permission.
18+
*
19+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22+
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23+
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24+
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25+
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26+
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27+
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28+
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29+
* POSSIBILITY OF SUCH DAMAGE.
30+
*/
31+
/**
32+
* \addtogroup arm-cm-mtarch
33+
* @{
34+
*
35+
* \file
36+
 *   Implementation of the ARM Cortex-M support for Contiki multi-threading.
37+
*/
38+
#include CMSIS_DEV_HDR
39+
#include "sys/mt.h"
40+
41+
#include <stdint.h>
42+
43+
/*
 * EXC_RETURN value loaded into PC on exception return: return to Thread mode,
 * process stack (PSP), basic (non-FP) stack frame.
 */
#define EXC_RETURN_PROCESS_THREAD_BASIC_FRAME 0xfffffffd

/* Check whether EXC_RETURN[3:0] in LR indicates a preempted process thread. */
#if __ARM_ARCH == 7
/* ARMv7-M: AND with an immediate operand is available in a single insn. */
#define PREEMPTED_PROCESS_THREAD() \
  "and r0, lr, #0xf\n\t" \
  "cmp r0, #0xd\n\t"
#elif __ARM_ARCH == 6
/*
 * ARMv6-M: no flexible second operand, so the mask must be built in a
 * register first and ANDed in two-operand form.
 */
#define PREEMPTED_PROCESS_THREAD() \
  "mov r0, lr\n\t" \
  "movs r1, #0xf\n\t" \
  "and r0, r1\n\t" \
  "cmp r0, #0xd\n\t"
#else
#error Unsupported ARM architecture
#endif
59+
/*----------------------------------------------------------------------------*/
60+
/**
 * \brief SVCall system handler
 *
 * This exception handler executes the action requested by the corresponding
 * \c svc instruction, which is a task switch from the main Contiki thread to an
 * mt thread or the other way around.
 *
 * The function is \c naked: the compiler emits no prologue/epilogue, so the
 * hand-written assembly below fully controls the stacks and the exception
 * return.
 */
__attribute__ ((__naked__))
void
svcall_handler(void)
{
  /* This is a controlled system handler, so do not use ENERGEST_TYPE_IRQ. */

  /*
   * Decide whether to switch to the main thread or to a process thread,
   * depending on the type of the thread preempted by SVCall (tested through
   * EXC_RETURN[3:0] in LR, see PREEMPTED_PROCESS_THREAD()).
   */
  __asm__ (PREEMPTED_PROCESS_THREAD()
#if __ARM_ARCH == 7
           "it eq\n\t"
#endif
           /* Label defined in pendsv_handler() below. */
           "beq switch_to_main_thread\n\t"

           /*
            * - Retrieve from the main stack the PSP passed to SVCall through R0. Note
            *   that it cannot be retrieved directly from R0 on exception entry because
            *   this register may have been overwritten by other exceptions on SVCall
            *   entry.
            * - Save the main thread context to the main stack.
            * - Restore the process thread context from the process stack.
            * - Return to Thread mode, resuming the process thread.
            */
#if __ARM_ARCH == 7
           "ldr r0, [sp]\n\t"
           "push {r4-r11, lr}\n\t"
           /* Skip the 9-word software-saved context (R4-R11 + EXC_RETURN). */
           "add r1, r0, #9 * 4\n\t"
           "msr psp, r1\n\t"
           "ldmia r0, {r4-r11, pc}");
#elif __ARM_ARCH == 6
           /*
            * ARMv6-M PUSH/LDM only handle low registers, so the high registers
            * R8-R11 are shuttled through R0-R3 / R4-R7.
            */
           "mov r0, r8\n\t"
           "mov r1, r9\n\t"
           "mov r2, r10\n\t"
           "mov r3, r11\n\t"
           "push {r0-r7, lr}\n\t"
           /* The PSP passed through R0 is now 9 words down the main stack. */
           "ldr r0, [sp, #9 * 4]\n\t"
           "ldmia r0!, {r4-r7}\n\t"
           "mov r8, r4\n\t"
           "mov r9, r5\n\t"
           "mov r10, r6\n\t"
           "mov r11, r7\n\t"
           /* R3 receives the saved EXC_RETURN value. */
           "ldmia r0!, {r3-r7}\n\t"
           "msr psp, r0\n\t"
           "bx r3");
#endif
}
115+
/*----------------------------------------------------------------------------*/
116+
/**
 * \brief PendSV system handler
 *
 * This exception handler executes following a call to mtarch_pstart() from
 * another exception handler. It performs a task switch to the main Contiki
 * thread if it is not already running.
 *
 * The function is \c naked: the compiler emits no prologue/epilogue, so the
 * hand-written assembly below fully controls the stacks and the exception
 * return.
 */
__attribute__ ((__naked__))
void
pendsv_handler(void)
{
  /* This is a controlled system handler, so do not use ENERGEST_TYPE_IRQ. */

  /*
   * Return without doing anything if PendSV has not preempted a process thread.
   * This can occur either because PendSV has preempted the main thread, in
   * which case there is nothing to do, or because mtarch_pstart() has been
   * called from an exception handler without having called mt_init() first, in
   * which case PendSV may have preempted an exception handler and nothing must
   * be done because mt is not active.
   */
  __asm__ ( PREEMPTED_PROCESS_THREAD()
#if __ARM_ARCH == 7
           "it ne\n\t"
           "bxne lr\n"
#elif __ARM_ARCH == 6
           /* ARMv6-M has no IT blocks, so branch around the early return. */
           "beq switch_to_main_thread\n\t"
           "bx lr\n"
#endif

           /*
            * - Save the process thread context to the process stack.
            * - Place into the main stack the updated PSP that SVCall must return
            *   through R0.
            * - Restore the main thread context from the main stack.
            * - Return to Thread mode, resuming the main thread.
            *
            * This label is also the branch target used by svcall_handler().
            */
           "switch_to_main_thread:\n\t"
           "mrs r0, psp\n\t"
#if __ARM_ARCH == 7
           "stmdb r0!, {r4-r11, lr}\n\t"
           "str r0, [sp, #9 * 4]\n\t"
           "pop {r4-r11, pc}");
#elif __ARM_ARCH == 6
           /*
            * ARMv6-M STM only supports low registers and increment-after
            * addressing, so the 9-word context (R4-R11 + EXC_RETURN) is stored
            * piecewise, moving the high registers through R4-R7 and adjusting
            * R0 manually between the stores.
            */
           "mov r3, lr\n\t"
           "sub r0, #5 * 4\n\t"
           "stmia r0!, {r3-r7}\n\t"
           "mov r4, r8\n\t"
           "mov r5, r9\n\t"
           "sub r0, #9 * 4\n\t"
           "mov r6, r10\n\t"
           "mov r7, r11\n\t"
           "stmia r0!, {r4-r7}\n\t"
           "pop {r4-r7}\n\t"
           "sub r0, #4 * 4\n\t"
           "mov r8, r4\n\t"
           "mov r9, r5\n\t"
           "str r0, [sp, #5 * 4]\n\t"
           "mov r10, r6\n\t"
           "mov r11, r7\n\t"
           "pop {r4-r7, pc}");
#endif
}
179+
/*----------------------------------------------------------------------------*/
180+
/**
 * \brief Initializes the CPU support for Contiki multi-threading
 *
 * Configures the CCR options and the priorities of the SVCall and PendSV
 * system handlers that perform the task switches.
 */
void
mtarch_init(void)
{
  SCB->CCR = (SCB->CCR
#ifdef SCB_CCR_NONBASETHRDENA_Msk
              /*
               * Make sure that any attempt to enter Thread mode with exceptions
               * active faults.
               *
               * Only SVCall and PendSV are allowed to forcibly enter Thread
               * mode, and they are configured with the same, lowest exception
               * priority, so no other exceptions may be active.
               */
              & ~SCB_CCR_NONBASETHRDENA_Msk
#endif
              /*
               * Force 8-byte stack pointer alignment on exception entry in order
               * to be able to use AAPCS-conforming functions as exception
               * handlers.
               */
             ) | SCB_CCR_STKALIGN_Msk;

  /*
   * Configure SVCall and PendSV with the same, lowest exception priority.
   *
   * This makes sure that they cannot preempt each other, and that the processor
   * executes them after having handled all other exceptions. If both are
   * pending at the same time, then SVCall takes precedence because of its lower
   * exception number. In addition, the associated exception handlers do not
   * have to check whether they are returning to Thread mode, because they
   * cannot preempt any other exception.
   */
  NVIC_SetPriority(SVCall_IRQn, (1 << __NVIC_PRIO_BITS) - 1);
  NVIC_SetPriority(PendSV_IRQn, (1 << __NVIC_PRIO_BITS) - 1);

  /*
   * Force the preceding configurations to take effect before further
   * operations.
   */
  __DSB();
  __ISB();
}
222+
/*----------------------------------------------------------------------------*/
223+
void
224+
mtarch_start(struct mtarch_thread *thread,
225+
void (*function)(void *data), void *data)
226+
{
227+
struct mtarch_thread_context *context = &thread->start_stack.context;
228+
229+
/*
230+
* Initialize the thread context with the appropriate values to call
231+
* function() with data and to make function() return to mt_exit() without
232+
* having to call it explicitly.
233+
*/
234+
context->exc_return = EXC_RETURN_PROCESS_THREAD_BASIC_FRAME;
235+
context->r0 = (uint32_t)data;
236+
context->lr = (uint32_t)mt_exit;
237+
context->pc = (uint32_t)function;
238+
context->xpsr = xPSR_T_Msk;
239+
thread->psp = (uint32_t)context;
240+
}
241+
/*----------------------------------------------------------------------------*/
242+
/**
 * \brief Runs an mt thread from the main thread
 * \param thread Thread to run
 *
 * Invokes SVCall with the thread's saved PSP in R0, and stores back the
 * updated PSP returned through R0 when execution switches back to the main
 * thread.
 */
void
mtarch_exec(struct mtarch_thread *thread)
{
  /* Pass the PSP to SVCall, and get the updated PSP as its return value. */
  register uint32_t psp __asm__ ("r0") = thread->psp;
  /* "memory" clobber: the thread may read/write any memory before yielding. */
  __asm__ volatile ("svc #0"
                    : "+r" (psp)
                    :: "memory");
  thread->psp = psp;
}
252+
/*----------------------------------------------------------------------------*/
253+
/**
 * \brief Yields the CPU from a running mt thread
 *
 * Invokes SVCall to request a task switch back to the main thread. Naked so
 * that no compiler-generated prologue disturbs the registers around the svc.
 */
__attribute__ ((__naked__))
void
mtarch_yield(void)
{
  /* Invoke SVCall. */
  __asm__ ("svc #0\n\t"
           "bx lr");
}
261+
/*----------------------------------------------------------------------------*/
262+
/**
 * \brief Stops an mt thread
 * \param thread Thread to stop
 *
 * Nothing to do on this architecture: no per-thread resources are allocated
 * by this implementation.
 */
void
mtarch_stop(struct mtarch_thread *thread)
{
}
266+
/*----------------------------------------------------------------------------*/
267+
/**
 * \brief Requests preemption of a running mt thread
 *
 * Sets the PendSV pending bit in ICSR, so that pendsv_handler() runs once all
 * other active exceptions have completed.
 */
void
mtarch_pstart(void)
{
  /* Trigger PendSV. */
  SCB->ICSR = SCB_ICSR_PENDSVSET_Msk;
}
273+
/*----------------------------------------------------------------------------*/
274+
/**
 * \brief Stops preemption
 *
 * Nothing to do on this architecture.
 */
void
mtarch_pstop(void)
{
}
278+
/*----------------------------------------------------------------------------*/
279+
/**
 * \brief Removes the mtarch support
 *
 * Nothing to do on this architecture.
 */
void
mtarch_remove(void)
{
}
283+
/*----------------------------------------------------------------------------*/
284+
285+
/** @} */

0 commit comments

Comments
 (0)