@@ -34,7 +34,7 @@
 	@
 	@ routine called with r0 = irq number, r1 = struct pt_regs *
 	@
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	asm_do_IRQ
 
 #ifdef CONFIG_SMP
@@ -46,13 +46,13 @@
 	 */
 	test_for_ipi r0, r6, r5, lr
 	movne	r0, sp
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	do_IPI
 
 #ifdef CONFIG_LOCAL_TIMERS
 	test_for_ltirq r0, r6, r5, lr
 	movne	r0, sp
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	do_local_timer
 #endif
 #endif
@@ -70,7 +70,10 @@
 	 */
 	.macro	inv_entry, reason
 	sub	sp, sp, #S_FRAME_SIZE
-	stmib	sp, {r1 - lr}
+ ARM(	stmib	sp, {r1 - lr}		)
+ THUMB(	stmia	sp, {r0 - r12}		)
+ THUMB(	str	sp, [sp, #S_SP]		)
+ THUMB(	str	lr, [sp, #S_LR]		)
 	mov	r1, #\reason
 	.endm
 
@@ -126,17 +129,24 @@ ENDPROC(__und_invalid)
 	.macro	svc_entry, stack_hole=0
  UNWIND(.fnstart		)
  UNWIND(.save {r0 - pc}		)
-	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole)
+	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+#ifdef CONFIG_THUMB2_KERNEL
+ SPFIX(	str	r0, [sp]	)	@ temporarily saved
+ SPFIX(	mov	r0, sp		)
+ SPFIX(	tst	r0, #4		)	@ test original stack alignment
+ SPFIX(	ldr	r0, [sp]	)	@ restored
+#else
  SPFIX(	tst	sp, #4		)
-SPFIX(	bicne	sp, sp, #4	)
-	stmib	sp, {r1 - r12}
+#endif
+ SPFIX(	subeq	sp, sp, #4	)
+	stmia	sp, {r1 - r12}
 
 	ldmia	r0, {r1 - r3}
-	add	r5, sp, #S_SP		@ here for interlock avoidance
+	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
 	mov	r4, #-1			@  ""  ""      ""       ""
-	add	r0, sp, #(S_FRAME_SIZE + \stack_hole)
-SPFIX(	addne	r0, r0, #4	)
-	str	r1, [sp]		@ save the "real" r0 copied
+	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ SPFIX(	addeq	r0, r0, #4	)
+	str	r1, [sp, #-4]!		@ save the "real" r0 copied
 					@ from the exception stack
 
 	mov	r1, lr
@@ -196,9 +206,8 @@ __dabt_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	r0, [sp, #S_PSR]
-	msr	spsr_cxsf, r0
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	ldr	r2, [sp, #S_PSR]
+	svc_exit r2				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__dabt_svc)
 
@@ -225,13 +234,12 @@ __irq_svc:
 	tst	r0, #_TIF_NEED_RESCHED
 	blne	svc_preempt
 #endif
-	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
-	msr	spsr_cxsf, r0
+	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
 #ifdef CONFIG_TRACE_IRQFLAGS
-	tst	r0, #PSR_I_BIT
+	tst	r4, #PSR_I_BIT
 	bleq	trace_hardirqs_on
 #endif
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	svc_exit r4				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__irq_svc)
 
@@ -266,7 +274,7 @@ __und_svc:
 	@  r0 - instruction
 	@
 	ldr	r0, [r2, #-4]
-	adr	r9, 1f
+	adr	r9, BSYM(1f)
 	bl	call_fpe
 
 	mov	r0, sp				@ struct pt_regs *regs
@@ -280,9 +288,8 @@ __und_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
-	msr	spsr_cxsf, lr
-	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
+	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
+	svc_exit r2				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__und_svc)
 
@@ -323,9 +330,8 @@ __pabt_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	r0, [sp, #S_PSR]
-	msr	spsr_cxsf, r0
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	ldr	r2, [sp, #S_PSR]
+	svc_exit r2				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__pabt_svc)
 
@@ -353,7 +359,8 @@ ENDPROC(__pabt_svc)
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)	@ don't unwind the user space
 	sub	sp, sp, #S_FRAME_SIZE
-	stmib	sp, {r1 - r12}
+ ARM(	stmib	sp, {r1 - r12}	)
+ THUMB(	stmia	sp, {r0 - r12}	)
 
 	ldmia	r0, {r1 - r3}
 	add	r0, sp, #S_PC		@ here for interlock avoidance
@@ -372,7 +379,8 @@ ENDPROC(__pabt_svc)
 	@ Also, separately save sp_usr and lr_usr
 	@
 	stmia	r0, {r2 - r4}
-	stmdb	r0, {sp, lr}^
+ ARM(	stmdb	r0, {sp, lr}^			)
+ THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 
 	@
 	@ Enable the alignment trap while in kernel mode
@@ -427,7 +435,7 @@ __dabt_usr:
 	@
 	enable_irq
 	mov	r2, sp
-	adr	lr, ret_from_exception
+	adr	lr, BSYM(ret_from_exception)
 	b	do_DataAbort
  UNWIND(.fnend		)
 ENDPROC(__dabt_usr)
@@ -452,7 +460,9 @@ __irq_usr:
 	ldr	r0, [tsk, #TI_PREEMPT]
 	str	r8, [tsk, #TI_PREEMPT]
 	teq	r0, r7
-	strne	r0, [r0, -r0]
+ ARM(	strne	r0, [r0, -r0]	)
+ THUMB(	movne	r0, #0		)
+ THUMB(	strne	r0, [r0]	)
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on
@@ -476,9 +486,10 @@ __und_usr:
 	@
 	@  r0 - instruction
 	@
-	adr	r9, ret_from_exception
-	adr	lr, __und_usr_unknown
+	adr	r9, BSYM(ret_from_exception)
+	adr	lr, BSYM(__und_usr_unknown)
 	tst	r3, #PSR_T_BIT			@ Thumb mode?
+	itet	eq				@ explicit IT needed for the 1f label
 	subeq	r4, r2, #4			@ ARM instr at LR - 4
 	subne	r4, r2, #2			@ Thumb instr at LR - 2
 1:	ldreqt	r0, [r4]
@@ -488,7 +499,10 @@ __und_usr:
 	beq	call_fpe
 	@ Thumb instruction
 #if __LINUX_ARM_ARCH__ >= 7
-2:	ldrht	r5, [r4], #2
+2:
+ ARM(	ldrht	r5, [r4], #2	)
+ THUMB(	ldrht	r5, [r4]	)
+ THUMB(	add	r4, r4, #2	)
 	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
 	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
 	blo	__und_usr_unknown
@@ -577,46 +591,50 @@ call_fpe:
 	moveq	pc, lr
 	get_thread_info r10			@ get current thread
 	and	r8, r0, #0x00000f00		@ mask out CP number
+ THUMB(	lsr	r8, r8, #8		)
 	mov	r7, #1
 	add	r6, r10, #TI_USED_CP
-	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
+ ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
+ THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
 #ifdef CONFIG_IWMMXT
 	@ Test if we need to give access to iWMMXt coprocessors
 	ldr	r5, [r10, #TI_FLAGS]
 	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
 	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
 	bcs	iwmmxt_task_enable
 #endif
-	add	pc, pc, r8, lsr #6
-	mov	r0, r0
-
-	mov	pc, lr				@ CP#0
-	b	do_fpe				@ CP#1 (FPE)
-	b	do_fpe				@ CP#2 (FPE)
-	mov	pc, lr				@ CP#3
+ ARM(	add	pc, pc, r8, lsr #6	)
+ THUMB(	lsl	r8, r8, #2		)
+ THUMB(	add	pc, r8			)
+	nop
+
+	W(mov)	pc, lr				@ CP#0
+	W(b)	do_fpe				@ CP#1 (FPE)
+	W(b)	do_fpe				@ CP#2 (FPE)
+	W(mov)	pc, lr				@ CP#3
 #ifdef CONFIG_CRUNCH
 	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
 	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
 	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
 #else
-	mov	pc, lr				@ CP#4
-	mov	pc, lr				@ CP#5
-	mov	pc, lr				@ CP#6
+	W(mov)	pc, lr				@ CP#4
+	W(mov)	pc, lr				@ CP#5
+	W(mov)	pc, lr				@ CP#6
 #endif
-	mov	pc, lr				@ CP#7
-	mov	pc, lr				@ CP#8
-	mov	pc, lr				@ CP#9
+	W(mov)	pc, lr				@ CP#7
+	W(mov)	pc, lr				@ CP#8
+	W(mov)	pc, lr				@ CP#9
 #ifdef CONFIG_VFP
-	b	do_vfp				@ CP#10 (VFP)
-	b	do_vfp				@ CP#11 (VFP)
+	W(b)	do_vfp				@ CP#10 (VFP)
+	W(b)	do_vfp				@ CP#11 (VFP)
 #else
-	mov	pc, lr				@ CP#10 (VFP)
-	mov	pc, lr				@ CP#11 (VFP)
+	W(mov)	pc, lr				@ CP#10 (VFP)
+	W(mov)	pc, lr				@ CP#11 (VFP)
 #endif
-	mov	pc, lr				@ CP#12
-	mov	pc, lr				@ CP#13
-	mov	pc, lr				@ CP#14 (Debug)
-	mov	pc, lr				@ CP#15 (Control)
+	W(mov)	pc, lr				@ CP#12
+	W(mov)	pc, lr				@ CP#13
+	W(mov)	pc, lr				@ CP#14 (Debug)
+	W(mov)	pc, lr				@ CP#15 (Control)
 
 #ifdef CONFIG_NEON
 	.align	6
@@ -667,7 +685,7 @@ no_fp:	mov	pc, lr
 __und_usr_unknown:
 	enable_irq
 	mov	r0, sp
-	adr	lr, ret_from_exception
+	adr	lr, BSYM(ret_from_exception)
 	b	do_undefinstr
 ENDPROC(__und_usr_unknown)
 
@@ -711,7 +729,10 @@ ENTRY(__switch_to)
  UNWIND(.cantunwind	)
 	add	ip, r1, #TI_CPU_SAVE
 	ldr	r3, [r2, #TI_TP_VALUE]
-	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
+ ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
+ THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
+ THUMB(	str	sp, [ip], #4		   )
+ THUMB(	str	lr, [ip], #4		   )
 #ifdef CONFIG_MMU
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
@@ -736,8 +757,12 @@ ENTRY(__switch_to)
 	ldr	r0, =thread_notify_head
 	mov	r1, #THREAD_NOTIFY_SWITCH
 	bl	atomic_notifier_call_chain
+ THUMB(	mov	ip, r4			   )
 	mov	r0, r5
-	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
+ ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
+ THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
+ THUMB(	ldr	sp, [ip], #4		   )
+ THUMB(	ldr	pc, [ip]		   )
  UNWIND(.fnend		)
 ENDPROC(__switch_to)
 
@@ -772,6 +797,7 @@ ENDPROC(__switch_to)
  * if your compiled code is not going to use the new instructions for other
  * purpose.
  */
+ THUMB(	.arm	)
 
 	.macro	usr_ret, reg
 #ifdef CONFIG_ARM_THUMB
@@ -1020,6 +1046,7 @@ __kuser_helper_version:			@ 0xffff0ffc
 	.globl	__kuser_helper_end
 __kuser_helper_end:
 
+ THUMB(	.thumb	)
 
 /*
  * Vector stubs.
@@ -1054,15 +1081,17 @@ vector_\name:
 	@ Prepare for SVC32 mode.  IRQs remain disabled.
 	@
 	mrs	r0, cpsr
-	eor	r0, r0, #(\mode ^ SVC_MODE)
+	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
 	msr	spsr_cxsf, r0
 
 	@
 	@ the branch table must immediately follow this code
 	@
 	and	lr, lr, #0x0f
+ THUMB(	adr	r0, 1f			)
+ THUMB(	ldr	lr, [r0, lr, lsl #2]	)
 	mov	r0, sp
-	ldr	lr, [pc, lr, lsl #2]
+ ARM(	ldr	lr, [pc, lr, lsl #2]	)
 	movs	pc, lr			@ branch to handler in SVC mode
 ENDPROC(vector_\name)
 
@@ -1206,14 +1235,16 @@ __stubs_end:
 
 	.globl	__vectors_start
 __vectors_start:
-	swi	SYS_ERROR0
-	b	vector_und + stubs_offset
-	ldr	pc, .LCvswi + stubs_offset
-	b	vector_pabt + stubs_offset
-	b	vector_dabt + stubs_offset
-	b	vector_addrexcptn + stubs_offset
-	b	vector_irq + stubs_offset
-	b	vector_fiq + stubs_offset
+ ARM(	swi	SYS_ERROR0	)
+ THUMB(	svc	#0		)
+ THUMB(	nop			)
+	W(b)	vector_und + stubs_offset
+	W(ldr)	pc, .LCvswi + stubs_offset
+	W(b)	vector_pabt + stubs_offset
+	W(b)	vector_dabt + stubs_offset
+	W(b)	vector_addrexcptn + stubs_offset
+	W(b)	vector_irq + stubs_offset
+	W(b)	vector_fiq + stubs_offset
 
 	.globl	__vectors_end
 __vectors_end:
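
Note (not part of the patch): the hunks above lean on the ARM()/THUMB()/W()/BSYM() helpers from arch/arm/include/asm/unified.h. As a rough sketch of how those assembler macros are defined for kernels of this era — exact definitions can differ between versions, and the comments here are explanatory additions, not copied from the header:

#ifdef CONFIG_THUMB2_KERNEL
#define ARM(x...)			/* ARM-only lines drop out of Thumb-2 builds */
#define THUMB(x...)	x		/* Thumb-2-only lines are assembled */
#define W(instr)	instr.w		/* force the wide (32-bit) Thumb-2 encoding */
#define BSYM(sym)	sym + 1		/* set bit 0 so loaded branch targets stay in Thumb state */
#else
#define ARM(x...)	x
#define THUMB(x...)
#define W(instr)	instr
#define BSYM(sym)	sym
#endif

With these, the same source assembles either as classic ARM code or as a Thumb-2 kernel: wrapped lines select per-ISA instruction sequences, W() pins branch-table entries to a fixed 4-byte width, and BSYM() is needed wherever an address is loaded into lr or pc rather than reached by a direct branch.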