/* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (c) 2014, Linaro Limited
* All rights reserved.
*/
#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
#include <kernel/unwind.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
.section .data
.balign 4
#ifdef CFG_BOOT_SYNC_CPU
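/*
 * sem_cpu_sync (defined elsewhere) holds one 32-bit slot per core; a
 * slot set to SEM_CPU_READY signals that the core has booted.
 */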
.equ SEM_CPU_READY, 1
#endif
#ifdef CFG_PL310
.section .rodata.init
panic_boot_file:
.asciz __FILE__
/*
 * void __assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
*/
LOCAL_FUNC __assert_flat_mapped_range , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
push { r4-r6, lr }
mov r4, r0
mov r5, r1
bl cpu_mmu_enabled
cmp r0, #0
beq 1f
mov r0, r4
bl virt_to_phys
cmp r0, r4
beq 1f
/*
 * This must match the argument layout of the generic panic routine:
* __do_panic(__FILE__, __LINE__, __func__, str)
*/
ldr r0, =panic_boot_file
mov r1, r5
mov r2, #0
mov r3, #0
bl __do_panic
b . /* should NOT return */
1: pop { r4-r6, pc }
UNWIND( .fnend)
END_FUNC __assert_flat_mapped_range
/* Panic if the MMU is enabled and vaddr != paddr (clobbers r0, r1 and lr) */
.macro assert_flat_mapped_range va, line
ldr r0, =(\va)
ldr r1, =\line
bl __assert_flat_mapped_range
.endm
#endif /* CFG_PL310 */
.weak plat_cpu_reset_early
FUNC plat_cpu_reset_early , :
UNWIND( .fnstart)
bx lr
UNWIND( .fnend)
END_FUNC plat_cpu_reset_early
KEEP_PAGER plat_cpu_reset_early
.section .text.reset_vect_table
.align 5
LOCAL_FUNC reset_vect_table , :
b .
b . /* Undef */
b . /* Syscall */
b . /* Prefetch abort */
b . /* Data abort */
b . /* Reserved */
b . /* IRQ */
b . /* FIQ */
END_FUNC reset_vect_table
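/*
 * cpu_is_ready - mark the current core as booted
 *
 * Stores SEM_CPU_READY in this core's slot of sem_cpu_sync and wakes
 * any core waiting in WFE. Clobbers r0-r2 and lr.
 */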
.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
bl get_core_pos
lsl r0, r0, #2
ldr r1,=sem_cpu_sync
ldr r2, =SEM_CPU_READY
str r2, [r1, r0]
dsb
sev
#endif
.endm
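/*
 * wait_primary - wait in WFE until the primary core has stored
 * SEM_CPU_READY in its sem_cpu_sync slot. Clobbers r0-r2.
 */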
.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
ldr r0, =sem_cpu_sync
mov r2, #SEM_CPU_READY
sev
1:
ldr r1, [r0]
cmp r1, r2
wfene
bne 1b
#endif
.endm
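/*
 * wait_secondary - wait in WFE until every secondary core has stored
 * SEM_CPU_READY in its sem_cpu_sync slot. Clobbers r0-r3.
 */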
.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
ldr r0, =sem_cpu_sync
mov r3, #CFG_TEE_CORE_NB_CORE
mov r2, #SEM_CPU_READY
sev
1:
subs r3, r3, #1
beq 3f
add r0, r0, #4
2:
ldr r1, [r0]
cmp r1, r2
wfene
bne 2b
b 1b
3:
#endif
.endm
/*
 * set_sctlr : Set up core configuration in CP15 SCTLR
*
* Setup required by current implementation of the OP-TEE core:
 * - Disable data and instruction caches.
 * - Turn the MMU off and trap exceptions in ARM mode.
 * - Enable or disable alignment checks according to platform configuration.
 * - Optionally enable write-implies-execute-never.
 * - Optionally enable the round robin strategy for cache replacement.
*
* Clobbers r0.
*/
.macro set_sctlr
read_sctlr r0
bic r0, r0, #(SCTLR_M | SCTLR_C)
bic r0, r0, #SCTLR_I
bic r0, r0, #SCTLR_TE
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
orr r0, r0, #SCTLR_A
#else
bic r0, r0, #SCTLR_A
#endif
#if defined(CFG_HWSUPP_MEM_PERM_WXN) && defined(CFG_CORE_RWDATA_NOEXEC)
orr r0, r0, #(SCTLR_WXN | SCTLR_UWXN)
#endif
#if defined(CFG_ENABLE_SCTLR_RR)
orr r0, r0, #SCTLR_RR
#endif
write_sctlr r0
.endm
/*
* enable_branch_prediction : manually enable branch prediction
*
 * This macro targets only the ARMv7 architecture and is hence
 * conditioned by the configuration directive CFG_ENABLE_SCTLR_Z. On
 * more recent architectures, program flow prediction is automatically
 * enabled upon MMU enablement.
*/
.macro enable_branch_prediction
#if defined(CFG_ENABLE_SCTLR_Z)
read_sctlr r0
/* Some ARMv7 implementations need a BTAC flush followed by sync barriers */
write_bpiall
dsb
isb
orr r0, r0, #SCTLR_Z
write_sctlr r0
#endif
.endm
/*
* Save boot arguments
 * entry r0, saved r4: pageable part address (pager store)
 * entry r1, saved r7: (ARMv7 standard bootarg #1)
 * entry r2, saved r6: device tree address (ARMv7 standard bootarg #2)
* entry lr, saved r5: non-secure entry address (ARMv7 bootarg #0)
*/
.macro bootargs_entry
#if defined(CFG_NS_ENTRY_ADDR)
ldr r5, =CFG_NS_ENTRY_ADDR
#else
mov r5, lr
#endif
#if defined(CFG_PAGEABLE_ADDR)
ldr r4, =CFG_PAGEABLE_ADDR
#else
mov r4, r0
#endif
#if defined(CFG_DT_ADDR)
ldr r6, =CFG_DT_ADDR
#else
mov r6, r2
#endif
mov r7, r1
.endm
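/*
 * _start() - core entry point after reset
 *
 * Saves the boot arguments, applies early platform-specific reset
 * settings, configures SCTLR and the exception vector base and then
 * branches to the primary or secondary boot path.
 */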
FUNC _start , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
bootargs_entry
/* Early ARM secure MP specific configuration */
bl plat_cpu_reset_early
set_sctlr
isb
ldr r0, =reset_vect_table
write_vbar r0
#if defined(CFG_WITH_ARM_TRUSTED_FW)
b reset_primary
#else
bl get_core_pos
cmp r0, #0
beq reset_primary
b reset_secondary
#endif
UNWIND( .fnend)
END_FUNC _start
KEEP_INIT _start
/*
* Setup sp to point to the top of the tmp stack for the current CPU:
* sp is assigned stack_tmp_export + cpu_id * stack_tmp_stride
*/
.macro set_sp
bl get_core_pos
cmp r0, #CFG_TEE_CORE_NB_CORE
/* Unsupported CPU, park it before it breaks something */
bge unhandled_cpu
ldr r1, =stack_tmp_stride
ldr r1, [r1]
mul r1, r0, r1
ldr r0, =stack_tmp_export
ldr r0, [r0]
add sp, r1, r0
.endm
/*
* Cache maintenance during entry: handle outer cache.
* End address is exclusive: first byte not to be changed.
* Note however arm_clX_inv/cleanbyva operate on full cache lines.
*
 * The #define wrappers below capture the source line number (__LINE__)
 * for the PL310 assertion
*/
.macro __inval_cache_vrange vbase, vend, line
#ifdef CFG_PL310
assert_flat_mapped_range (\vbase), (\line)
bl pl310_base
ldr r1, =(\vbase)
ldr r2, =(\vend)
bl arm_cl2_invbypa
#endif
ldr r0, =(\vbase)
ldr r1, =(\vend)
sub r1, r1, r0
bl dcache_inv_range
.endm
.macro __flush_cache_vrange vbase, vend, line
#ifdef CFG_PL310
assert_flat_mapped_range (\vbase), (\line)
ldr r0, =(\vbase)
ldr r1, =(\vend)
sub r1, r1, r0
bl dcache_clean_range
bl pl310_base
ldr r1, =(\vbase)
ldr r2, =(\vend)
bl arm_cl2_cleaninvbypa
#endif
ldr r0, =(\vbase)
ldr r1, =(\vend)
sub r1, r1, r0
bl dcache_cleaninv_range
.endm
#define inval_cache_vrange(vbase, vend) \
__inval_cache_vrange (vbase), ((vend) - 1), __LINE__
#define flush_cache_vrange(vbase, vend) \
__flush_cache_vrange (vbase), ((vend) - 1), __LINE__
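/*
 * The "- 1" passes the address of the last byte in the range rather
 * than the exclusive end. The size computed for the dcache_*_range
 * calls is then one byte short, which is harmless here since the
 * operations work on full cache lines (see note above).
 */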
#ifdef CFG_BOOT_SYNC_CPU
#define flush_cpu_semaphores \
flush_cache_vrange(sem_cpu_sync, \
(sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)))
#else
#define flush_cpu_semaphores
#endif
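/*
 * reset_primary() - boot path for the primary core
 *
 * Relocates the init code when the pager is enabled, clears .bss, sets
 * up the temporary stack, caches and MMU, runs
 * generic_boot_init_primary(), releases the secondary cores and
 * finally issues a TEESMC_OPTEED_RETURN_ENTRY_DONE SMC.
 */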
LOCAL_FUNC reset_primary , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
/* preserve r4-r7: bootargs */
#ifdef CFG_WITH_PAGER
/*
* Move init code into correct location and move hashes to a
* temporary safe location until the heap is initialized.
*
* The binary is built as:
* [Pager code, rodata and data] : In correct location
* [Init code and rodata] : Should be copied to __init_start
* [Hashes] : Should be saved before initializing pager
*
*/
ldr r0, =__init_start /* dst */
ldr r1, =__data_end /* src */
ldr r2, =__tmp_hashes_end /* dst limit */
/* Copy backwards (as memmove) in case we're overlapping */
sub r2, r2, r0 /* len */
add r0, r0, r2
add r1, r1, r2
ldr r2, =__init_start
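/*
 * Note that sp is used as an extra copy register below: the stack is
 * not set up yet, so this is safe. Each iteration moves 28 bytes.
 */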
copy_init:
ldmdb r1!, {r3, r8-r12, sp}
stmdb r0!, {r3, r8-r12, sp}
cmp r0, r2
bgt copy_init
#endif
/*
 * Clear .bss. This code depends on the linker keeping the start and
 * end of .bss at least 8-byte aligned.
*/
ldr r0, =__bss_start
ldr r1, =__bss_end
mov r2, #0
mov r3, #0
clear_bss:
stmia r0!, {r2, r3}
cmp r0, r1
blo clear_bss /* blo: __bss_end is an exclusive limit */
#ifdef CFG_CORE_SANITIZE_KADDRESS
/* First initialize the entire shadow area with no access */
ldr r0, =__asan_shadow_start /* start */
ldr r1, =__asan_shadow_end /* limit */
mov r2, #ASAN_DATA_RED_ZONE
shadow_no_access:
str r2, [r0], #4
cmp r0, r1
blo shadow_no_access /* blo: the limit is exclusive */
/* Mark the entire stack area as OK */
ldr r2, =CFG_ASAN_SHADOW_OFFSET
ldr r0, =__nozi_stack_start /* start */
lsr r0, r0, #ASAN_BLOCK_SHIFT
add r0, r0, r2
ldr r1, =__nozi_stack_end /* limit */
lsr r1, r1, #ASAN_BLOCK_SHIFT
add r1, r1, r2
mov r2, #0
shadow_stack_access_ok:
strb r2, [r0], #1
cmp r0, r1
blo shadow_stack_access_ok /* blo: the limit is exclusive */
#endif
set_sp
/* complete ARM secure MP common configuration */
bl plat_cpu_reset_late
/* Enable Console */
bl console_init
#ifdef CFG_PL310
bl pl310_base
bl arm_cl2_config
#endif
/*
* Invalidate dcache for all memory used during initialization to
 * avoid nasty surprises when the cache is turned on. We must not
 * invalidate memory not used by OP-TEE since we may invalidate
 * entries used by, for instance, ARM Trusted Firmware.
*/
#ifdef CFG_WITH_PAGER
inval_cache_vrange(__text_start, __tmp_hashes_end)
#else
inval_cache_vrange(__text_start, __end)
#endif
#ifdef CFG_PL310
/* Enable PL310 if not yet enabled */
bl pl310_base
bl arm_cl2_enable
#endif
bl core_init_mmu_map
bl core_init_mmu_regs
bl cpu_mmu_enable
bl cpu_mmu_enable_icache
bl cpu_mmu_enable_dcache
enable_branch_prediction
mov r0, r4 /* pageable part address */
mov r1, r5 /* ns-entry address */
mov r2, r6 /* DT address */
bl generic_boot_init_primary
mov r4, r0 /* save the entry vector returned by generic_boot_init_primary */
/*
* In case we've touched memory that secondary CPUs will use before
* they have turned on their D-cache, clean and invalidate the
* D-cache before exiting to normal world.
*/
#ifdef CFG_WITH_PAGER
flush_cache_vrange(__text_start, __init_end)
#else
flush_cache_vrange(__text_start, __end)
#endif
/* release secondary boot cores and sync with them */
cpu_is_ready
flush_cpu_semaphores
wait_secondary
#ifdef CFG_PL310_LOCKED
/* Lock and invalidate all lines: the PL310 then behaves as if disabled */
bl pl310_base
bl arm_cl2_lockallways
bl pl310_base
bl arm_cl2_cleaninvbyway
#endif
/*
* Clear current thread id now to allow the thread to be reused on
* next entry. Matches the thread_init_boot_thread() in
* generic_boot.c.
*/
bl thread_clr_boot_thread
#if defined(CFG_WITH_ARM_TRUSTED_FW)
/* Pass the vector address returned from generic_boot_init_primary */
mov r1, r4
#else
/* Relay standard bootargs #1 and #2 to the non-secure entry */
mov r4, #0
mov r3, r6 /* std bootarg #2 for register R2 */
mov r2, r7 /* std bootarg #1 for register R1 */
mov r1, #0
#endif /* CFG_WITH_ARM_TRUSTED_FW */
mov r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
smc #0
b . /* SMC should not return */
UNWIND( .fnend)
END_FUNC reset_primary
LOCAL_FUNC unhandled_cpu , :
UNWIND( .fnstart)
wfi
b unhandled_cpu
UNWIND( .fnend)
END_FUNC unhandled_cpu
#if defined(CFG_WITH_ARM_TRUSTED_FW)
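/*
 * cpu_on_handler() - entry point for a core released by a PSCI CPU_ON
 * request when ARM Trusted Firmware is the secure monitor
 *
 * The r0/r1 arguments are forwarded to generic_boot_cpu_on_handler()
 * and lr holds the return address back to the monitor.
 */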
FUNC cpu_on_handler , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
mov r4, r0
mov r5, r1
mov r6, lr
set_sctlr
isb
ldr r0, =reset_vect_table
write_vbar r0
set_sp
bl core_init_mmu_regs
bl cpu_mmu_enable
bl cpu_mmu_enable_icache
bl cpu_mmu_enable_dcache
enable_branch_prediction
mov r0, r4
mov r1, r5
bl generic_boot_cpu_on_handler
bx r6
UNWIND( .fnend)
END_FUNC cpu_on_handler
KEEP_PAGER cpu_on_handler
#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */
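/*
 * reset_secondary() - boot path for secondary cores when OP-TEE
 * provides the secure monitor itself
 *
 * Waits until the primary core has completed initialization, sets up
 * MMU and caches and finally issues a TEESMC_OPTEED_RETURN_ENTRY_DONE
 * SMC to enter the normal world.
 */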
LOCAL_FUNC reset_secondary , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
ldr r0, =reset_vect_table
write_vbar r0
wait_primary
set_sp
bl plat_cpu_reset_late
#if defined (CFG_BOOT_SECONDARY_REQUEST)
/* In case L1 was not invalidated earlier, do it here */
mov r0, #DCACHE_OP_INV
bl dcache_op_level1
#endif
bl core_init_mmu_regs
bl cpu_mmu_enable
bl cpu_mmu_enable_icache
bl cpu_mmu_enable_dcache
enable_branch_prediction
cpu_is_ready
#if defined (CFG_BOOT_SECONDARY_REQUEST)
/* generic_boot_core_hpen return value (r0) is ns entry point */
bl generic_boot_core_hpen
#else
mov r0, r5 /* ns-entry address */
#endif
bl generic_boot_init_secondary
mov r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
mov r1, #0
mov r2, #0
mov r3, #0
mov r4, #0
smc #0
b . /* SMC should not return */
UNWIND( .fnend)
END_FUNC reset_secondary
KEEP_PAGER reset_secondary
#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */