author     Jens Wiklander <jens.wiklander@linaro.org>      2018-03-09 17:13:25 +0100
committer  Jérôme Forissier <jerome.forissier@linaro.org>  2018-04-03 18:07:07 +0200
commit     211417d3a487e33e20ad3994837b313412b868a3
tree       c1729c7296bdc8582038913c4a08fb893b654eef /core
parent     a8d84b5836934ed78dfe931eed502317569ec931
core: more flexible ta mapping
Replaces the current fixed array of TA map entries, in which some indexes
have a special meaning, with a more flexible scheme. The new structures
and functions dealing with this have a vm_ prefix instead of the old
tee_mmu_ prefix.

struct tee_ta_region is replaced by struct vm_region, which is now kept
in a linked list, with the new TEE_MATTR bits identifying special
regions.

struct tee_mmu_info is replaced by struct vm_info, which holds the head
of the linked list of regions.
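For reference, the new types introduced below in core/include/mm/tee_mmu_types.h
boil down to the following condensed excerpt (stub typedefs added so it stands
alone; comments added here, not in the patch):

        #include <stddef.h>
        #include <stdint.h>
        #include <sys/queue.h>

        typedef uintptr_t vaddr_t;      /* stand-in for the kernel's vaddr_t */
        struct mobj;                    /* opaque memory object, as in the patch */

        struct vm_region {
                struct mobj *mobj;      /* backing memory object */
                size_t offset;          /* offset into the mobj */
                vaddr_t va;             /* user virtual address of the region */
                size_t size;            /* size, small-page aligned */
                uint32_t attr;          /* TEE_MATTR_* bits; EPHEMERAL and
                                           PERMANENT mark param/kernel regions */
                TAILQ_ENTRY(vm_region) link;
        };

        TAILQ_HEAD(vm_region_head, vm_region);

        struct vm_info {
                struct vm_region_head regions;  /* kept sorted by va */
                unsigned int asid;
        };

Keeping the list sorted by virtual address is what lets umap_add_region() in
the patch find a free gap with a single walk over the list.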
Acked-by: Etienne Carriere <etienne.carriere@linaro.org>
Acked-by: Jerome Forissier <jerome.forissier@linaro.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
Diffstat (limited to 'core')

 -rw-r--r--  core/arch/arm/include/kernel/user_ta.h |   5
 -rw-r--r--  core/arch/arm/kernel/abort.c           |   2
 -rw-r--r--  core/arch/arm/kernel/user_ta.c         | 175
 -rw-r--r--  core/arch/arm/mm/core_mmu.c            |  35
 -rw-r--r--  core/arch/arm/mm/core_mmu_lpae.c       |   2
 -rw-r--r--  core/arch/arm/mm/core_mmu_v7.c         |   2
 -rw-r--r--  core/arch/arm/mm/tee_mmu.c             | 789
 -rw-r--r--  core/include/mm/tee_mmu.h              |  28
 -rw-r--r--  core/include/mm/tee_mmu_types.h        |  12
 -rw-r--r--  core/kernel/tee_ta_manager.c           |   2

 10 files changed, 533 insertions, 519 deletions
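Before reading the diff, it may help to see the new entry point in use. A
minimal sketch of how user_ta.c below now maps the TA stack (kernel context
assumed; the wrapper name map_stack_sketch is hypothetical and error handling
is trimmed):

        /* Passing *va == 0 asks vm_map() to pick the next free spot. */
        static TEE_Result map_stack_sketch(struct user_ta_ctx *utc)
        {
                utc->stack_addr = 0;
                return vm_map(utc, &utc->stack_addr, utc->mobj_stack->size,
                              TEE_MATTR_URW | TEE_MATTR_PRW,
                              utc->mobj_stack, 0);
        }

On success vm_map() writes the chosen virtual address back through the
pointer, so utc->stack_addr ends up holding the base of the stack mapping.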
diff --git a/core/arch/arm/include/kernel/user_ta.h b/core/arch/arm/include/kernel/user_ta.h
index 0eb26031..8b201143 100644
--- a/core/arch/arm/include/kernel/user_ta.h
+++ b/core/arch/arm/include/kernel/user_ta.h
@@ -32,8 +32,9 @@ struct user_ta_ctx {
 	struct tee_storage_enum_head storage_enums;
 	struct mobj *mobj_code; /* secure world memory */
 	struct mobj *mobj_stack; /* stack */
-	uint32_t load_addr; /* elf load addr (from TAs address space) */
-	struct tee_mmu_info *mmu; /* Saved MMU information (ddr only) */
+	vaddr_t stack_addr; /* Virtual address of stack */
+	vaddr_t load_addr; /* elf load addr (from TAs address space) */
+	struct vm_info *vm_info;/* Virtual memory map of this context */
 	void *ta_time_offs; /* Time reference used by the TA */
 	struct tee_pager_area_head *areas;
 #if defined(CFG_SE_API)
diff --git a/core/arch/arm/kernel/abort.c b/core/arch/arm/kernel/abort.c
index d7c87e5c..b82ec3d8 100644
--- a/core/arch/arm/kernel/abort.c
+++ b/core/arch/arm/kernel/abort.c
@@ -146,7 +146,7 @@ static void __print_stack_unwind_arm64(struct abort_info *ai)
 		utc = to_user_ta_ctx(s->ctx);
 
 		/* User stack */
-		stack = (uaddr_t)utc->mmu->regions[0].va;
+		stack = utc->stack_addr;
 		stack_size = utc->mobj_stack->size;
 	} else {
 		/* Kernel stack */
diff --git a/core/arch/arm/kernel/user_ta.c b/core/arch/arm/kernel/user_ta.c
index 636ae990..7d1cf0c8 100644
--- a/core/arch/arm/kernel/user_ta.c
+++ b/core/arch/arm/kernel/user_ta.c
@@ -31,6 +31,7 @@
 #include <compiler.h>
 #include <keep.h>
 #include <kernel/panic.h>
+#include <kernel/tee_misc.h>
 #include <kernel/tee_ta_manager.h>
 #include <kernel/thread.h>
 #include <kernel/user_ta.h>
@@ -65,15 +66,13 @@ static uint32_t elf_flags_to_mattr(uint32_t flags, bool init_attrs)
 	uint32_t mattr = 0;
 
 	if (init_attrs)
-		mattr = TEE_MATTR_PRW;
-	else {
-		if (flags & PF_X)
-			mattr |= TEE_MATTR_UX;
-		if (flags & PF_W)
-			mattr |= TEE_MATTR_UW;
-		if (flags & PF_R)
-			mattr |= TEE_MATTR_UR;
-	}
+		mattr |= TEE_MATTR_PRW;
+	if (flags & PF_X)
+		mattr |= TEE_MATTR_UX;
+	if (flags & PF_W)
+		mattr |= TEE_MATTR_UW;
+	if (flags & PF_R)
+		mattr |= TEE_MATTR_UR;
 
 	return mattr;
 }
@@ -81,35 +80,33 @@ static uint32_t elf_flags_to_mattr(uint32_t flags, bool init_attrs)
 #ifdef CFG_PAGED_USER_TA
 static TEE_Result config_initial_paging(struct user_ta_ctx *utc)
 {
-	size_t n;
+	struct vm_region *r;
 
-	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
-		if (!utc->mmu->regions[n].size)
+	TAILQ_FOREACH(r, &utc->vm_info->regions, link) {
+		if (r->attr & (TEE_MATTR_EPHEMERAL | TEE_MATTR_PERMANENT))
 			continue;
-		if (!tee_pager_add_uta_area(utc, utc->mmu->regions[n].va,
-					    utc->mmu->regions[n].size))
+		if (!tee_pager_add_uta_area(utc, r->va, r->size))
 			return TEE_ERROR_GENERIC;
 	}
+
 	return TEE_SUCCESS;
 }
 
 static TEE_Result config_final_paging(struct user_ta_ctx *utc)
 {
-	size_t n;
+	struct vm_region *r;
 	uint32_t flags;
 
 	tee_pager_assign_uta_tables(utc);
 
-	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
-		if (!utc->mmu->regions[n].size)
+	TAILQ_FOREACH(r, &utc->vm_info->regions, link) {
+		if (r->attr & (TEE_MATTR_EPHEMERAL | TEE_MATTR_PERMANENT))
 			continue;
-		flags = utc->mmu->regions[n].attr &
-			(TEE_MATTR_PRW | TEE_MATTR_URWX);
-		if (!tee_pager_set_uta_area_attr(utc, utc->mmu->regions[n].va,
-						 utc->mmu->regions[n].size,
-						 flags))
+		flags = r->attr & TEE_MATTR_PROT_MASK;
+		if (!tee_pager_set_uta_area_attr(utc, r->va, r->size, flags))
 			return TEE_ERROR_GENERIC;
 	}
+
 	return TEE_SUCCESS;
 }
 #else /*!CFG_PAGED_USER_TA*/
@@ -120,29 +117,50 @@ static TEE_Result config_initial_paging(struct user_ta_ctx *utc __unused)
 static TEE_Result config_final_paging(struct user_ta_ctx *utc)
 {
-	void *va = (void *)utc->mmu->ta_private_vmem_start;
-	size_t vasize = utc->mmu->ta_private_vmem_end -
-			utc->mmu->ta_private_vmem_start;
+	vaddr_t vstart;
+	vaddr_t vend;
 
-	cache_op_inner(DCACHE_AREA_CLEAN, va, vasize);
-	cache_op_inner(ICACHE_AREA_INVALIDATE, va, vasize);
+	vm_info_get_user_range(utc, &vstart, &vend);
+	cache_op_inner(DCACHE_AREA_CLEAN, (void *)vstart, vend - vstart);
+	cache_op_inner(ICACHE_AREA_INVALIDATE, (void *)vstart, vend - vstart);
 	return TEE_SUCCESS;
 }
 #endif /*!CFG_PAGED_USER_TA*/
 
+struct load_seg {
+	vaddr_t offs;
+	uint32_t prot;
+	vaddr_t oend;
+};
+
+static int cmp_load_seg(const void *a0, const void *a1)
+{
+	const struct load_seg *s0 = a0;
+	const struct load_seg *s1 = a1;
+
+	return CMP_TRILEAN(s0->offs, s1->offs);
+}
+
 static TEE_Result load_elf_segments(struct user_ta_ctx *utc,
 			struct elf_load_state *elf_state, bool init_attrs)
 {
 	TEE_Result res;
-	uint32_t mattr;
 	size_t idx = 0;
+	size_t num_segs = 0;
+	struct load_seg *segs = NULL;
 
-	tee_mmu_map_init(utc);
+	res = tee_mmu_map_init(utc);
+	if (res)
+		return res;
 
 	/*
 	 * Add stack segment
 	 */
-	tee_mmu_map_stack(utc, utc->mobj_stack);
+	utc->stack_addr = 0;
+	res = vm_map(utc, &utc->stack_addr, utc->mobj_stack->size,
+		     TEE_MATTR_URW | TEE_MATTR_PRW, utc->mobj_stack, 0);
+	if (res)
+		return res;
 
 	/*
 	 * Add code segment
@@ -161,17 +179,64 @@ static TEE_Result load_elf_segments(struct user_ta_ctx *utc,
 			return res;
 
 		if (type == PT_LOAD) {
-			mattr = elf_flags_to_mattr(flags, init_attrs);
-			res = tee_mmu_map_add_segment(utc, utc->mobj_code,
-						      offs, size, mattr);
-			if (res != TEE_SUCCESS)
-				return res;
+			void *p = realloc(segs, (num_segs + 1) * sizeof(*segs));
+
+			if (!p) {
+				free(segs);
+				return TEE_ERROR_OUT_OF_MEMORY;
+			}
+			segs = p;
+			segs[num_segs].offs = ROUNDDOWN(offs, SMALL_PAGE_SIZE);
+			segs[num_segs].oend = ROUNDUP(offs + size,
+						      SMALL_PAGE_SIZE);
+			segs[num_segs].prot = elf_flags_to_mattr(flags,
+								 init_attrs);
+			num_segs++;
 		} else if (type == PT_ARM_EXIDX) {
 			utc->exidx_start = offs;
 			utc->exidx_size = size;
 		}
 	}
 
+	/* Sort in case load sections aren't in order */
+	qsort(segs, num_segs, sizeof(*segs), cmp_load_seg);
+
+	idx = 1;
+	while (idx < num_segs) {
+		size_t this_size = segs[idx].oend - segs[idx].offs;
+		size_t prev_size = segs[idx - 1].oend - segs[idx - 1].offs;
+
+		if (core_is_buffer_intersect(segs[idx].offs, this_size,
+					     segs[idx - 1].offs, prev_size)) {
+			/* Merge the segments and their attributes */
+			segs[idx - 1].oend = MAX(segs[idx - 1].oend,
+						 segs[idx].oend);
+			segs[idx - 1].prot |= segs[idx].prot;
+
+			/* Remove this index */
+			memcpy(segs + idx, segs + idx + 1,
+			       (num_segs - idx - 1) * sizeof(*segs));
+			num_segs--;
+		} else {
+			idx++;
+		}
+	}
+
+	utc->load_addr = 0;
+	for (idx = 0; idx < num_segs; idx++) {
+		vaddr_t va = utc->load_addr - segs[0].offs + segs[idx].offs;
+
+		res = vm_map(utc, &va, segs[idx].oend - segs[idx].offs,
+			     segs[idx].prot, utc->mobj_code, segs[idx].offs);
+		if (res)
+			break;
+		if (!idx)
+			utc->load_addr = va;
+	}
+	free(segs);
+	if (res)
+		return res;
+
 	if (init_attrs)
 		return config_initial_paging(utc);
 	else
@@ -233,7 +298,7 @@ static TEE_Result load_elf(struct user_ta_ctx *utc,
 	/*
 	 * Map physical memory into TA virtual memory
 	 */
-	res = tee_mmu_init(utc);
+	res = vm_info_init(utc);
 	if (res != TEE_SUCCESS)
 		goto out;
 
@@ -243,7 +308,7 @@ static TEE_Result load_elf(struct user_ta_ctx *utc,
 	tee_mmu_set_ctx(&utc->ctx);
 
-	res = elf_load_body(elf_state, tee_mmu_get_load_addr(&utc->ctx));
+	res = elf_load_body(elf_state, utc->load_addr);
 	if (res != TEE_SUCCESS)
 		goto out;
 
@@ -297,7 +362,6 @@ static TEE_Result ta_load(const TEE_UUID *uuid,
 	if (res != TEE_SUCCESS)
 		goto error_return;
 
-	utc->load_addr = tee_mmu_get_load_addr(&utc->ctx);
 	ta_head = (struct ta_head *)(vaddr_t)utc->load_addr;
 
 	if (memcmp(&ta_head->uuid, uuid, sizeof(TEE_UUID)) != 0) {
@@ -314,7 +378,7 @@ static TEE_Result ta_load(const TEE_UUID *uuid,
 		goto error_return;
 	}
 
-	DMSG("ELF load address 0x%x", utc->load_addr);
+	DMSG("ELF load address %#" PRIxVA, utc->load_addr);
 	utc->ctx.flags = ta_head->flags;
 	utc->ctx.uuid = ta_head->uuid;
 	utc->entry_func = ta_head->entry.ptr64;
@@ -333,7 +397,7 @@ error_return:
 	if (utc) {
 		pgt_flush_ctx(&utc->ctx);
 		tee_pager_rem_uta_areas(utc);
-		tee_mmu_final(utc);
+		vm_info_final(utc);
 		mobj_free(utc->mobj_code);
 		mobj_free(utc->mobj_stack);
 		free(utc);
@@ -430,8 +494,7 @@ static TEE_Result user_ta_enter(TEE_ErrorOrigin *err,
 	tee_ta_push_current_session(session);
 
 	/* Make room for usr_params at top of stack */
-	usr_stack = (uaddr_t)utc->mmu->regions[TEE_MMU_UMAP_STACK_IDX].va +
-		utc->mobj_stack->size;
+	usr_stack = utc->stack_addr + utc->mobj_stack->size;
 	usr_stack -= ROUNDUP(sizeof(struct utee_params), STACK_ALIGNMENT);
 	usr_params = (struct utee_params *)usr_stack;
 	init_utee_param(usr_params, param, param_va);
@@ -502,28 +565,26 @@ static void user_ta_enter_close_session(struct tee_ta_session *s)
 static void user_ta_dump_state(struct tee_ta_ctx *ctx)
 {
 	struct user_ta_ctx *utc __maybe_unused = to_user_ta_ctx(ctx);
+	struct vm_region *r;
 	char flags[7] = { '\0', };
-	size_t n;
+	size_t n = 0;
 
-	EMSG_RAW(" arch: %s load address: 0x%x ctx-idr: %d",
+	EMSG_RAW(" arch: %s load address: %#" PRIxVA " ctx-idr: %d",
 		 utc->is_32bit ? "arm" : "aarch64", utc->load_addr,
-		 utc->mmu->asid);
+		 utc->vm_info->asid);
 	EMSG_RAW(" stack: 0x%" PRIxVA " %zu",
-		 utc->mmu->regions[TEE_MMU_UMAP_STACK_IDX].va,
-		 utc->mobj_stack->size);
-	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
+		 utc->stack_addr, utc->mobj_stack->size);
+	TAILQ_FOREACH(r, &utc->vm_info->regions, link) {
 		paddr_t pa = 0;
 
-		if (utc->mmu->regions[n].mobj)
-			mobj_get_pa(utc->mmu->regions[n].mobj,
-				    utc->mmu->regions[n].offset, 0, &pa);
+		if (r->mobj)
+			mobj_get_pa(r->mobj, r->offset, 0, &pa);
 
-		mattr_perm_to_str(flags, sizeof(flags),
-				  utc->mmu->regions[n].attr);
+		mattr_perm_to_str(flags, sizeof(flags), r->attr);
 		EMSG_RAW(" region %zu: va %#" PRIxVA " pa %#" PRIxPA
 			 " size %#zx flags %s",
-			 n, utc->mmu->regions[n].va, pa,
-			 utc->mmu->regions[n].size, flags);
+			 n, r->va, pa, r->size, flags);
+		n++;
 	}
 }
 KEEP_PAGER(user_ta_dump_state);
@@ -570,7 +631,7 @@ static void user_ta_ctx_destroy(struct tee_ta_ctx *ctx)
 				    &utc->open_sessions, KERN_IDENTITY);
 	}
 
-	tee_mmu_final(utc);
+	vm_info_final(utc);
 	mobj_free(utc->mobj_code);
 	mobj_free(utc->mobj_stack);
 
@@ -585,7 +646,7 @@ static void user_ta_ctx_destroy(struct tee_ta_ctx *ctx)
 static uint32_t user_ta_get_instance_id(struct tee_ta_ctx *ctx)
 {
-	return to_user_ta_ctx(ctx)->mmu->asid;
+	return to_user_ta_ctx(ctx)->vm_info->asid;
 }
 
 static const struct tee_ta_ops user_ta_ops __rodata_unpaged = {
diff --git a/core/arch/arm/mm/core_mmu.c b/core/arch/arm/mm/core_mmu.c
index f7c283d5..72be5b17 100644
--- a/core/arch/arm/mm/core_mmu.c
+++ b/core/arch/arm/mm/core_mmu.c
@@ -1285,7 +1285,7 @@ static void set_region(struct core_mmu_table_info *tbl_info,
 }
 
 static void set_pg_region(struct core_mmu_table_info *dir_info,
-			  struct tee_ta_region *region, struct pgt **pgt,
+			  struct vm_region *region, struct pgt **pgt,
 			  struct core_mmu_table_info *pg_info)
 {
 	struct tee_mmap_region r = {
@@ -1524,36 +1524,29 @@ void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
 	struct core_mmu_table_info pg_info;
 	struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
 	struct pgt *pgt;
-	size_t n;
+	struct vm_region *r;
+	struct vm_region *r_last;
 
-	/* Find the last valid entry */
-	n = ARRAY_SIZE(utc->mmu->regions);
-	while (true) {
-		n--;
-		if (utc->mmu->regions[n].size)
-			break;
-		if (!n)
-			return; /* Nothing to map */
-	}
+	/* Find the first and last valid entry */
+	r = TAILQ_FIRST(&utc->vm_info->regions);
+	if (!r)
+		return; /* Nothing to map */
+	r_last = TAILQ_LAST(&utc->vm_info->regions, vm_region_head);
 
 	/*
 	 * Allocate all page tables in advance.
 	 */
-	pgt_alloc(pgt_cache, &utc->ctx, utc->mmu->regions[0].va,
-		  utc->mmu->regions[n].va + utc->mmu->regions[n].size - 1);
+	pgt_alloc(pgt_cache, &utc->ctx, r->va,
+		  r_last->va + r_last->size - 1);
 	pgt = SLIST_FIRST(pgt_cache);
 
 	core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);
 
-	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++)
-		mobj_update_mapping(utc->mmu->regions[n].mobj, utc,
-				    utc->mmu->regions[n].va);
+	TAILQ_FOREACH(r, &utc->vm_info->regions, link)
+		mobj_update_mapping(r->mobj, utc, r->va);
 
-	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
-		if (!utc->mmu->regions[n].size)
-			continue;
-		set_pg_region(dir_info, utc->mmu->regions + n, &pgt, &pg_info);
-	}
+	TAILQ_FOREACH(r, &utc->vm_info->regions, link)
+		set_pg_region(dir_info, r, &pgt, &pg_info);
 }
 
 bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
diff --git a/core/arch/arm/mm/core_mmu_lpae.c b/core/arch/arm/mm/core_mmu_lpae.c
index 43342531..3e029cbf 100644
--- a/core/arch/arm/mm/core_mmu_lpae.c
+++ b/core/arch/arm/mm/core_mmu_lpae.c
@@ -529,7 +529,7 @@ void core_mmu_create_user_map(struct user_ta_ctx *utc,
 	memset(dir_info.table, 0, PGT_SIZE);
 	core_mmu_populate_user_map(&dir_info, utc);
 	map->user_map = virt_to_phys(dir_info.table) | TABLE_DESC;
-	map->asid = utc->mmu->asid;
+	map->asid = utc->vm_info->asid;
 }
 
 bool core_mmu_find_table(vaddr_t va, unsigned max_level,
diff --git a/core/arch/arm/mm/core_mmu_v7.c b/core/arch/arm/mm/core_mmu_v7.c
index cf4ef7c5..613feb9f 100644
--- a/core/arch/arm/mm/core_mmu_v7.c
+++ b/core/arch/arm/mm/core_mmu_v7.c
@@ -497,7 +497,7 @@ void core_mmu_create_user_map(struct user_ta_ctx *utc,
 	memset(dir_info.table, 0, dir_info.num_entries * sizeof(uint32_t));
 	core_mmu_populate_user_map(&dir_info, utc);
 	map->ttbr0 = core_mmu_get_ul1_ttb_pa() | TEE_MMU_DEFAULT_ATTRS;
-	map->ctxid = utc->mmu->asid;
+	map->ctxid = utc->vm_info->asid;
 }
 
 bool core_mmu_find_table(vaddr_t va, unsigned max_level,
diff --git a/core/arch/arm/mm/tee_mmu.c b/core/arch/arm/mm/tee_mmu.c
index 2826d6da..baea85c2 100644
--- a/core/arch/arm/mm/tee_mmu.c
+++ b/core/arch/arm/mm/tee_mmu.c
@@ -78,153 +78,162 @@ static bitstr_t bit_decl(g_asid, MMU_NUM_ASIDS);
 
 static unsigned int g_asid_spinlock = SPINLOCK_UNLOCK;
 
-static TEE_Result tee_mmu_umap_add_param(struct tee_mmu_info *mmu,
-					 struct param_mem *mem)
+static vaddr_t select_va_in_range(vaddr_t prev_end, uint32_t prev_attr,
+				  vaddr_t next_begin, uint32_t next_attr,
+				  const struct vm_region *reg)
 {
-	TEE_Result res;
-	struct tee_ta_region *last_entry = NULL;
-	size_t n;
-	uint32_t attr = TEE_MMU_UDATA_ATTR;
-	size_t nsz;
-	size_t noffs;
-	size_t phys_offs;
+	size_t granul;
+	const uint32_t a = TEE_MATTR_EPHEMERAL | TEE_MATTR_PERMANENT;
+	size_t pad;
+	vaddr_t begin_va;
+	vaddr_t end_va;
 
-	if (!mobj_is_paged(mem->mobj)) {
-		uint32_t cattr;
+	/*
+	 * Insert an unmapped entry to separate regions with differing
+	 * TEE_MATTR_EPHEMERAL TEE_MATTR_PERMANENT bits as they never are
+	 * to be contiguous with another region.
+	 */
+	if (prev_attr && (prev_attr & a) != (reg->attr & a))
+		pad = SMALL_PAGE_SIZE;
+	else
+		pad = 0;
 
-		res = mobj_get_cattr(mem->mobj, &cattr);
-		if (res != TEE_SUCCESS)
-			return res;
-		attr |= cattr << TEE_MATTR_CACHE_SHIFT;
+	granul = SMALL_PAGE_SIZE;
+#ifndef CFG_WITH_LPAE
+	if ((prev_attr & TEE_MATTR_SECURE) != (reg->attr & TEE_MATTR_SECURE))
+		granul = CORE_MMU_PGDIR_SIZE;
+#endif
+	begin_va = ROUNDUP(prev_end + pad, granul);
+	if (reg->va) {
+		if (reg->va < begin_va)
+			return 0;
+		begin_va = reg->va;
 	}
 
-	if (!mobj_is_secure(mem->mobj))
-		attr &= ~TEE_MATTR_SECURE;
-
-	/* Check that we can map memory using this attribute */
-	if (!core_mmu_mattr_is_ok(attr))
-		return TEE_ERROR_BAD_PARAMETERS;
+	if (next_attr && (next_attr & a) != (reg->attr & a))
+		pad = SMALL_PAGE_SIZE;
+	else
+		pad = 0;
 
-	/* Find empty entry */
-	for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++)
-		if (!mmu->regions[n].size)
-			break;
+	granul = SMALL_PAGE_SIZE;
+#ifndef CFG_WITH_LPAE
+	if ((next_attr & TEE_MATTR_SECURE) != (reg->attr & TEE_MATTR_SECURE))
+		granul = CORE_MMU_PGDIR_SIZE;
+#endif
+	end_va = ROUNDUP(begin_va + reg->size + pad, granul);
 
-	if (n == TEE_MMU_UMAP_MAX_ENTRIES) {
-		/* No entries left "can't happen" */
-		return TEE_ERROR_EXCESS_DATA;
+	if (end_va <= next_begin) {
+		assert(!reg->va || reg->va == begin_va);
+		return begin_va;
 	}
 
-	phys_offs = mobj_get_phys_offs(mem->mobj, CORE_MMU_USER_PARAM_SIZE);
+	return 0;
+}
 
-	mmu->regions[n].mobj = mem->mobj;
-	mmu->regions[n].offset = ROUNDDOWN(phys_offs + mem->offs,
-					   CORE_MMU_USER_PARAM_SIZE);
-	mmu->regions[n].size = ROUNDUP(phys_offs + mem->offs -
-				       mmu->regions[n].offset + mem->size,
-				       CORE_MMU_USER_PARAM_SIZE);
-	mmu->regions[n].attr = attr;
+static TEE_Result umap_add_region(struct vm_info *vmi, struct vm_region *reg)
+{
+	struct vm_region *r;
+	struct vm_region *prev_r;
+	vaddr_t va_range_base;
+	size_t va_range_size;
+	vaddr_t va;
+
+	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
+
+	/* Check alignment, it has to be at least SMALL_PAGE based */
+	if ((reg->va | reg->size) & SMALL_PAGE_MASK)
+		return TEE_ERROR_ACCESS_CONFLICT;
+
+	/* Check that the mobj is defined for the entire range */
+	if ((reg->offset + reg->size) >
+	    ROUNDUP(reg->mobj->size, SMALL_PAGE_SIZE))
+		return TEE_ERROR_BAD_PARAMETERS;
 
-	/* Try to coalesce some entries */
-	while (true) {
-		/* Find last param */
-		n = TEE_MMU_UMAP_MAX_ENTRIES - 1;
-
-		while (!mmu->regions[n].size) {
-			n--;
-			if (n < TEE_MMU_UMAP_PARAM_IDX) {
-				/* No param entries found, "can't happen" */
-				return TEE_ERROR_BAD_STATE;
+	prev_r = NULL;
+	TAILQ_FOREACH(r, &vmi->regions, link) {
+		if (TAILQ_FIRST(&vmi->regions) == r) {
+			va = select_va_in_range(va_range_base, 0,
+						r->va, r->attr, reg);
+			if (va) {
+				reg->va = va;
+				TAILQ_INSERT_HEAD(&vmi->regions, reg, link);
+				return TEE_SUCCESS;
+			}
+		} else {
+			va = select_va_in_range(prev_r->va + prev_r->size,
+						prev_r->attr, r->va, r->attr,
+						reg);
+			if (va) {
+				reg->va = va;
+				TAILQ_INSERT_BEFORE(r, reg, link);
+				return TEE_SUCCESS;
 			}
 		}
+		prev_r = r;
+	}
 
-		if (last_entry == mmu->regions + n)
-			return TEE_SUCCESS; /* Can't coalesc more */
-		last_entry = mmu->regions + n;
-
-		n--;
-		while (n >= TEE_MMU_UMAP_PARAM_IDX) {
-			struct tee_ta_region *entry = mmu->regions + n;
-
-			n--;
-			if (last_entry->mobj != entry->mobj)
-				continue;
-
-			if ((last_entry->offset + last_entry->size) ==
-			    entry->offset ||
-			    (entry->offset + entry->size) ==
-			    last_entry->offset ||
-			    core_is_buffer_intersect(last_entry->offset,
-						     last_entry->size,
-						     entry->offset,
-						     entry->size)) {
-				noffs = MIN(last_entry->offset, entry->offset);
-				nsz = MAX(last_entry->offset + last_entry->size,
-					  entry->offset + entry->size) - noffs;
-				entry->offset = noffs;
-				entry->size = nsz;
-				last_entry->mobj = NULL;
-				last_entry->size = 0;
-				last_entry->attr = 0;
-				break;
-			}
+	r = TAILQ_LAST(&vmi->regions, vm_region_head);
+	if (r) {
+		va = select_va_in_range(r->va + r->size, r->attr,
+					va_range_base + va_range_size, 0, reg);
+		if (va) {
+			reg->va = va;
+			TAILQ_INSERT_TAIL(&vmi->regions, reg, link);
+			return TEE_SUCCESS;
+		}
+	} else {
+		va = select_va_in_range(va_range_base, 0,
+					va_range_base + va_range_size, 0, reg);
+		if (va) {
+			reg->va = va;
+			TAILQ_INSERT_HEAD(&vmi->regions, reg, link);
+			return TEE_SUCCESS;
 		}
 	}
+
+	return TEE_ERROR_ACCESS_CONFLICT;
 }
 
-static TEE_Result tee_mmu_umap_set_vas(struct tee_mmu_info *mmu)
+TEE_Result vm_map(struct user_ta_ctx *utc, vaddr_t *va, size_t len,
+		  uint32_t prot, struct mobj *mobj, size_t offs)
 {
-	const size_t granule = CORE_MMU_USER_PARAM_SIZE;
-	vaddr_t va_range_base;
-	vaddr_t va;
-	size_t va_range_size;
-	size_t n;
+	TEE_Result res;
+	struct vm_region *reg = calloc(1, sizeof(*reg));
+	uint32_t attr = 0;
+	const uint32_t prot_mask = TEE_MATTR_PROT_MASK | TEE_MATTR_PERMANENT |
+				   TEE_MATTR_EPHEMERAL;
 
-	/* Find last table entry used to map code and data */
-	n = TEE_MMU_UMAP_PARAM_IDX - 1;
-	while (n && !mmu->regions[n].size)
-		n--;
-	va = mmu->regions[n].va + mmu->regions[n].size;
-	assert(va);
+	if (!reg)
+		return TEE_ERROR_OUT_OF_MEMORY;
 
-	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
-	assert(va_range_base == mmu->ta_private_vmem_start);
+	if (prot & ~prot_mask)
+		return TEE_ERROR_BAD_PARAMETERS;
 
-	/*
-	 * Assign parameters in secure memory.
-	 */
-	va = ROUNDUP(va, granule);
-	for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++) {
-		if (!mmu->regions[n].size ||
-		    !(mmu->regions[n].attr & TEE_MATTR_SECURE))
-			continue;
-		mmu->regions[n].va = va;
-		va += mmu->regions[n].size;
-		/* Put some empty space between each area */
-		va += granule;
-		if ((va - va_range_base) >= va_range_size)
-			return TEE_ERROR_EXCESS_DATA;
-	}
+	if (!mobj_is_paged(mobj)) {
+		uint32_t cattr;
 
-	/*
-	 * Assign parameters in nonsecure shared memory.
-	 * Note that we're making sure that they will reside in a new page
-	 * directory as they are to be mapped nonsecure.
-	 */
-	va = ROUNDUP(va, CORE_MMU_PGDIR_SIZE);
-	for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++) {
-		if (!mmu->regions[n].size ||
-		    (mmu->regions[n].attr & TEE_MATTR_SECURE))
-			continue;
-		mmu->regions[n].va = va;
-		va += mmu->regions[n].size;
-		/* Put some empty space between each area */
-		va += granule;
-		if ((va - va_range_base) >= va_range_size)
-			return TEE_ERROR_EXCESS_DATA;
+		res = mobj_get_cattr(mobj, &cattr);
+		if (res)
+			return res;
+		attr |= cattr << TEE_MATTR_CACHE_SHIFT;
 	}
-
-	return TEE_SUCCESS;
+	attr |= TEE_MATTR_VALID_BLOCK;
+	if (mobj_is_secure(mobj))
+		attr |= TEE_MATTR_SECURE;
+
+	reg->mobj = mobj;
+	reg->offset = offs;
+	reg->va = *va;
+	reg->size = ROUNDUP(len, SMALL_PAGE_SIZE);
+	reg->attr = attr | prot;
+
+	res = umap_add_region(utc->vm_info, reg);
+	if (res)
+		free(reg);
+	else
+		*va = reg->va;
+
+	return res;
 }
 
 static unsigned int asid_alloc(void)
@@ -262,7 +271,7 @@ static void asid_free(unsigned int asid)
 	cpu_spin_unlock_xrestore(&g_asid_spinlock, exceptions);
 }
 
-TEE_Result tee_mmu_init(struct user_ta_ctx *utc)
+TEE_Result vm_info_init(struct user_ta_ctx *utc)
 {
 	uint32_t asid = asid_alloc();
 
@@ -271,24 +280,63 @@ TEE_Result tee_mmu_init(struct user_ta_ctx *utc)
 		return TEE_ERROR_GENERIC;
 	}
 
-	utc->mmu = calloc(1, sizeof(struct tee_mmu_info));
-	if (!utc->mmu) {
+	utc->vm_info = calloc(1, sizeof(*utc->vm_info));
+	if (!utc->vm_info) {
 		asid_free(asid);
 		return TEE_ERROR_OUT_OF_MEMORY;
 	}
-	utc->mmu->asid = asid;
-	core_mmu_get_user_va_range(&utc->mmu->ta_private_vmem_start, NULL);
+	TAILQ_INIT(&utc->vm_info->regions);
+	utc->vm_info->asid = asid;
 	return TEE_SUCCESS;
 }
 
-static TEE_Result alloc_pgt(struct user_ta_ctx *utc __maybe_unused,
-			    vaddr_t base, vaddr_t end)
+static bool get_ta_private_range(const struct user_ta_ctx *utc, vaddr_t *start,
+				 vaddr_t *end)
+{
+	struct vm_region *r;
+
+	if (TAILQ_EMPTY(&utc->vm_info->regions))
+		return false;
+
+	*start = TAILQ_FIRST(&utc->vm_info->regions)->va;
+	r = TAILQ_LAST(&utc->vm_info->regions, vm_region_head);
+	*end = r->va + r->size;
+	return true;
+}
+
+void vm_info_get_user_range(struct user_ta_ctx *utc, vaddr_t *vstart,
+			    vaddr_t *vend)
+{
+	struct vm_region *r;
+
+	TAILQ_FOREACH(r, &utc->vm_info->regions, link)
+		if (!(r->attr & TEE_MATTR_PERMANENT))
+			break;
+
+	if (!r) {
+		core_mmu_get_user_va_range(vstart, NULL);
+		*vend = *vstart;
+		return;
+	}
+
+	*vstart = r->va;
+	r = TAILQ_LAST(&utc->vm_info->regions, vm_region_head);
+	*vend = r->va + r->size;
+}
+
+static TEE_Result alloc_pgt(struct user_ta_ctx *utc)
 {
 	struct thread_specific_data *tsd __maybe_unused;
-	vaddr_t b = ROUNDDOWN(base, CORE_MMU_PGDIR_SIZE);
-	vaddr_t e = ROUNDUP(end, CORE_MMU_PGDIR_SIZE);
-	size_t ntbl = (e - b) >> CORE_MMU_PGDIR_SHIFT;
+	vaddr_t b;
+	vaddr_t e;
+	size_t ntbl;
 
+	if (!get_ta_private_range(utc, &b, &e))
+		return TEE_SUCCESS;
+
+	b = ROUNDDOWN(b, CORE_MMU_PGDIR_SIZE);
+	e = ROUNDUP(e, CORE_MMU_PGDIR_SIZE);
+	ntbl = (e - b) >> CORE_MMU_PGDIR_SHIFT;
 	if (!pgt_check_avail(ntbl)) {
 		EMSG("%zu page tables not available", ntbl);
 		return TEE_ERROR_OUT_OF_MEMORY;
@@ -319,168 +367,59 @@ static void free_pgt(struct user_ta_ctx *utc, vaddr_t base, size_t size)
 	pgt_flush_ctx_range(pgt_cache, &utc->ctx, base, base + size);
 }
 
-static vaddr_t get_stack_va(struct user_ta_ctx *utc)
+static TEE_Result map_kinit(struct user_ta_ctx *utc __maybe_unused)
 {
 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
-	struct tee_ta_region *r = utc->mmu->regions +
-				  TEE_MMU_UMAP_STACK_IDX - 1;
+	struct mobj *mobj;
+	size_t offs;
+	vaddr_t va;
+	size_t sz;
 
-	return r->va + r->size;
+	thread_get_user_kcode(&mobj, &offs, &va, &sz);
+	return vm_map(utc, &va, sz, TEE_MATTR_PRX | TEE_MATTR_PERMANENT,
+		      mobj, offs);
 #else
-	return utc->mmu->ta_private_vmem_start;
-#endif
-}
-
-void tee_mmu_map_stack(struct user_ta_ctx *utc, struct mobj *mobj)
-{
-	const size_t granule = CORE_MMU_USER_CODE_SIZE;
-	struct tee_ta_region *region = utc->mmu->regions +
-				       TEE_MMU_UMAP_STACK_IDX;
-
-	region->mobj = mobj;
-	region->offset = 0;
-	region->va = get_stack_va(utc);
-	region->size = ROUNDUP(utc->mobj_stack->size, granule);
-	region->attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
-		       TEE_MATTR_URW | TEE_MATTR_PRW |
-		       (TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT);
+	return TEE_SUCCESS;
+#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
 }
 
-TEE_Result tee_mmu_map_add_segment(struct user_ta_ctx *utc, struct mobj *mobj,
-				   size_t offs, size_t size, uint32_t prot)
+TEE_Result tee_mmu_map_init(struct user_ta_ctx *utc)
 {
-	const uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
-			      (TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT);
-	const size_t granule = CORE_MMU_USER_CODE_SIZE;
-	struct tee_ta_region *tbl = utc->mmu->regions;
-	vaddr_t va;
-	vaddr_t end_va;
-	size_t n = TEE_MMU_UMAP_CODE_IDX;
-	size_t o;
-
-	if (!tbl[n].size) {
-		/* We're continuing the va space from previous entry. */
-		assert(tbl[n - 1].size);
-
-		/* This is the first segment */
-		va = tbl[n - 1].va + tbl[n - 1].size;
-		end_va = ROUNDUP((offs & (granule - 1)) + size, granule) + va;
-		o = ROUNDDOWN(offs, granule);
-		goto set_entry;
-	}
-
-	/*
-	 * mobj of code segments must not change once the first is
-	 * assigned.
-	 */
-	if (mobj != tbl[n].mobj)
-		return TEE_ERROR_SECURITY;
-
-	/*
-	 * Let's find an entry we overlap with or if we need to add a new
-	 * entry.
-	 */
-	o = offs - tbl[n].offset;
-	va = ROUNDDOWN(o, granule) + tbl[n].va;
-	end_va = ROUNDUP(o + size, granule) + tbl[n].va;
-	o = ROUNDDOWN(offs, granule);
 	while (true) {
-		if (va >= (tbl[n].va + tbl[n].size)) {
-			n++;
-			if (n >= TEE_MMU_UMAP_PARAM_IDX)
-				return TEE_ERROR_SECURITY;
-			if (!tbl[n].size)
-				goto set_entry;
-			continue;
-		}
+		struct vm_region *r = TAILQ_FIRST(&utc->vm_info->regions);
 
-		/*
-		 * There's at least partial overlap with this entry
-		 *
-		 * Since we're overlapping there should be at least one
-		 * free entry after this.
-		 */
-		if (((n + 1) >= TEE_MMU_UMAP_PARAM_IDX) || tbl[n + 1].size)
-			return TEE_ERROR_SECURITY;
-
-		/* offset must match or the segments aren't added in order */
-		if (o != (va - tbl[n].va + tbl[n].offset))
-			return TEE_ERROR_SECURITY;
-		/* We should only overlap in the last granule of the entry. */
-		if ((va + granule) < (tbl[n].va + tbl[n].size))
-			return TEE_ERROR_SECURITY;
-
-		/* Merge protection attribute for this entry */
-		tbl[n].attr |= prot;
-
-		va += granule;
-		/* If the segment was completely overlapped, we're done. */
-		if (va == end_va)
-			return TEE_SUCCESS;
-		o += granule;
-		n++;
-		goto set_entry;
+		if (!r)
+			break;
+		TAILQ_REMOVE(&utc->vm_info->regions, r, link);
+		free(r);
 	}
-
-set_entry:
-	tbl[n].mobj = mobj;
-	tbl[n].va = va;
-	tbl[n].offset = o;
-	tbl[n].size = end_va - va;
-	tbl[n].attr = prot | attr;
-
-	utc->mmu->ta_private_vmem_end = tbl[n].va + tbl[n].size;
-	/*
-	 * Check that we have enough translation tables available to map
-	 * this TA.
-	 */
-	return alloc_pgt(utc, utc->mmu->ta_private_vmem_start,
-			 utc->mmu->ta_private_vmem_end);
-}
-
-static void map_kinit(struct user_ta_ctx *utc __maybe_unused)
-{
-#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
-	struct tee_ta_region *regions = utc->mmu->regions;
-	const uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
-			      (TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT);
-	unsigned int idx;
-
-	/* Add an entry for kernel code being mapped while in user mode */
-	idx = TEE_MMU_UMAP_KCODE_IDX;
-	thread_get_user_kcode(&regions[idx].mobj, &regions[idx].offset,
-			      &regions[idx].va, &regions[idx].size);
-	regions[idx].attr = attr | TEE_MATTR_PRX;
-	assert(regions[idx].va == utc->mmu->ta_private_vmem_start);
-#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
-}
-
-void tee_mmu_map_init(struct user_ta_ctx *utc)
-{
-	utc->mmu->ta_private_vmem_end = 0;
-	memset(utc->mmu->regions, 0, sizeof(utc->mmu->regions));
-	map_kinit(utc);
+	return map_kinit(utc);
 }
 
 static void clear_param_map(struct user_ta_ctx *utc)
 {
-	const size_t n = TEE_MMU_UMAP_PARAM_IDX;
-	const size_t array_size = ARRAY_SIZE(utc->mmu->regions);
+	struct vm_region *next_r;
+	struct vm_region *r;
 
-	memset(utc->mmu->regions + n, 0,
-	       (array_size - n) * sizeof(utc->mmu->regions[0]));
+	TAILQ_FOREACH_SAFE(r, &utc->vm_info->regions, link, next_r) {
+		if (r->attr & TEE_MATTR_EPHEMERAL) {
+			TAILQ_REMOVE(&utc->vm_info->regions, r, link);
+			free(r);
+		}
+	}
 }
 
 static TEE_Result param_mem_to_user_va(struct user_ta_ctx *utc,
 				       struct param_mem *mem, void **user_va)
 {
-	size_t n;
+	struct vm_region *region;
 
-	for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++) {
-		struct tee_ta_region *region = utc->mmu->regions + n;
+	TAILQ_FOREACH(region, &utc->vm_info->regions, link) {
 		vaddr_t va;
 		size_t phys_offs;
 
+		if (!(region->attr & TEE_MATTR_EPHEMERAL))
+			continue;
 		if (mem->mobj != region->mobj)
 			continue;
 		if (mem->offs < region->offset)
@@ -496,227 +435,240 @@ static TEE_Result param_mem_to_user_va(struct user_ta_ctx *utc,
 	return TEE_ERROR_GENERIC;
 }
 
+static int cmp_param_mem(const void *a0, const void *a1)
+{
+	const struct param_mem *m1 = a1;
+	const struct param_mem *m0 = a0;
+	int ret;
+
+	/* Make sure that invalid param_mem are placed last in the array */
+	if (!m0->size && !m1->size)
+		return 0;
+	if (!m0->size)
+		return 1;
+	if (!m1->size)
+		return -1;
+
+	ret = CMP_TRILEAN(mobj_is_secure(m0->mobj), mobj_is_secure(m1->mobj));
+	if (ret)
+		return ret;
+
+	ret = CMP_TRILEAN((vaddr_t)m0->mobj, (vaddr_t)m1->mobj);
+	if (ret)
+		return ret;
+
+	ret = CMP_TRILEAN(m0->offs, m1->offs);
+	if (ret)
+		return ret;
+
+	return CMP_TRILEAN(m0->size, m1->size);
+}
+
 TEE_Result tee_mmu_map_param(struct user_ta_ctx *utc,
 		struct tee_ta_param *param, void *param_va[TEE_NUM_PARAMS])
 {
 	TEE_Result res = TEE_SUCCESS;
 	size_t n;
+	size_t m;
+	struct param_mem mem[TEE_NUM_PARAMS];
 
-	/* Clear all the param entries as they can hold old information */
-	clear_param_map(utc);
-
-	/* Map secure memory params first then nonsecure memory params */
+	memset(mem, 0, sizeof(mem));
 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
-		struct param_mem *mem = &param->u[n].mem;
+		size_t phys_offs;
 
 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
 			continue;
-		if (!mem->size)
-			continue;
-		if (mobj_is_nonsec(mem->mobj))
-			continue;
-
-		res = tee_mmu_umap_add_param(utc->mmu, mem);
-		if (res != TEE_SUCCESS)
-			return res;
+		phys_offs = mobj_get_phys_offs(param->u[n].mem.mobj,
+					       CORE_MMU_USER_PARAM_SIZE);
+		mem[n].mobj = param->u[n].mem.mobj;
+		mem[n].offs = ROUNDDOWN(phys_offs + param->u[n].mem.offs,
+					CORE_MMU_USER_PARAM_SIZE);
+		mem[n].size = ROUNDUP(phys_offs + param->u[n].mem.offs -
+				      mem[n].offs + param->u[n].mem.size,
+				      CORE_MMU_USER_PARAM_SIZE);
 	}
-	for (n = 0; n < TEE_NUM_PARAMS; n++) {
-		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
-		struct param_mem *mem = &param->u[n].mem;
 
-		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
-		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
-		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
-			continue;
-		if (!mem->size)
-			continue;
-		if (!mobj_is_nonsec(mem->mobj))
+	/*
+	 * Sort arguments so size = 0 is last, secure mobjs first, then by
+	 * mobj pointer value since those entries can't be merged either,
+	 * finally by offset.
+	 *
+	 * This should result in a list where all mergeable entries are
+	 * next to each other and unused/invalid entries are at the end.
+	 */
+	qsort(mem, TEE_NUM_PARAMS, sizeof(struct param_mem), cmp_param_mem);
+
+	for (n = 1, m = 0; n < TEE_NUM_PARAMS && mem[n].size; n++) {
+		if (mem[n].mobj == mem[m].mobj &&
+		    (mem[n].offs == (mem[m].offs + mem[m].size) ||
+		     core_is_buffer_intersect(mem[m].offs, mem[m].size,
+					      mem[n].offs, mem[n].size))) {
+			mem[m].size = mem[n].offs + mem[n].size - mem[m].offs;
 			continue;
+		}
+		m++;
+		if (n != m)
+			mem[m] = mem[n];
+	}
+	/*
+	 * We'd like 'm' to be the number of valid entries. Here 'm' is the
+	 * index of the last valid entry if the first entry is valid, else
+	 * 0.
+	 */
+	if (mem[0].size)
+		m++;
 
-		res = tee_mmu_umap_add_param(utc->mmu, mem);
-		if (res != TEE_SUCCESS)
+	/* Clear all the param entries as they can hold old information */
+	clear_param_map(utc);
+
+	for (n = 0; n < m; n++) {
+		vaddr_t va = 0;
+		const uint32_t prot = TEE_MATTR_PRW | TEE_MATTR_URW |
+				      TEE_MATTR_EPHEMERAL;
+
+		res = vm_map(utc, &va, mem[n].size, prot, mem[n].mobj,
+			     mem[n].offs);
+		if (res)
 			return res;
 	}
 
-	res = tee_mmu_umap_set_vas(utc->mmu);
-	if (res != TEE_SUCCESS)
-		return res;
-
 	for (n = 0; n < TEE_NUM_PARAMS; n++) {
 		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
-		struct param_mem *mem = &param->u[n].mem;
 
 		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
 		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
 		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
 			continue;
-		if (mem->size == 0)
+		if (param->u[n].mem.size == 0)
 			continue;
 
-		res = param_mem_to_user_va(utc, mem, param_va + n);
+		res = param_mem_to_user_va(utc, &param->u[n].mem, param_va + n);
 		if (res != TEE_SUCCESS)
 			return res;
 	}
 
-	utc->mmu->ta_private_vmem_start = utc->mmu->regions[0].va;
-
-	n = ARRAY_SIZE(utc->mmu->regions);
-	do {
-		n--;
-	} while (n && !utc->mmu->regions[n].size);
-
-	return alloc_pgt(utc, utc->mmu->ta_private_vmem_start,
-			 utc->mmu->regions[n].va + utc->mmu->regions[n].size);
+	return alloc_pgt(utc);
 }
 
 TEE_Result tee_mmu_add_rwmem(struct user_ta_ctx *utc, struct mobj *mobj,
 			     vaddr_t *va)
 {
-	struct tee_ta_region *reg = NULL;
-	struct tee_ta_region *last_reg;
-	vaddr_t v;
-	vaddr_t end_v;
-	size_t n;
-
-	/*
-	 * Avoid the corner case when no regions are assigned, currently
-	 * stack and code areas are always assigned before we end up here.
-	 */
-	if (!utc->mmu->regions[0].size)
-		return TEE_ERROR_GENERIC;
-
-	for (n = 1; n < ARRAY_SIZE(utc->mmu->regions); n++) {
-		if (!reg && utc->mmu->regions[n].size)
-			continue;
-		last_reg = utc->mmu->regions + n;
+	TEE_Result res;
+	struct vm_region *reg = calloc(1, sizeof(*reg));
 
-		if (!reg) {
-			reg = last_reg;
-			v = ROUNDUP((reg - 1)->va + (reg - 1)->size,
-				    SMALL_PAGE_SIZE);
-#ifndef CFG_WITH_LPAE
-			/*
-			 * Non-LPAE mappings can't mix secure and
-			 * non-secure in a single pgdir.
-			 */
-			if (mobj_is_secure((reg - 1)->mobj) !=
-			    mobj_is_secure(mobj))
-				v = ROUNDUP(v, CORE_MMU_PGDIR_SIZE);
-#endif
-			end_v = ROUNDUP(v + mobj->size, SMALL_PAGE_SIZE);
-			continue;
-		}
+	if (!reg)
+		return TEE_ERROR_OUT_OF_MEMORY;
 
-		if (!last_reg->size)
-			continue;
-		/*
-		 * There's one registered region after our selected spot,
-		 * check if we can still fit or if we need a later spot.
-		 */
-		if (end_v > last_reg->va) {
-			reg = NULL;
-			continue;
-		}
-#ifndef CFG_WITH_LPAE
-		if (mobj_is_secure(mobj) != mobj_is_secure(last_reg->mobj) &&
-		    end_v > ROUNDDOWN(last_reg->va, CORE_MMU_PGDIR_SIZE))
-			reg = NULL;
-#endif
+	reg->mobj = mobj;
+	reg->offset = 0;
+	reg->va = 0;
+	reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
+	if (mobj_is_secure(mobj))
+		reg->attr = TEE_MATTR_SECURE;
+	else
+		reg->attr = 0;
+
+	res = umap_add_region(utc->vm_info, reg);
+	if (res) {
+		free(reg);
+		return res;
 	}
 
-	if (reg) {
-		TEE_Result res;
-
-		end_v = MAX(end_v, last_reg->va + last_reg->size);
-		res = alloc_pgt(utc, utc->mmu->ta_private_vmem_start, end_v);
-		if (res != TEE_SUCCESS)
-			return res;
-
-		*va = v;
-		reg->va = v;
-		reg->mobj = mobj;
-		reg->offset = 0;
-		reg->size = ROUNDUP(mobj->size, SMALL_PAGE_SIZE);
-		if (mobj_is_secure(mobj))
-			reg->attr = TEE_MATTR_SECURE;
-		else
-			reg->attr = 0;
-		return TEE_SUCCESS;
+	res = alloc_pgt(utc);
+	if (res) {
+		TAILQ_REMOVE(&utc->vm_info->regions, reg, link);
+		free(reg);
+	} else {
+		*va = reg->va;
 	}
 
-	return TEE_ERROR_OUT_OF_MEMORY;
+	return res;
 }
 
 void tee_mmu_rem_rwmem(struct user_ta_ctx *utc, struct mobj *mobj, vaddr_t va)
 {
-	size_t n;
-
-	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
-		struct tee_ta_region *reg = utc->mmu->regions + n;
+	struct vm_region *reg;
 
+	TAILQ_FOREACH(reg, &utc->vm_info->regions, link) {
 		if (reg->mobj == mobj && reg->va == va) {
 			free_pgt(utc, reg->va, reg->size);
-			memset(reg, 0, sizeof(*reg));
+			TAILQ_REMOVE(&utc->vm_info->regions, reg, link);
+			free(reg);
 			return;
 		}
 	}
 }
 
-/*
- * tee_mmu_final - finalise and free ctx mmu
- */
-void tee_mmu_final(struct user_ta_ctx *utc)
+void vm_info_final(struct user_ta_ctx *utc)
 {
-	if (!utc->mmu)
+	if (!utc->vm_info)
 		return;
 
 	/* clear MMU entries to avoid clash when asid is reused */
-	tlbi_asid(utc->mmu->asid);
+	tlbi_asid(utc->vm_info->asid);
 
-	asid_free(utc->mmu->asid);
-	free(utc->mmu);
-	utc->mmu = NULL;
+	asid_free(utc->vm_info->asid);
+	while (!TAILQ_EMPTY(&utc->vm_info->regions)) {
+		struct vm_region *r = TAILQ_FIRST(&utc->vm_info->regions);
+
+		TAILQ_REMOVE(&utc->vm_info->regions, r, link);
+		free(r);
+	}
+	free(utc->vm_info);
+	utc->vm_info = NULL;
 }
 
 /* return true only if buffer fits inside TA private memory */
 bool tee_mmu_is_vbuf_inside_ta_private(const struct user_ta_ctx *utc,
 				       const void *va, size_t size)
 {
-	return core_is_buffer_inside(va, size,
-		utc->mmu->ta_private_vmem_start,
-		utc->mmu->ta_private_vmem_end - utc->mmu->ta_private_vmem_start);
+	struct vm_region *r;
+
+	TAILQ_FOREACH(r, &utc->vm_info->regions, link) {
+		if (r->attr & (TEE_MATTR_EPHEMERAL | TEE_MATTR_PERMANENT))
+			continue;
+		if (core_is_buffer_inside(va, size, r->va, r->size))
+			return true;
+	}
+
+	return false;
 }
 
 /* return true only if buffer intersects TA private memory */
 bool tee_mmu_is_vbuf_intersect_ta_private(const struct user_ta_ctx *utc,
 					  const void *va, size_t size)
 {
-	return core_is_buffer_intersect(va, size,
-		utc->mmu->ta_private_vmem_start,
-		utc->mmu->ta_private_vmem_end - utc->mmu->ta_private_vmem_start);
+	struct vm_region *r;
+
+	TAILQ_FOREACH(r, &utc->vm_info->regions, link) {
+		if (r->attr & (TEE_MATTR_EPHEMERAL | TEE_MATTR_PERMANENT))
+			continue;
+		if (core_is_buffer_intersect(va, size, r->va, r->size))
+			return true;
+	}
+
+	return false;
 }
 
 TEE_Result tee_mmu_vbuf_to_mobj_offs(const struct user_ta_ctx *utc,
 				     const void *va, size_t size,
 				     struct mobj **mobj, size_t *offs)
 {
-	size_t n;
+	struct vm_region *r;
 
-	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
-		if (!utc->mmu->regions[n].mobj)
+	TAILQ_FOREACH(r, &utc->vm_info->regions, link) {
+		if (!r->mobj)
 			continue;
-		if (core_is_buffer_inside(va, size, utc->mmu->regions[n].va,
-					  utc->mmu->regions[n].size)) {
+		if (core_is_buffer_inside(va, size, r->va, r->size)) {
 			size_t poffs;
 
-			poffs = mobj_get_phys_offs(utc->mmu->regions[n].mobj,
+			poffs = mobj_get_phys_offs(r->mobj,
 						   CORE_MMU_USER_PARAM_SIZE);
-			*mobj = utc->mmu->regions[n].mobj;
-			*offs = (vaddr_t)va - utc->mmu->regions[n].va +
-				utc->mmu->regions[n].offset - poffs;
+			*mobj = r->mobj;
+			*offs = (vaddr_t)va - r->va + r->offset - poffs;
 			return TEE_SUCCESS;
 		}
 	}
@@ -727,11 +679,9 @@ TEE_Result tee_mmu_vbuf_to_mobj_offs(const struct user_ta_ctx *utc,
 static TEE_Result tee_mmu_user_va2pa_attr(const struct user_ta_ctx *utc,
 			void *ua, paddr_t *pa, uint32_t *attr)
 {
-	size_t n;
-
-	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
-		struct tee_ta_region *region = &utc->mmu->regions[n];
+	struct vm_region *region;
 
+	TAILQ_FOREACH(region, &utc->vm_info->regions, link) {
 		if (!core_is_buffer_inside(ua, 1, region->va, region->size))
 			continue;
 
@@ -763,7 +713,7 @@ static TEE_Result tee_mmu_user_va2pa_attr(const struct user_ta_ctx *utc,
 			*pa = p | ((vaddr_t)ua & (granule - 1));
 		}
 		if (attr)
-			*attr = utc->mmu->regions[n].attr;
+			*attr = region->attr;
 		return TEE_SUCCESS;
 	}
 
@@ -783,10 +733,9 @@ TEE_Result tee_mmu_user_pa2va_helper(const struct user_ta_ctx *utc,
 {
 	TEE_Result res;
 	paddr_t p;
-	size_t n;
+	struct vm_region *region;
 
-	for (n = 0; n < ARRAY_SIZE(utc->mmu->regions); n++) {
-		struct tee_ta_region *region = &utc->mmu->regions[n];
+	TAILQ_FOREACH(region, &utc->vm_info->regions, link) {
 		size_t granule;
 		size_t size;
 		size_t ofs;
@@ -908,14 +857,6 @@ struct tee_ta_ctx *tee_mmu_get_ctx(void)
 	return thread_get_tsd()->ctx;
 }
 
-uintptr_t tee_mmu_get_load_addr(const struct tee_ta_ctx *const ctx)
-{
-	const struct user_ta_ctx *utc = to_user_ta_ctx((void *)ctx);
-
-	assert(utc->mmu);
-	return utc->mmu->regions[TEE_MMU_UMAP_CODE_IDX].va;
-}
-
 void teecore_init_ta_ram(void)
 {
 	vaddr_t s;
diff --git a/core/include/mm/tee_mmu.h b/core/include/mm/tee_mmu.h
index 989a1b5f..6f9d5b32 100644
--- a/core/include/mm/tee_mmu.h
+++ b/core/include/mm/tee_mmu.h
@@ -35,23 +35,39 @@
 /*-----------------------------------------------------------------------------
  * Allocate context resources like ASID and MMU table information
  *---------------------------------------------------------------------------*/
-TEE_Result tee_mmu_init(struct user_ta_ctx *utc);
+TEE_Result vm_info_init(struct user_ta_ctx *utc);
 
 /*-----------------------------------------------------------------------------
- * tee_mmu_final - Release context resources like ASID
+ * Release context resources like ASID
  *---------------------------------------------------------------------------*/
-void tee_mmu_final(struct user_ta_ctx *utc);
+void vm_info_final(struct user_ta_ctx *utc);
+
+/*
+ * Returns range of user mapping (excluding eventual permanent kernel mapping).
+ */
+void vm_info_get_user_range(struct user_ta_ctx *utc, vaddr_t *vstart,
+			    vaddr_t *vend);
+
+/*
+ * Creates a memory map of a mobj.
+ * Desired virtual address can be specified in @va otherwise @va must be
+ * initialized to 0 if the next available can be chosen.
+ */
+TEE_Result vm_map(struct user_ta_ctx *utc, vaddr_t *va, size_t len,
+		  uint32_t prot, struct mobj *mobj, size_t offs);
 
 /* Map stack of a user TA. */
-void tee_mmu_map_stack(struct user_ta_ctx *utc, struct mobj *mobj);
+TEE_Result tee_mmu_map_stack(struct user_ta_ctx *utc, struct mobj *mobj);
 
 /*
  * Map a code segment of a user TA, this function may be called multiple
  * times if there's several segments.
  */
 TEE_Result tee_mmu_map_add_segment(struct user_ta_ctx *utc, struct mobj *mobj,
-				   size_t offs, size_t size, uint32_t prot);
+				   size_t offs, size_t size, uint32_t prot,
+				   vaddr_t *va);
 
-void tee_mmu_map_init(struct user_ta_ctx *utc);
+TEE_Result tee_mmu_map_init(struct user_ta_ctx *utc);
 
 /* Map parameters for a user TA */
 TEE_Result tee_mmu_map_param(struct user_ta_ctx *utc,
diff --git a/core/include/mm/tee_mmu_types.h b/core/include/mm/tee_mmu_types.h
index 9f61548c..221a9426 100644
--- a/core/include/mm/tee_mmu_types.h
+++ b/core/include/mm/tee_mmu_types.h
@@ -29,6 +29,7 @@
 #define TEE_MMU_TYPES_H
 
 #include <stdint.h>
+#include <sys/queue.h>
 #include <util.h>
 
 #define TEE_MATTR_VALID_BLOCK	BIT(0)
@@ -93,18 +94,19 @@ struct tee_mmap_region {
 	uint32_t attr; /* TEE_MATTR_* above */
 };
 
-struct tee_ta_region {
+struct vm_region {
 	struct mobj *mobj;
 	size_t offset;
 	vaddr_t va;
 	size_t size;
 	uint32_t attr; /* TEE_MATTR_* above */
+	TAILQ_ENTRY(vm_region) link;
 };
 
-struct tee_mmu_info {
-	struct tee_ta_region regions[TEE_MMU_UMAP_MAX_ENTRIES];
-	vaddr_t ta_private_vmem_start;
-	vaddr_t ta_private_vmem_end;
+TAILQ_HEAD(vm_region_head, vm_region);
+
+struct vm_info {
+	struct vm_region_head regions;
 	unsigned int asid;
 };
diff --git a/core/kernel/tee_ta_manager.c b/core/kernel/tee_ta_manager.c
index 010d7fc5..4a11d343 100644
--- a/core/kernel/tee_ta_manager.c
+++ b/core/kernel/tee_ta_manager.c
@@ -710,7 +710,7 @@ static void update_current_ctx(struct thread_specific_data *tsd)
 	 * if ctx->mmu != NULL we must have user mapping active.
 	 */
 	if (((ctx && is_user_ta_ctx(ctx) ?
-	      to_user_ta_ctx(ctx)->mmu : NULL) == NULL) ==
+	      to_user_ta_ctx(ctx)->vm_info : NULL) == NULL) ==
 	    core_mmu_user_mapping_is_active())
 		panic("unexpected active mapping");
 }
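As a closing note, the ELF segment handling added to load_elf_segments() above
is a plain sort-then-merge pass over page-rounded [offs, oend) ranges. A
standalone toy version for illustration (the helper name merge_segs() is
hypothetical; since the array is sorted, a simple neighbour-overlap test
stands in for core_is_buffer_intersect(), and memmove is used where the patch
uses memcpy on overlapping ranges):

        #include <stddef.h>
        #include <stdint.h>
        #include <stdlib.h>
        #include <string.h>

        #define CMP_TRILEAN(a, b) ((a) > (b) ? 1 : (a) < (b) ? -1 : 0)
        #define MAX(a, b) ((a) > (b) ? (a) : (b))

        struct load_seg {
                uintptr_t offs; /* page-rounded start offset */
                uint32_t prot;  /* TEE_MATTR_* protection bits */
                uintptr_t oend; /* page-rounded end offset */
        };

        static int cmp_load_seg(const void *a0, const void *a1)
        {
                const struct load_seg *s0 = a0;
                const struct load_seg *s1 = a1;

                return CMP_TRILEAN(s0->offs, s1->offs);
        }

        /* Sort by offset, merge overlapping neighbours, return new count. */
        static size_t merge_segs(struct load_seg *segs, size_t num_segs)
        {
                size_t idx = 1;

                qsort(segs, num_segs, sizeof(*segs), cmp_load_seg);
                while (idx < num_segs) {
                        if (segs[idx].offs < segs[idx - 1].oend) {
                                /* Overlap: widen previous, OR the prot bits */
                                segs[idx - 1].oend = MAX(segs[idx - 1].oend,
                                                         segs[idx].oend);
                                segs[idx - 1].prot |= segs[idx].prot;
                                memmove(segs + idx, segs + idx + 1,
                                        (num_segs - idx - 1) * sizeof(*segs));
                                num_segs--;
                        } else {
                                idx++;
                        }
                }
                return num_segs;
        }

For example, segments [0x0, 0x2000) and [0x1000, 0x3000) collapse into a
single [0x0, 0x3000) entry carrying the OR of both protection masks, so each
surviving entry corresponds to exactly one vm_map() call.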