diff options
author | Etienne Carriere <etienne.carriere@linaro.org> | 2017-06-16 11:23:50 +0200 |
---|---|---|
committer | Jérôme Forissier <jerome.forissier@linaro.org> | 2017-06-16 16:44:14 +0200 |
commit | a3ea24cfecb47a3049933778cc25f9e72cccb4d0 (patch) | |
tree | a93aa6eacf03cf5152163c51cabda6aec5f55f5c /core/arch/arm/mm/core_mmu_v7.c | |
parent | 4e1faa2f3ba73d73a48be6c81f8e8b4d26b733ec (diff) |
core: clarify end of static mapping table
Move remaining code relying on null size value for detecting end
of static mapping table with a test on type value. This is made
consistent between lpae and non-lpae implementations.
Rename MEM_AREA_NOTYPE into MEM_AREA_END as it is dedicated to this
specific purpose.
Faulty core_mmu_get_type_by_pa() can return MEM_AREA_MAXTYPE on invalid
cases.
Add a comment highlighting that null-sized entries are not filled in the static
mapping directives table.
Forgive the trick on level_index_mask to fit in the 80 chars/line.
Signed-off-by: Etienne Carriere <etienne.carriere@linaro.org>
Reviewed-by: Jens Wiklander <jens.wiklander@linaro.org>
Diffstat (limited to 'core/arch/arm/mm/core_mmu_v7.c')
-rw-r--r-- | core/arch/arm/mm/core_mmu_v7.c | 6 |
1 files changed, 3 insertions, 3 deletions
diff --git a/core/arch/arm/mm/core_mmu_v7.c b/core/arch/arm/mm/core_mmu_v7.c index ba484b8e..76d974cf 100644 --- a/core/arch/arm/mm/core_mmu_v7.c +++ b/core/arch/arm/mm/core_mmu_v7.c @@ -715,7 +715,7 @@ static void map_page_memarea_in_pgdirs(const struct tee_mmap_region *mm, paddr_t pa = 0; size_t n; - if (!mm->size) + if (core_mmap_is_end_of_table(mm)) return; print_mmap_area(mm, "4k page map"); @@ -742,7 +742,7 @@ static void map_memarea_sections(const struct tee_mmap_region *mm, paddr_t pa = 0; size_t n; - if (!mm->size) + if (core_mmap_is_end_of_table(mm)) return; print_mmap_area(mm, "section map"); @@ -808,7 +808,7 @@ void core_init_mmu_tables(struct tee_mmap_region *mm) /* reset L1 table */ memset(ttb1, 0, L1_TBL_SIZE); - for (n = 0; mm[n].type != MEM_AREA_NOTYPE; n++) + for (n = 0; !core_mmap_is_end_of_table(mm + n); n++) if (!core_mmu_is_dynamic_vaspace(mm + n)) map_memarea(mm + n, ttb1); } |