author     Jens Wiklander <jens.wiklander@linaro.org>      2019-04-18 08:56:31 +0200
committer  Jérôme Forissier <jerome.forissier@linaro.org>  2019-05-02 13:58:09 +0200
commit     37a6b717787bf3927f7af379ae66b1b6d0fe2a51 (patch)
tree       c7a9472aa82d7363ab4e6e36ce0680cec59ca80e /core
parent     fda78375e37ff7341cd205bdc26c9ed9b991b927 (diff)
core: introduce CFG_CORE_DYN_SHM
Introduces CFG_CORE_DYN_SHM, which when set to y enables dynamic shared memory and otherwise disables it. In contrast with CFG_DYN_SHM_CAP, it actually removes the support instead of merely omitting to report it.

Reviewed-by: Etienne Carriere <etienne.carriere@linaro.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
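The key mechanism, visible in the mm/mobj.h and kernel/msg_param.h hunks below, is the stub-when-disabled pattern: when CFG_CORE_DYN_SHM is not set, the header supplies static inline stubs that return TEE_ERROR_NOT_SUPPORTED (or NULL), so callers compile unchanged while the real implementation, moved into mobj_dyn_shm.c, is simply not built. A minimal, self-contained sketch of that pattern follows; CFG_FEATURE_X, feature_map() and main() are hypothetical stand-ins, not OP-TEE APIs.

/*
 * Illustrative sketch of the stub-when-disabled pattern (not OP-TEE code).
 * With the flag unset, the static inline stub is used and no feature code
 * is linked in; with the flag set, the prototype refers to an
 * implementation built from a conditionally compiled source file.
 */
#include <stdio.h>

typedef unsigned int TEE_Result;
#define TEE_SUCCESS             0x00000000
#define TEE_ERROR_NOT_SUPPORTED 0xFFFF000A

#ifdef CFG_FEATURE_X
/* Real prototype; implementation lives in a conditionally built .c file. */
TEE_Result feature_map(void);
#else
/* Feature compiled out: the stub keeps call sites building unchanged. */
static inline TEE_Result feature_map(void)
{
	return TEE_ERROR_NOT_SUPPORTED;
}
#endif

int main(void)
{
	if (feature_map() == TEE_ERROR_NOT_SUPPORTED)
		printf("feature X is not built into this binary\n");
	return 0;
}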
Diffstat (limited to 'core')
-rw-r--r--  core/arch/arm/include/mm/core_mmu.h      2
-rw-r--r--  core/arch/arm/include/mm/mobj.h         12
-rw-r--r--  core/arch/arm/kernel/generic_boot.c     70
-rw-r--r--  core/arch/arm/kernel/thread.c            2
-rw-r--r--  core/arch/arm/mm/core_mmu.c              7
-rw-r--r--  core/arch/arm/mm/mobj.c                427
-rw-r--r--  core/arch/arm/mm/mobj_dyn_shm.c        451
-rw-r--r--  core/arch/arm/mm/sub.mk                  1
-rw-r--r--  core/arch/arm/tee/entry_fast.c           2
-rw-r--r--  core/arch/arm/tee/entry_std.c           25
-rw-r--r--  core/include/kernel/msg_param.h         15
-rw-r--r--  core/kernel/sub.mk                       2
12 files changed, 548 insertions(+), 468 deletions(-)
diff --git a/core/arch/arm/include/mm/core_mmu.h b/core/arch/arm/include/mm/core_mmu.h
index 23819a97..09f03d10 100644
--- a/core/arch/arm/include/mm/core_mmu.h
+++ b/core/arch/arm/include/mm/core_mmu.h
@@ -593,6 +593,7 @@ bool cpu_mmu_enabled(void);
/* Do section mapping, not support on LPAE */
void map_memarea_sections(const struct tee_mmap_region *mm, uint32_t *ttb);
+#ifdef CFG_CORE_DYN_SHM
/*
* Check if platform defines nsec DDR range(s).
* Static SHM (MEM_AREA_NSEC_SHM) is not covered by this API as it is
@@ -602,6 +603,7 @@ bool core_mmu_nsec_ddr_is_defined(void);
void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
size_t nelems);
+#endif
/* Initialize MMU partition */
void core_init_mmu_prtn(struct mmu_partition *prtn, struct tee_mmap_region *mm);
diff --git a/core/arch/arm/include/mm/mobj.h b/core/arch/arm/include/mm/mobj.h
index 2429d677..fbc8cbf9 100644
--- a/core/arch/arm/include/mm/mobj.h
+++ b/core/arch/arm/include/mm/mobj.h
@@ -120,6 +120,7 @@ struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
enum buf_is_attr battr);
+#ifdef CFG_CORE_DYN_SHM
/* reg_shm represents TEE shared memory */
struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
paddr_t page_offset, uint64_t cookie);
@@ -188,6 +189,17 @@ void mobj_reg_shm_unguard(struct mobj *mobj);
*/
struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,
paddr_t page_offset, uint64_t cookie);
+#else
+static inline TEE_Result mobj_reg_shm_inc_map(struct mobj *mobj __unused)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+
+static inline TEE_Result mobj_reg_shm_dec_map(struct mobj *mobj __unused)
+{
+ return TEE_ERROR_NOT_SUPPORTED;
+}
+#endif /*CFG_CORE_DYN_SHM*/
struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie);
diff --git a/core/arch/arm/kernel/generic_boot.c b/core/arch/arm/kernel/generic_boot.c
index 5b5a6626..893e6468 100644
--- a/core/arch/arm/kernel/generic_boot.c
+++ b/core/arch/arm/kernel/generic_boot.c
@@ -735,28 +735,6 @@ static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
}
}
-static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
- uint32_t cell_size)
-{
- uint64_t rv;
-
- if (cell_size == 1) {
- uint32_t v;
-
- memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
- *offs += sizeof(v);
- rv = fdt32_to_cpu(v);
- } else {
- uint64_t v;
-
- memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
- *offs += sizeof(v);
- rv = fdt64_to_cpu(v);
- }
-
- return rv;
-}
-
static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
paddr_t pa, size_t size)
{
@@ -818,16 +796,39 @@ static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
return 0;
}
+#ifdef CFG_CORE_DYN_SHM
+static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
+ uint32_t cell_size)
+{
+ uint64_t rv = 0;
+
+ if (cell_size == 1) {
+ uint32_t v;
+
+ memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
+ *offs += sizeof(v);
+ rv = fdt32_to_cpu(v);
+ } else {
+ uint64_t v;
+
+ memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
+ *offs += sizeof(v);
+ rv = fdt64_to_cpu(v);
+ }
+
+ return rv;
+}
+
static struct core_mmu_phys_mem *get_memory(void *fdt, size_t *nelems)
{
- int offs;
- int addr_size;
- int len_size;
- size_t prop_len;
- const uint8_t *prop;
- size_t prop_offs;
- size_t n;
- struct core_mmu_phys_mem *mem;
+ int offs = 0;
+ int addr_size = 0;
+ int len_size = 0;
+ size_t prop_len = 0;
+ const uint8_t *prop = NULL;
+ size_t prop_offs = 0;
+ size_t n = 0;
+ struct core_mmu_phys_mem *mem = NULL;
offs = fdt_subnode_offset(fdt, 0, "memory");
if (offs < 0)
@@ -873,6 +874,7 @@ static struct core_mmu_phys_mem *get_memory(void *fdt, size_t *nelems)
return mem;
}
+#endif /*CFG_CORE_DYN_SHM*/
static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
{
@@ -985,13 +987,16 @@ static void update_external_dt(void)
{
}
+#ifdef CFG_CORE_DYN_SHM
static struct core_mmu_phys_mem *get_memory(void *fdt __unused,
size_t *nelems __unused)
{
return NULL;
}
+#endif /*CFG_CORE_DYN_SHM*/
#endif /*!CFG_DT*/
+#ifdef CFG_CORE_DYN_SHM
static void discover_nsec_memory(void)
{
struct core_mmu_phys_mem *mem;
@@ -1022,6 +1027,11 @@ static void discover_nsec_memory(void)
memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
core_mmu_set_discovered_nsec_ddr(mem, nelems);
}
+#else /*CFG_CORE_DYN_SHM*/
+static void discover_nsec_memory(void)
+{
+}
+#endif /*!CFG_CORE_DYN_SHM*/
void init_tee_runtime(void)
{
diff --git a/core/arch/arm/kernel/thread.c b/core/arch/arm/kernel/thread.c
index 38728efc..c590badd 100644
--- a/core/arch/arm/kernel/thread.c
+++ b/core/arch/arm/kernel/thread.c
@@ -1414,8 +1414,10 @@ static struct mobj *thread_rpc_alloc_arg(size_t size)
/* Check if this region is in static shared space */
if (core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
mobj = mobj_shm_alloc(pa, size, co);
+#ifdef CFG_CORE_DYN_SHM
else if ((!(pa & SMALL_PAGE_MASK)) && size <= SMALL_PAGE_SIZE)
mobj = mobj_mapped_shm_alloc(&pa, 1, 0, co);
+#endif
if (!mobj)
goto err;
diff --git a/core/arch/arm/mm/core_mmu.c b/core/arch/arm/mm/core_mmu.c
index 020a3e8a..c280d602 100644
--- a/core/arch/arm/mm/core_mmu.c
+++ b/core/arch/arm/mm/core_mmu.c
@@ -260,6 +260,7 @@ static bool pbuf_is_special_mem(paddr_t pbuf, size_t len,
return false;
}
+#ifdef CFG_CORE_DYN_SHM
static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems,
paddr_t pa, size_t size)
{
@@ -417,6 +418,12 @@ bool core_mmu_nsec_ddr_is_defined(void)
return start != end;
}
+#else
+static bool pbuf_is_nsec_ddr(paddr_t pbuf __unused, size_t len __unused)
+{
+ return false;
+}
+#endif /*CFG_CORE_DYN_SHM*/
#define MSG_MEM_INSTERSECT(pa1, sz1, pa2, sz2) \
EMSG("[%" PRIxPA " %" PRIx64 "] intersects [%" PRIxPA " %" PRIx64 "]", \
diff --git a/core/arch/arm/mm/mobj.c b/core/arch/arm/mm/mobj.c
index 56d046b1..2b5a8f5e 100644
--- a/core/arch/arm/mm/mobj.c
+++ b/core/arch/arm/mm/mobj.c
@@ -38,10 +38,6 @@ struct mobj_phys {
paddr_t pa;
};
-static struct mutex shm_mu = MUTEX_INITIALIZER;
-static struct condvar shm_cv = CONDVAR_INITIALIZER;
-static size_t shm_release_waiters;
-
static struct mobj_phys *to_mobj_phys(struct mobj *mobj);
static void *mobj_phys_get_va(struct mobj *mobj, size_t offset)
@@ -300,429 +296,6 @@ struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size,
return &m->mobj;
}
-/*
- * mobj_reg_shm implementation. Describes shared memory provided by normal world
- */
-
-struct mobj_reg_shm {
- struct mobj mobj;
- SLIST_ENTRY(mobj_reg_shm) next;
- uint64_t cookie;
- tee_mm_entry_t *mm;
- paddr_t page_offset;
- struct refcount refcount;
- struct refcount mapcount;
- int num_pages;
- bool guarded;
- paddr_t pages[];
-};
-
-static size_t mobj_reg_shm_size(size_t nr_pages)
-{
- size_t s = 0;
-
- if (MUL_OVERFLOW(sizeof(paddr_t), nr_pages, &s))
- return 0;
- if (ADD_OVERFLOW(sizeof(struct mobj_reg_shm), s, &s))
- return 0;
- return s;
-}
-
-static SLIST_HEAD(reg_shm_head, mobj_reg_shm) reg_shm_list =
- SLIST_HEAD_INITIALIZER(reg_shm_head);
-
-static unsigned int reg_shm_slist_lock = SPINLOCK_UNLOCK;
-static unsigned int reg_shm_map_lock = SPINLOCK_UNLOCK;
-
-static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj);
-
-static TEE_Result mobj_reg_shm_get_pa(struct mobj *mobj, size_t offst,
- size_t granule, paddr_t *pa)
-{
- struct mobj_reg_shm *mobj_reg_shm = to_mobj_reg_shm(mobj);
- size_t full_offset;
- paddr_t p;
-
- if (!pa)
- return TEE_ERROR_GENERIC;
-
- full_offset = offst + mobj_reg_shm->page_offset;
- if (full_offset >= mobj->size)
- return TEE_ERROR_GENERIC;
-
- switch (granule) {
- case 0:
- p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE] +
- (full_offset & SMALL_PAGE_MASK);
- break;
- case SMALL_PAGE_SIZE:
- p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE];
- break;
- default:
- return TEE_ERROR_GENERIC;
-
- }
- *pa = p;
-
- return TEE_SUCCESS;
-}
-KEEP_PAGER(mobj_reg_shm_get_pa);
-
-static size_t mobj_reg_shm_get_phys_offs(struct mobj *mobj,
- size_t granule __maybe_unused)
-{
- assert(granule >= mobj->phys_granule);
- return to_mobj_reg_shm(mobj)->page_offset;
-}
-
-static void *mobj_reg_shm_get_va(struct mobj *mobj, size_t offst)
-{
- struct mobj_reg_shm *mrs = to_mobj_reg_shm(mobj);
-
- if (!mrs->mm)
- return NULL;
-
- return (void *)(vaddr_t)(tee_mm_get_smem(mrs->mm) + offst +
- mrs->page_offset);
-}
-
-static void reg_shm_unmap_helper(struct mobj_reg_shm *r)
-{
- uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
-
- if (r->mm) {
- core_mmu_unmap_pages(tee_mm_get_smem(r->mm),
- r->mobj.size / SMALL_PAGE_SIZE);
- tee_mm_free(r->mm);
- r->mm = NULL;
- }
-
- cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
-}
-
-static void reg_shm_free_helper(struct mobj_reg_shm *mobj_reg_shm)
-{
- reg_shm_unmap_helper(mobj_reg_shm);
- SLIST_REMOVE(&reg_shm_list, mobj_reg_shm, mobj_reg_shm, next);
- free(mobj_reg_shm);
-}
-
-static void mobj_reg_shm_free(struct mobj *mobj)
-{
- mobj_reg_shm_put(mobj);
-}
-
-static TEE_Result mobj_reg_shm_get_cattr(struct mobj *mobj __unused,
- uint32_t *cattr)
-{
- if (!cattr)
- return TEE_ERROR_GENERIC;
-
- *cattr = TEE_MATTR_CACHE_CACHED;
-
- return TEE_SUCCESS;
-}
-
-static bool mobj_reg_shm_matches(struct mobj *mobj, enum buf_is_attr attr);
-
-static uint64_t mobj_reg_shm_get_cookie(struct mobj *mobj)
-{
- return to_mobj_reg_shm(mobj)->cookie;
-}
-
-static const struct mobj_ops mobj_reg_shm_ops __rodata_unpaged = {
- .get_pa = mobj_reg_shm_get_pa,
- .get_phys_offs = mobj_reg_shm_get_phys_offs,
- .get_va = mobj_reg_shm_get_va,
- .get_cattr = mobj_reg_shm_get_cattr,
- .matches = mobj_reg_shm_matches,
- .free = mobj_reg_shm_free,
- .get_cookie = mobj_reg_shm_get_cookie,
-};
-
-static bool mobj_reg_shm_matches(struct mobj *mobj __maybe_unused,
- enum buf_is_attr attr)
-{
- assert(mobj->ops == &mobj_reg_shm_ops);
-
- return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
-}
-
-static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj)
-{
- assert(mobj->ops == &mobj_reg_shm_ops);
- return container_of(mobj, struct mobj_reg_shm, mobj);
-}
-
-static struct mobj_reg_shm *to_mobj_reg_shm_may_fail(struct mobj *mobj)
-{
- if (mobj && mobj->ops != &mobj_reg_shm_ops)
- return NULL;
-
- return container_of(mobj, struct mobj_reg_shm, mobj);
-}
-
-struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
- paddr_t page_offset, uint64_t cookie)
-{
- struct mobj_reg_shm *mobj_reg_shm;
- size_t i;
- uint32_t exceptions;
- size_t s;
-
- if (!num_pages)
- return NULL;
-
- s = mobj_reg_shm_size(num_pages);
- if (!s)
- return NULL;
- mobj_reg_shm = calloc(1, s);
- if (!mobj_reg_shm)
- return NULL;
-
- mobj_reg_shm->mobj.ops = &mobj_reg_shm_ops;
- mobj_reg_shm->mobj.size = num_pages * SMALL_PAGE_SIZE;
- mobj_reg_shm->mobj.phys_granule = SMALL_PAGE_SIZE;
- mobj_reg_shm->cookie = cookie;
- mobj_reg_shm->guarded = true;
- mobj_reg_shm->num_pages = num_pages;
- mobj_reg_shm->page_offset = page_offset;
- memcpy(mobj_reg_shm->pages, pages, sizeof(*pages) * num_pages);
- refcount_set(&mobj_reg_shm->refcount, 1);
-
- /* Insure loaded references match format and security constraints */
- for (i = 0; i < num_pages; i++) {
- if (mobj_reg_shm->pages[i] & SMALL_PAGE_MASK)
- goto err;
-
- /* Only Non-secure memory can be mapped there */
- if (!core_pbuf_is(CORE_MEM_NON_SEC, mobj_reg_shm->pages[i],
- SMALL_PAGE_SIZE))
- goto err;
- }
-
- exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
- SLIST_INSERT_HEAD(&reg_shm_list, mobj_reg_shm, next);
- cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
-
- return &mobj_reg_shm->mobj;
-err:
- free(mobj_reg_shm);
- return NULL;
-}
-
-void mobj_reg_shm_unguard(struct mobj *mobj)
-{
- uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
-
- to_mobj_reg_shm(mobj)->guarded = false;
- cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
-}
-
-static struct mobj_reg_shm *reg_shm_find_unlocked(uint64_t cookie)
-{
- struct mobj_reg_shm *mobj_reg_shm;
-
- SLIST_FOREACH(mobj_reg_shm, &reg_shm_list, next)
- if (mobj_reg_shm->cookie == cookie)
- return mobj_reg_shm;
-
- return NULL;
-}
-
-struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie)
-{
- uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
- struct mobj_reg_shm *r = reg_shm_find_unlocked(cookie);
-
- if (r) {
- /*
- * Counter is supposed to be larger than 0, if it isn't
- * we're in trouble.
- */
- if (!refcount_inc(&r->refcount))
- panic();
- }
-
- cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
-
- if (r)
- return &r->mobj;
-
- return NULL;
-}
-
-void mobj_reg_shm_put(struct mobj *mobj)
-{
- struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
- uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
-
- /*
- * A put is supposed to match a get or the initial alloc, once
- * we're at zero there's no more user and the original allocator is
- * done too.
- */
- if (refcount_dec(&r->refcount))
- reg_shm_free_helper(r);
-
- cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
-
- /*
- * Note that we're reading this mutex protected variable without the
- * mutex acquired. This isn't a problem since an eventually missed
- * waiter who is waiting for this MOBJ will try again before hanging
- * in condvar_wait().
- */
- if (shm_release_waiters) {
- mutex_lock(&shm_mu);
- condvar_broadcast(&shm_cv);
- mutex_unlock(&shm_mu);
- }
-}
-
-static TEE_Result try_release_reg_shm(uint64_t cookie)
-{
- TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
- uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
- struct mobj_reg_shm *r = reg_shm_find_unlocked(cookie);
-
- if (!r || r->guarded)
- goto out;
-
- res = TEE_ERROR_BUSY;
- if (refcount_val(&r->refcount) == 1) {
- reg_shm_free_helper(r);
- res = TEE_SUCCESS;
- }
-out:
- cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
-
- return res;
-}
-
-TEE_Result mobj_reg_shm_release_by_cookie(uint64_t cookie)
-{
- TEE_Result res = try_release_reg_shm(cookie);
-
- if (res != TEE_ERROR_BUSY)
- return res;
-
- mutex_lock(&shm_mu);
- shm_release_waiters++;
- assert(shm_release_waiters);
-
- while (true) {
- res = try_release_reg_shm(cookie);
- if (res != TEE_ERROR_BUSY)
- break;
- condvar_wait(&shm_cv, &shm_mu);
- }
-
- assert(shm_release_waiters);
- shm_release_waiters--;
- mutex_unlock(&shm_mu);
-
- return res;
-}
-
-TEE_Result mobj_reg_shm_inc_map(struct mobj *mobj)
-{
- TEE_Result res = TEE_SUCCESS;
- struct mobj_reg_shm *r = to_mobj_reg_shm_may_fail(mobj);
-
- if (!r)
- return TEE_ERROR_GENERIC;
-
- if (refcount_inc(&r->mapcount))
- return TEE_SUCCESS;
-
- uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
-
- if (refcount_val(&r->mapcount))
- goto out;
-
- r->mm = tee_mm_alloc(&tee_mm_shm, SMALL_PAGE_SIZE * r->num_pages);
- if (!r->mm) {
- res = TEE_ERROR_OUT_OF_MEMORY;
- goto out;
- }
-
- res = core_mmu_map_pages(tee_mm_get_smem(r->mm), r->pages,
- r->num_pages, MEM_AREA_NSEC_SHM);
- if (res) {
- tee_mm_free(r->mm);
- r->mm = NULL;
- goto out;
- }
-
- refcount_set(&r->mapcount, 1);
-out:
- cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
-
- return res;
-}
-
-TEE_Result mobj_reg_shm_dec_map(struct mobj *mobj)
-{
- struct mobj_reg_shm *r = to_mobj_reg_shm_may_fail(mobj);
-
- if (!r)
- return TEE_ERROR_GENERIC;
-
- if (!refcount_dec(&r->mapcount))
- return TEE_SUCCESS;
-
- uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
-
- if (refcount_val(&r->mapcount)) {
- core_mmu_unmap_pages(tee_mm_get_smem(r->mm),
- r->mobj.size / SMALL_PAGE_SIZE);
- tee_mm_free(r->mm);
- r->mm = NULL;
- }
-
- cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
-
- return TEE_SUCCESS;
-}
-
-
-struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,
- paddr_t page_offset, uint64_t cookie)
-{
- struct mobj *mobj = mobj_reg_shm_alloc(pages, num_pages,
- page_offset, cookie);
-
- if (!mobj)
- return NULL;
-
- if (mobj_reg_shm_inc_map(mobj)) {
- mobj_free(mobj);
- return NULL;
- }
-
- return mobj;
-}
-
-static TEE_Result mobj_mapped_shm_init(void)
-{
- vaddr_t pool_start;
- vaddr_t pool_end;
-
- core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
- if (!pool_start || !pool_end)
- panic("Can't find region for shmem pool");
-
- if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end, SMALL_PAGE_SHIFT,
- TEE_MM_POOL_NO_FLAGS))
- panic("Could not create shmem pool");
-
- DMSG("Shared memory address range: %" PRIxVA ", %" PRIxVA,
- pool_start, pool_end);
- return TEE_SUCCESS;
-}
-
-service_init(mobj_mapped_shm_init);
/*
* mobj_shm implementation. mobj_shm represents buffer in predefined shm region
diff --git a/core/arch/arm/mm/mobj_dyn_shm.c b/core/arch/arm/mm/mobj_dyn_shm.c
new file mode 100644
index 00000000..175e4b97
--- /dev/null
+++ b/core/arch/arm/mm/mobj_dyn_shm.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright (c) 2016-2017, Linaro Limited
+ */
+
+#include <assert.h>
+#include <initcall.h>
+#include <keep.h>
+#include <kernel/linker.h>
+#include <kernel/mutex.h>
+#include <kernel/panic.h>
+#include <kernel/refcount.h>
+#include <kernel/spinlock.h>
+#include <kernel/tee_misc.h>
+#include <mm/core_mmu.h>
+#include <mm/mobj.h>
+#include <mm/tee_mmu.h>
+#include <mm/tee_pager.h>
+#include <optee_msg.h>
+#include <sm/optee_smc.h>
+#include <stdlib.h>
+#include <tee_api_types.h>
+#include <types_ext.h>
+#include <util.h>
+
+static struct mutex shm_mu = MUTEX_INITIALIZER;
+static struct condvar shm_cv = CONDVAR_INITIALIZER;
+static size_t shm_release_waiters;
+
+/*
+ * mobj_reg_shm implementation. Describes shared memory provided by normal world
+ */
+
+struct mobj_reg_shm {
+ struct mobj mobj;
+ SLIST_ENTRY(mobj_reg_shm) next;
+ uint64_t cookie;
+ tee_mm_entry_t *mm;
+ paddr_t page_offset;
+ struct refcount refcount;
+ struct refcount mapcount;
+ int num_pages;
+ bool guarded;
+ paddr_t pages[];
+};
+
+static size_t mobj_reg_shm_size(size_t nr_pages)
+{
+ size_t s = 0;
+
+ if (MUL_OVERFLOW(sizeof(paddr_t), nr_pages, &s))
+ return 0;
+ if (ADD_OVERFLOW(sizeof(struct mobj_reg_shm), s, &s))
+ return 0;
+ return s;
+}
+
+static SLIST_HEAD(reg_shm_head, mobj_reg_shm) reg_shm_list =
+ SLIST_HEAD_INITIALIZER(reg_shm_head);
+
+static unsigned int reg_shm_slist_lock = SPINLOCK_UNLOCK;
+static unsigned int reg_shm_map_lock = SPINLOCK_UNLOCK;
+
+static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj);
+
+static TEE_Result mobj_reg_shm_get_pa(struct mobj *mobj, size_t offst,
+ size_t granule, paddr_t *pa)
+{
+ struct mobj_reg_shm *mobj_reg_shm = to_mobj_reg_shm(mobj);
+ size_t full_offset = 0;
+ paddr_t p = 0;
+
+ if (!pa)
+ return TEE_ERROR_GENERIC;
+
+ full_offset = offst + mobj_reg_shm->page_offset;
+ if (full_offset >= mobj->size)
+ return TEE_ERROR_GENERIC;
+
+ switch (granule) {
+ case 0:
+ p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE] +
+ (full_offset & SMALL_PAGE_MASK);
+ break;
+ case SMALL_PAGE_SIZE:
+ p = mobj_reg_shm->pages[full_offset / SMALL_PAGE_SIZE];
+ break;
+ default:
+ return TEE_ERROR_GENERIC;
+ }
+ *pa = p;
+
+ return TEE_SUCCESS;
+}
+KEEP_PAGER(mobj_reg_shm_get_pa);
+
+static size_t mobj_reg_shm_get_phys_offs(struct mobj *mobj,
+ size_t granule __maybe_unused)
+{
+ assert(granule >= mobj->phys_granule);
+ return to_mobj_reg_shm(mobj)->page_offset;
+}
+
+static void *mobj_reg_shm_get_va(struct mobj *mobj, size_t offst)
+{
+ struct mobj_reg_shm *mrs = to_mobj_reg_shm(mobj);
+
+ if (!mrs->mm)
+ return NULL;
+
+ return (void *)(vaddr_t)(tee_mm_get_smem(mrs->mm) + offst +
+ mrs->page_offset);
+}
+
+static void reg_shm_unmap_helper(struct mobj_reg_shm *r)
+{
+ uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
+
+ if (r->mm) {
+ core_mmu_unmap_pages(tee_mm_get_smem(r->mm),
+ r->mobj.size / SMALL_PAGE_SIZE);
+ tee_mm_free(r->mm);
+ r->mm = NULL;
+ }
+
+ cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
+}
+
+static void reg_shm_free_helper(struct mobj_reg_shm *mobj_reg_shm)
+{
+ reg_shm_unmap_helper(mobj_reg_shm);
+ SLIST_REMOVE(&reg_shm_list, mobj_reg_shm, mobj_reg_shm, next);
+ free(mobj_reg_shm);
+}
+
+static void mobj_reg_shm_free(struct mobj *mobj)
+{
+ mobj_reg_shm_put(mobj);
+}
+
+static TEE_Result mobj_reg_shm_get_cattr(struct mobj *mobj __unused,
+ uint32_t *cattr)
+{
+ if (!cattr)
+ return TEE_ERROR_GENERIC;
+
+ *cattr = TEE_MATTR_CACHE_CACHED;
+
+ return TEE_SUCCESS;
+}
+
+static bool mobj_reg_shm_matches(struct mobj *mobj, enum buf_is_attr attr);
+
+static uint64_t mobj_reg_shm_get_cookie(struct mobj *mobj)
+{
+ return to_mobj_reg_shm(mobj)->cookie;
+}
+
+static const struct mobj_ops mobj_reg_shm_ops __rodata_unpaged = {
+ .get_pa = mobj_reg_shm_get_pa,
+ .get_phys_offs = mobj_reg_shm_get_phys_offs,
+ .get_va = mobj_reg_shm_get_va,
+ .get_cattr = mobj_reg_shm_get_cattr,
+ .matches = mobj_reg_shm_matches,
+ .free = mobj_reg_shm_free,
+ .get_cookie = mobj_reg_shm_get_cookie,
+};
+
+static bool mobj_reg_shm_matches(struct mobj *mobj __maybe_unused,
+ enum buf_is_attr attr)
+{
+ assert(mobj->ops == &mobj_reg_shm_ops);
+
+ return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
+}
+
+static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj)
+{
+ assert(mobj->ops == &mobj_reg_shm_ops);
+ return container_of(mobj, struct mobj_reg_shm, mobj);
+}
+
+static struct mobj_reg_shm *to_mobj_reg_shm_may_fail(struct mobj *mobj)
+{
+ if (mobj && mobj->ops != &mobj_reg_shm_ops)
+ return NULL;
+
+ return container_of(mobj, struct mobj_reg_shm, mobj);
+}
+
+struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
+ paddr_t page_offset, uint64_t cookie)
+{
+ struct mobj_reg_shm *mobj_reg_shm = NULL;
+ size_t i = 0;
+ uint32_t exceptions = 0;
+ size_t s = 0;
+
+ if (!num_pages)
+ return NULL;
+
+ s = mobj_reg_shm_size(num_pages);
+ if (!s)
+ return NULL;
+ mobj_reg_shm = calloc(1, s);
+ if (!mobj_reg_shm)
+ return NULL;
+
+ mobj_reg_shm->mobj.ops = &mobj_reg_shm_ops;
+ mobj_reg_shm->mobj.size = num_pages * SMALL_PAGE_SIZE;
+ mobj_reg_shm->mobj.phys_granule = SMALL_PAGE_SIZE;
+ mobj_reg_shm->cookie = cookie;
+ mobj_reg_shm->guarded = true;
+ mobj_reg_shm->num_pages = num_pages;
+ mobj_reg_shm->page_offset = page_offset;
+ memcpy(mobj_reg_shm->pages, pages, sizeof(*pages) * num_pages);
+ refcount_set(&mobj_reg_shm->refcount, 1);
+
+ /* Ensure loaded references match format and security constraints */
+ for (i = 0; i < num_pages; i++) {
+ if (mobj_reg_shm->pages[i] & SMALL_PAGE_MASK)
+ goto err;
+
+ /* Only Non-secure memory can be mapped there */
+ if (!core_pbuf_is(CORE_MEM_NON_SEC, mobj_reg_shm->pages[i],
+ SMALL_PAGE_SIZE))
+ goto err;
+ }
+
+ exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
+ SLIST_INSERT_HEAD(&reg_shm_list, mobj_reg_shm, next);
+ cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
+
+ return &mobj_reg_shm->mobj;
+err:
+ free(mobj_reg_shm);
+ return NULL;
+}
+
+void mobj_reg_shm_unguard(struct mobj *mobj)
+{
+ uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
+
+ to_mobj_reg_shm(mobj)->guarded = false;
+ cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
+}
+
+static struct mobj_reg_shm *reg_shm_find_unlocked(uint64_t cookie)
+{
+ struct mobj_reg_shm *mobj_reg_shm = NULL;
+
+ SLIST_FOREACH(mobj_reg_shm, &reg_shm_list, next)
+ if (mobj_reg_shm->cookie == cookie)
+ return mobj_reg_shm;
+
+ return NULL;
+}
+
+struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie)
+{
+ uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
+ struct mobj_reg_shm *r = reg_shm_find_unlocked(cookie);
+
+ if (r) {
+ /*
+ * Counter is supposed to be larger than 0, if it isn't
+ * we're in trouble.
+ */
+ if (!refcount_inc(&r->refcount))
+ panic();
+ }
+
+ cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
+
+ if (r)
+ return &r->mobj;
+
+ return NULL;
+}
+
+void mobj_reg_shm_put(struct mobj *mobj)
+{
+ struct mobj_reg_shm *r = to_mobj_reg_shm(mobj);
+ uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
+
+ /*
+ * A put is supposed to match a get or the initial alloc, once
+ * we're at zero there's no more user and the original allocator is
+ * done too.
+ */
+ if (refcount_dec(&r->refcount))
+ reg_shm_free_helper(r);
+
+ cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
+
+ /*
+ * Note that we're reading this mutex protected variable without the
+ * mutex acquired. This isn't a problem since an eventually missed
+ * waiter who is waiting for this MOBJ will try again before hanging
+ * in condvar_wait().
+ */
+ if (shm_release_waiters) {
+ mutex_lock(&shm_mu);
+ condvar_broadcast(&shm_cv);
+ mutex_unlock(&shm_mu);
+ }
+}
+
+static TEE_Result try_release_reg_shm(uint64_t cookie)
+{
+ TEE_Result res = TEE_ERROR_BAD_PARAMETERS;
+ uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
+ struct mobj_reg_shm *r = reg_shm_find_unlocked(cookie);
+
+ if (!r || r->guarded)
+ goto out;
+
+ res = TEE_ERROR_BUSY;
+ if (refcount_val(&r->refcount) == 1) {
+ reg_shm_free_helper(r);
+ res = TEE_SUCCESS;
+ }
+out:
+ cpu_spin_unlock_xrestore(&reg_shm_slist_lock, exceptions);
+
+ return res;
+}
+
+TEE_Result mobj_reg_shm_release_by_cookie(uint64_t cookie)
+{
+ TEE_Result res = try_release_reg_shm(cookie);
+
+ if (res != TEE_ERROR_BUSY)
+ return res;
+
+ mutex_lock(&shm_mu);
+ shm_release_waiters++;
+ assert(shm_release_waiters);
+
+ while (true) {
+ res = try_release_reg_shm(cookie);
+ if (res != TEE_ERROR_BUSY)
+ break;
+ condvar_wait(&shm_cv, &shm_mu);
+ }
+
+ assert(shm_release_waiters);
+ shm_release_waiters--;
+ mutex_unlock(&shm_mu);
+
+ return res;
+}
+
+TEE_Result mobj_reg_shm_inc_map(struct mobj *mobj)
+{
+ TEE_Result res = TEE_SUCCESS;
+ struct mobj_reg_shm *r = to_mobj_reg_shm_may_fail(mobj);
+
+ if (!r)
+ return TEE_ERROR_GENERIC;
+
+ if (refcount_inc(&r->mapcount))
+ return TEE_SUCCESS;
+
+ uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
+
+ if (refcount_val(&r->mapcount))
+ goto out;
+
+ r->mm = tee_mm_alloc(&tee_mm_shm, SMALL_PAGE_SIZE * r->num_pages);
+ if (!r->mm) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ res = core_mmu_map_pages(tee_mm_get_smem(r->mm), r->pages,
+ r->num_pages, MEM_AREA_NSEC_SHM);
+ if (res) {
+ tee_mm_free(r->mm);
+ r->mm = NULL;
+ goto out;
+ }
+
+ refcount_set(&r->mapcount, 1);
+out:
+ cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
+
+ return res;
+}
+
+TEE_Result mobj_reg_shm_dec_map(struct mobj *mobj)
+{
+ struct mobj_reg_shm *r = to_mobj_reg_shm_may_fail(mobj);
+
+ if (!r)
+ return TEE_ERROR_GENERIC;
+
+ if (!refcount_dec(&r->mapcount))
+ return TEE_SUCCESS;
+
+ uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
+
+ if (refcount_val(&r->mapcount)) {
+ core_mmu_unmap_pages(tee_mm_get_smem(r->mm),
+ r->mobj.size / SMALL_PAGE_SIZE);
+ tee_mm_free(r->mm);
+ r->mm = NULL;
+ }
+
+ cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
+
+ return TEE_SUCCESS;
+}
+
+
+struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,
+ paddr_t page_offset, uint64_t cookie)
+{
+ struct mobj *mobj = mobj_reg_shm_alloc(pages, num_pages,
+ page_offset, cookie);
+
+ if (!mobj)
+ return NULL;
+
+ if (mobj_reg_shm_inc_map(mobj)) {
+ mobj_free(mobj);
+ return NULL;
+ }
+
+ return mobj;
+}
+
+static TEE_Result mobj_mapped_shm_init(void)
+{
+ vaddr_t pool_start = 0;
+ vaddr_t pool_end = 0;
+
+ core_mmu_get_mem_by_type(MEM_AREA_SHM_VASPACE, &pool_start, &pool_end);
+ if (!pool_start || !pool_end)
+ panic("Can't find region for shmem pool");
+
+ if (!tee_mm_init(&tee_mm_shm, pool_start, pool_end, SMALL_PAGE_SHIFT,
+ TEE_MM_POOL_NO_FLAGS))
+ panic("Could not create shmem pool");
+
+ DMSG("Shared memory address range: %" PRIxVA ", %" PRIxVA,
+ pool_start, pool_end);
+ return TEE_SUCCESS;
+}
+
+service_init(mobj_mapped_shm_init);
diff --git a/core/arch/arm/mm/sub.mk b/core/arch/arm/mm/sub.mk
index 38853521..c09fe599 100644
--- a/core/arch/arm/mm/sub.mk
+++ b/core/arch/arm/mm/sub.mk
@@ -9,3 +9,4 @@ endif
srcs-y += tee_mm.c
srcs-y += pgt_cache.c
srcs-y += mobj.c
+srcs-$(CFG_CORE_DYN_SHM) += mobj_dyn_shm.c
diff --git a/core/arch/arm/tee/entry_fast.c b/core/arch/arm/tee/entry_fast.c
index 4e951ab2..9876e334 100644
--- a/core/arch/arm/tee/entry_fast.c
+++ b/core/arch/arm/tee/entry_fast.c
@@ -84,7 +84,7 @@ static void tee_entry_exchange_capabilities(struct thread_smc_args *args)
args->a0 = OPTEE_SMC_RETURN_OK;
args->a1 = OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM;
-#if defined(CFG_DYN_SHM_CAP)
+#if defined(CFG_DYN_SHM_CAP) && defined(CFG_CORE_DYN_SHM)
dyn_shm_en = core_mmu_nsec_ddr_is_defined();
if (dyn_shm_en)
args->a1 |= OPTEE_SMC_SEC_CAP_DYNAMIC_SHM;
diff --git a/core/arch/arm/tee/entry_std.c b/core/arch/arm/tee/entry_std.c
index d0b5763f..07fe5217 100644
--- a/core/arch/arm/tee/entry_std.c
+++ b/core/arch/arm/tee/entry_std.c
@@ -98,6 +98,7 @@ static TEE_Result set_tmem_param(const struct optee_msg_param_tmem *tmem,
return TEE_ERROR_BAD_PARAMETERS;
}
+#ifdef CFG_CORE_DYN_SHM
static TEE_Result set_rmem_param(const struct optee_msg_param_rmem *rmem,
struct param_mem *mem)
{
@@ -121,6 +122,7 @@ static TEE_Result set_rmem_param(const struct optee_msg_param_rmem *rmem,
return TEE_SUCCESS;
}
+#endif
static TEE_Result copy_in_params(const struct optee_msg_param *params,
uint32_t num_params,
@@ -167,6 +169,7 @@ static TEE_Result copy_in_params(const struct optee_msg_param *params,
pt[n] = TEE_PARAM_TYPE_MEMREF_INPUT + attr -
OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
break;
+#ifdef CFG_CORE_DYN_SHM
case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
@@ -177,6 +180,7 @@ static TEE_Result copy_in_params(const struct optee_msg_param *params,
pt[n] = TEE_PARAM_TYPE_MEMREF_INPUT + attr -
OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
break;
+#endif
default:
return TEE_ERROR_BAD_PARAMETERS;
}
@@ -200,12 +204,13 @@ static void cleanup_shm_refs(const uint64_t *saved_attr,
if (saved_attr[n] & OPTEE_MSG_ATTR_NONCONTIG)
mobj_free(param->u[n].mem.mobj);
break;
-
+#ifdef CFG_CORE_DYN_SHM
case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
mobj_reg_shm_put(param->u[n].mem.mobj);
break;
+#endif
default:
break;
}
@@ -425,6 +430,7 @@ out:
smc_args->a0 = OPTEE_SMC_RETURN_OK;
}
+#ifdef CFG_CORE_DYN_SHM
static void register_shm(struct thread_smc_args *smc_args,
struct optee_msg_arg *arg, uint32_t num_params)
{
@@ -497,6 +503,7 @@ err:
mobj_free(mobj);
return NULL;
}
+#endif /*CFG_CORE_DYN_SHM*/
static struct mobj *get_cmd_buffer(paddr_t parg, uint32_t *num_params)
{
@@ -522,10 +529,10 @@ static struct mobj *get_cmd_buffer(paddr_t parg, uint32_t *num_params)
*/
void __weak tee_entry_std(struct thread_smc_args *smc_args)
{
- paddr_t parg;
- struct optee_msg_arg *arg = NULL; /* fix gcc warning */
- uint32_t num_params = 0; /* fix gcc warning */
- struct mobj *mobj;
+ paddr_t parg = 0;
+ struct optee_msg_arg *arg = NULL;
+ uint32_t num_params = 0;
+ struct mobj *mobj = NULL;
if (smc_args->a0 != OPTEE_SMC_CALL_WITH_ARG) {
EMSG("Unknown SMC 0x%" PRIx64, (uint64_t)smc_args->a0);
@@ -539,13 +546,16 @@ void __weak tee_entry_std(struct thread_smc_args *smc_args)
if (core_pbuf_is(CORE_MEM_NSEC_SHM, parg,
sizeof(struct optee_msg_arg))) {
mobj = get_cmd_buffer(parg, &num_params);
- } else {
+ }
+#ifdef CFG_CORE_DYN_SHM
+ else {
if (parg & SMALL_PAGE_MASK) {
smc_args->a0 = OPTEE_SMC_RETURN_EBADADDR;
return;
}
mobj = map_cmd_buffer(parg, &num_params);
}
+#endif
if (!mobj || !ALIGNMENT_IS_OK(parg, struct optee_msg_arg)) {
EMSG("Bad arg address 0x%" PRIxPA, parg);
@@ -572,13 +582,14 @@ void __weak tee_entry_std(struct thread_smc_args *smc_args)
case OPTEE_MSG_CMD_CANCEL:
entry_cancel(smc_args, arg, num_params);
break;
+#ifdef CFG_CORE_DYN_SHM
case OPTEE_MSG_CMD_REGISTER_SHM:
register_shm(smc_args, arg, num_params);
break;
case OPTEE_MSG_CMD_UNREGISTER_SHM:
unregister_shm(smc_args, arg, num_params);
break;
-
+#endif
default:
EMSG("Unknown cmd 0x%x", arg->cmd);
smc_args->a0 = OPTEE_SMC_RETURN_EBADCMD;
diff --git a/core/include/kernel/msg_param.h b/core/include/kernel/msg_param.h
index a5b24bba..841c3bfa 100644
--- a/core/include/kernel/msg_param.h
+++ b/core/include/kernel/msg_param.h
@@ -29,11 +29,12 @@
#ifndef KERNEL_MSG_PARAM_H
#define KERNEL_MSG_PARAM_H
+#include <compiler.h>
+#include <kernel/msg_param.h>
+#include <mm/mobj.h>
#include <optee_msg.h>
#include <stdio.h>
#include <types_ext.h>
-#include <kernel/msg_param.h>
-#include <mm/mobj.h>
/**
* msg_param_mobj_from_noncontig() - construct mobj from non-contiguous
@@ -47,8 +48,18 @@
* return:
* mobj or NULL on error
*/
+#ifdef CFG_CORE_DYN_SHM
struct mobj *msg_param_mobj_from_noncontig(paddr_t buf_ptr, size_t size,
uint64_t shm_ref, bool map_buffer);
+#else
+static inline struct mobj *
+msg_param_mobj_from_noncontig(paddr_t buf_ptr __unused, size_t size __unused,
+ uint64_t shm_ref __unused,
+ bool map_buffer __unused)
+{
+ return NULL;
+}
+#endif
/**
* msg_param_attr_is_tmem - helper functions that cheks if attribute is tmem
diff --git a/core/kernel/sub.mk b/core/kernel/sub.mk
index 8fec1b02..b847f39f 100644
--- a/core/kernel/sub.mk
+++ b/core/kernel/sub.mk
@@ -7,7 +7,7 @@ srcs-y += pm.c
srcs-y += handle.c
srcs-y += interrupt.c
srcs-$(CFG_LOCKDEP) += lockdep.c
-srcs-y += msg_param.c
+srcs-$(CFG_CORE_DYN_SHM) += msg_param.c
srcs-y += panic.c
srcs-y += refcount.c
srcs-y += tee_misc.c