diff --git a/core/arch/arm/arm.mk b/core/arch/arm/arm.mk
index 2da4f97ffd4..c70a5a355bb 100644
--- a/core/arch/arm/arm.mk
+++ b/core/arch/arm/arm.mk
@@ -27,6 +27,8 @@ endif
 
 CFG_MMAP_REGIONS ?= 13
 CFG_RESERVED_VASPACE_SIZE ?= (1024 * 1024 * 10)
+CFG_NEX_DYN_VASPACE_SIZE ?= (1024 * 1024)
+CFG_TEE_DYN_VASPACE_SIZE ?= (1024 * 1024)
 
 ifeq ($(CFG_ARM64_core),y)
 ifeq ($(CFG_ARM32_core),y)
diff --git a/core/arch/arm/kernel/boot.c b/core/arch/arm/kernel/boot.c
index 24908ac8347..d23ae6d8b02 100644
--- a/core/arch/arm/kernel/boot.c
+++ b/core/arch/arm/kernel/boot.c
@@ -33,6 +33,7 @@
 #include <mm/core_memprot.h>
 #include <mm/core_mmu.h>
 #include <mm/fobj.h>
+#include <mm/page_alloc.h>
 #include <mm/phys_mem.h>
 #include <mm/tee_mm.h>
 #include <mm/tee_pager.h>
@@ -955,6 +956,15 @@ static void init_primary(unsigned long pageable_part)
 		 */
 		assert(va && va <= boot_cached_mem_end);
 		boot_cached_mem_end = va;
+
+		/*
+		 * This is needed to enable virt_page_alloc() now that
+		 * boot_mem_alloc() can't be used any longer.
+		 */
+		if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
+			nex_page_alloc_init();
+		else
+			page_alloc_init();
 	}
 
 	if (IS_ENABLED(CFG_WITH_PAGER)) {
diff --git a/core/arch/arm/kernel/virtualization.c b/core/arch/arm/kernel/virtualization.c
index 68b65767efb..377ad08019b 100644
--- a/core/arch/arm/kernel/virtualization.c
+++ b/core/arch/arm/kernel/virtualization.c
@@ -18,6 +18,7 @@
 #include <kernel/virtualization.h>
 #include <mm/core_memprot.h>
 #include <mm/core_mmu.h>
+#include <mm/page_alloc.h>
 #include <mm/phys_mem.h>
 #include <mm/tee_mm.h>
 #include <platform_config.h>
@@ -327,6 +328,7 @@ TEE_Result virt_guest_created(uint16_t guest_id)
 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
 	phys_mem_init(0, 0, tee_mm_get_smem(prtn->ta_ram),
 		      tee_mm_get_bytes(prtn->ta_ram));
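+	/* Enable virt_page_alloc() for this guest partition */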
+	page_alloc_init();
 	/* Initialize threads */
 	thread_init_threads();
 	/* Do the preinitcalls */
diff --git a/core/arch/arm/mm/core_mmu_lpae.c b/core/arch/arm/mm/core_mmu_lpae.c
index 745f522e246..68d7e4098f3 100644
--- a/core/arch/arm/mm/core_mmu_lpae.c
+++ b/core/arch/arm/mm/core_mmu_lpae.c
@@ -221,7 +221,8 @@
 #define MAX_XLAT_TABLES		(XLAT_TABLE_TEE_EXTRA + \
 				 XLAT_TABLE_VIRTUALIZATION_EXTRA + \
 				 XLAT_TABLE_ASLR_EXTRA + \
-				 XLAT_TABLE_USER_EXTRA)
+				 XLAT_TABLE_USER_EXTRA + \
+				 IS_ENABLED(CFG_DYN_CONFIG))
 #endif /*!MAX_XLAT_TABLES*/
 
 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
diff --git a/core/arch/riscv/riscv.mk b/core/arch/riscv/riscv.mk
index db879e59a08..b7147bbff91 100644
--- a/core/arch/riscv/riscv.mk
+++ b/core/arch/riscv/riscv.mk
@@ -14,6 +14,8 @@ include mk/cc-option.mk
 
 CFG_MMAP_REGIONS ?= 13
 CFG_RESERVED_VASPACE_SIZE ?= (1024 * 1024 * 10)
+CFG_NEX_DYN_VASPACE_SIZE ?= (1024 * 1024)
+CFG_TEE_DYN_VASPACE_SIZE ?= (1024 * 1024)
 
 ifeq ($(CFG_RV64_core),y)
 CFG_KERN_LINKER_FORMAT ?= elf64-littleriscv
diff --git a/core/include/mm/core_mmu.h b/core/include/mm/core_mmu.h
index 55fb9b92af5..49f13b0a988 100644
--- a/core/include/mm/core_mmu.h
+++ b/core/include/mm/core_mmu.h
@@ -65,6 +65,8 @@
  * MEM_AREA_INIT_RAM_RX: init private read-only/executable memory (secure)
  * MEM_AREA_NEX_RAM_RO: nexus private read-only/non-executable memory (secure)
  * MEM_AREA_NEX_RAM_RW: nexus private r/w/non-executable memory (secure)
+ * MEM_AREA_NEX_DYN_VASPACE: nexus private dynamic memory map (secure)
+ * MEM_AREA_TEE_DYN_VASPACE: core private dynamic memory map (secure)
  * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE)
  * MEM_AREA_TEE_ASAN: core address sanitizer RAM (secure, reserved to TEE)
  * MEM_AREA_IDENTITY_MAP_RX: core identity mapped r/o executable memory (secure)
@@ -94,6 +96,8 @@ enum teecore_memtypes {
 	MEM_AREA_INIT_RAM_RX,
 	MEM_AREA_NEX_RAM_RO,
 	MEM_AREA_NEX_RAM_RW,
+	MEM_AREA_NEX_DYN_VASPACE,
+	MEM_AREA_TEE_DYN_VASPACE,
 	MEM_AREA_TEE_COHERENT,
 	MEM_AREA_TEE_ASAN,
 	MEM_AREA_IDENTITY_MAP_RX,
@@ -128,6 +132,8 @@ static inline const char *teecore_memtype_name(enum teecore_memtypes type)
 		[MEM_AREA_INIT_RAM_RX] = "INIT_RAM_RX",
 		[MEM_AREA_NEX_RAM_RO] = "NEX_RAM_RO",
 		[MEM_AREA_NEX_RAM_RW] = "NEX_RAM_RW",
+		[MEM_AREA_NEX_DYN_VASPACE] = "NEX_DYN_VASPACE",
+		[MEM_AREA_TEE_DYN_VASPACE] = "TEE_DYN_VASPACE",
 		[MEM_AREA_TEE_ASAN] = "TEE_ASAN",
 		[MEM_AREA_IDENTITY_MAP_RX] = "IDENTITY_MAP_RX",
 		[MEM_AREA_TEE_COHERENT] = "TEE_COHERENT",
@@ -500,8 +506,15 @@ static inline size_t core_mmu_get_block_offset(
  */
 static inline bool core_mmu_is_dynamic_vaspace(struct tee_mmap_region *mm)
 {
-	return mm->type == MEM_AREA_RES_VASPACE ||
-		mm->type == MEM_AREA_SHM_VASPACE;
+	switch (mm->type) {
+	case MEM_AREA_RES_VASPACE:
+	case MEM_AREA_SHM_VASPACE:
+	case MEM_AREA_NEX_DYN_VASPACE:
+	case MEM_AREA_TEE_DYN_VASPACE:
+		return true;
+	default:
+		return false;
+	}
 }
 
 /*
diff --git a/core/include/mm/page_alloc.h b/core/include/mm/page_alloc.h
new file mode 100644
index 00000000000..9b92e2a3811
--- /dev/null
+++ b/core/include/mm/page_alloc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#ifndef __MM_PAGE_ALLOC_H
+#define __MM_PAGE_ALLOC_H
+
+#include <malloc_flags.h>
+#include <types_ext.h>
+#include <util.h>
+
+void nex_page_alloc_init(void);
+void page_alloc_init(void);
+
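+/*
+ * virt_page_alloc() - allocate and map pages of physical memory
+ * @count: number of pages to allocate
+ * @flags: MAF_* flags such as MAF_NEX, MAF_CORE_MEM, MAF_ZERO_INIT,
+ *	   MAF_GUARD_HEAD, and MAF_GUARD_TAIL
+ *
+ * Returns the virtual address of the mapped pages, or 0 on failure.
+ */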
+vaddr_t virt_page_alloc(size_t count, uint32_t flags);
+
+#endif /*__MM_PAGE_ALLOC_H*/
diff --git a/core/include/mm/phys_mem.h b/core/include/mm/phys_mem.h
index 5ab037833f8..65a4b1b551b 100644
--- a/core/include/mm/phys_mem.h
+++ b/core/include/mm/phys_mem.h
@@ -67,4 +67,11 @@ static inline void phys_mem_stats(struct pta_stats_alloc *stats, bool reset)
 #endif
 #endif
 
+/*
+ * MAF_NEX selects nexus physical memory
+ * MAF_CORE_MEM selects core physical memory
+ * The flags are passed on to the underlying tee_mm_alloc_flags().
+ */
+tee_mm_entry_t *phys_mem_alloc_flags(size_t size, uint32_t flags);
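+
+/*
+ * Example: allocate one page of core physical memory from the nexus pools:
+ *
+ *	tee_mm_entry_t *mm = NULL;
+ *
+ *	mm = phys_mem_alloc_flags(SMALL_PAGE_SIZE, MAF_CORE_MEM | MAF_NEX);
+ *	...
+ *	tee_mm_free(mm);
+ */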
+
 #endif /*__MM_PHYS_MEM_H*/
diff --git a/core/include/mm/tee_mm.h b/core/include/mm/tee_mm.h
index 3968d280fa9..14098f6cc13 100644
--- a/core/include/mm/tee_mm.h
+++ b/core/include/mm/tee_mm.h
@@ -11,11 +11,11 @@
 #include <types_ext.h>
 
 /* Define to indicate default pool initiation */
-#define TEE_MM_POOL_NO_FLAGS            0
+#define TEE_MM_POOL_NO_FLAGS            MAF_NULL
 /* Flag to indicate that memory is allocated from hi address to low address */
-#define TEE_MM_POOL_HI_ALLOC            (1u << 0)
+#define TEE_MM_POOL_HI_ALLOC            MAF_HI_ALLOC
 /* Flag to indicate that pool should use nex_malloc instead of malloc */
-#define TEE_MM_POOL_NEX_MALLOC             (1u << 1)
+#define TEE_MM_POOL_NEX_MALLOC          MAF_NEX
 
 struct _tee_mm_entry_t {
 	struct _tee_mm_pool_t *pool;
@@ -68,12 +68,18 @@ bool tee_mm_init(tee_mm_pool_t *pool, paddr_t lo, paddr_size_t size,
 /* Kill managed memory area*/
 void tee_mm_final(tee_mm_pool_t *pool);
 
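+/*
+ * Like tee_mm_alloc() below, but with MAF_* flags that are passed on to
+ * the allocation of the internal tee_mm_entry_t. MAF_NEX is ignored and
+ * taken from the pool flags instead.
+ */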
+tee_mm_entry_t *tee_mm_alloc_flags(tee_mm_pool_t *pool, size_t size,
+				   uint32_t flags);
+
 /*
  * Allocates size number of bytes in the paged virtual address space
  * Returns a handle to the memory. The handle is used as an input to
  * the tee_mm_free function.
  */
-tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, size_t size);
+static inline tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, size_t size)
+{
+	return tee_mm_alloc_flags(pool, size, MAF_NULL);
+}
 
 /* Allocate supplied memory range if it's free */
 tee_mm_entry_t *tee_mm_alloc2(tee_mm_pool_t *pool, paddr_t base, size_t size);
diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c
index 8ec29211408..14cedd0fa5c 100644
--- a/core/mm/core_mmu.c
+++ b/core/mm/core_mmu.c
@@ -842,6 +842,8 @@ uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
 	case MEM_AREA_TEE_RAM_RW:
 	case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */
 	case MEM_AREA_NEX_RAM_RW:
+	case MEM_AREA_NEX_DYN_VASPACE:
+	case MEM_AREA_TEE_DYN_VASPACE:
 	case MEM_AREA_TEE_ASAN:
 		return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged;
 	case MEM_AREA_TEE_COHERENT:
@@ -1204,6 +1206,14 @@ static void collect_mem_ranges(struct memory_map *mem_map)
 
 	add_va_space(mem_map, MEM_AREA_RES_VASPACE, CFG_RESERVED_VASPACE_SIZE);
 	add_va_space(mem_map, MEM_AREA_SHM_VASPACE, SHM_VASPACE_SIZE);
+	if (IS_ENABLED(CFG_DYN_CONFIG)) {
+		if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
+			add_va_space(mem_map, MEM_AREA_NEX_DYN_VASPACE,
+				     ROUNDUP(CFG_NEX_DYN_VASPACE_SIZE,
+					     CORE_MMU_PGDIR_SIZE));
+		add_va_space(mem_map, MEM_AREA_TEE_DYN_VASPACE,
+			     CFG_TEE_DYN_VASPACE_SIZE);
+	}
 }
 
 static void assign_mem_granularity(struct memory_map *mem_map)
@@ -1554,6 +1564,8 @@ static void check_mem_map(struct memory_map *mem_map)
 		case MEM_AREA_RES_VASPACE:
 		case MEM_AREA_SHM_VASPACE:
 		case MEM_AREA_PAGER_VASPACE:
+		case MEM_AREA_NEX_DYN_VASPACE:
+		case MEM_AREA_TEE_DYN_VASPACE:
 			break;
 		default:
 			EMSG("Uhandled memtype %d", m->type);
@@ -1934,16 +1946,19 @@ static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr,
 
 void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm)
 {
-	struct core_mmu_table_info tbl_info;
-	unsigned int idx;
+	struct core_mmu_table_info tbl_info = { };
+	unsigned int idx = 0;
 	vaddr_t vaddr = mm->va;
 	paddr_t paddr = mm->pa;
 	ssize_t size_left = mm->size;
-	unsigned int level;
-	bool table_found;
-	uint32_t old_attr;
+	uint32_t attr = mm->attr;
+	unsigned int level = 0;
+	bool table_found = false;
+	uint32_t old_attr = 0;
 
 	assert(!((vaddr | paddr) & SMALL_PAGE_MASK));
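+	/*
+	 * Dynamic vaspace regions have no physical address assigned yet,
+	 * map such regions with empty attributes for now.
+	 */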
+	if (!paddr)
+		attr = 0;
 
 	while (size_left > 0) {
 		level = CORE_MMU_BASE_TABLE_LEVEL;
@@ -1982,7 +1997,7 @@ void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm)
 			if (old_attr)
 				panic("Page is already mapped");
 
-			core_mmu_set_entry(&tbl_info, idx, paddr, mm->attr);
+			core_mmu_set_entry(&tbl_info, idx, paddr, attr);
 			/*
 			 * Dynamic vaspace regions don't have a physical
 			 * address initially but we need to allocate and
@@ -2582,6 +2597,7 @@ static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len);
 	if (!mmap)
 		mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len);
+
 	/*
 	 * Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only
 	 * used with pager and not needed here.
@@ -2607,6 +2623,8 @@ void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len)
 		va = phys_to_virt_tee_ram(pa, len);
 		break;
 	case MEM_AREA_SHM_VASPACE:
+	case MEM_AREA_NEX_DYN_VASPACE:
+	case MEM_AREA_TEE_DYN_VASPACE:
 		/* Find VA from PA in dynamic SHM is not yet supported */
 		va = NULL;
 		break;
diff --git a/core/mm/page_alloc.c b/core/mm/page_alloc.c
new file mode 100644
index 00000000000..dec5f0b3bb0
--- /dev/null
+++ b/core/mm/page_alloc.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#include <assert.h>
+#include <kernel/boot.h>
+#include <kernel/panic.h>
+#include <malloc.h>
+#include <mm/core_mmu.h>
+#include <mm/page_alloc.h>
+#include <mm/phys_mem.h>
+#include <mm/tee_mm.h>
+#include <string.h>
+#include <types_ext.h>
+
+static tee_mm_pool_t core_virt_nex_pool __nex_bss;
+static tee_mm_pool_t core_virt_tee_pool;
+
+static void init_virt_pool(tee_mm_pool_t *pool, uint32_t flags,
+			   enum teecore_memtypes memtype)
+{
+	vaddr_t start = 0;
+	vaddr_t end = 0;
+
+	core_mmu_get_mem_by_type(memtype, &start, &end);
+	if (!start || !end)
+		panic();
+
+	if (!tee_mm_init(pool, start, end - start, SMALL_PAGE_SHIFT, flags))
+		panic();
+}
+
+void nex_page_alloc_init(void)
+{
+	init_virt_pool(&core_virt_nex_pool, TEE_MM_POOL_NEX_MALLOC,
+		       MEM_AREA_NEX_DYN_VASPACE);
+}
+
+void page_alloc_init(void)
+{
+	init_virt_pool(&core_virt_tee_pool, TEE_MM_POOL_NO_FLAGS,
+		       MEM_AREA_TEE_DYN_VASPACE);
+}
+
+vaddr_t virt_page_alloc(size_t count, uint32_t flags)
+{
+	enum teecore_memtypes memtype = 0;
+	TEE_Result res = TEE_SUCCESS;
+	tee_mm_pool_t *pool = NULL;
+	tee_mm_entry_t *mmv = NULL;
+	tee_mm_entry_t *mmp = NULL;
+	size_t vcount = count;
+	size_t pcount = count;
+	vaddr_t va = 0;
+	paddr_t pa = 0;
+
+	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) && (flags & MAF_NEX)) {
+		pool = &core_virt_nex_pool;
+		memtype = MEM_AREA_NEX_DYN_VASPACE;
+	} else {
+		pool = &core_virt_tee_pool;
+		memtype = MEM_AREA_TEE_DYN_VASPACE;
+	}
+
+	if (flags & MAF_GUARD_HEAD)
+		vcount++;
+	if (flags & MAF_GUARD_TAIL)
+		vcount++;
+
+	/* Allocate the VA range, including any unmapped guard pages */
+	mmv = tee_mm_alloc_flags(pool, vcount * SMALL_PAGE_SIZE, flags);
+	if (!mmv)
+		return 0;
+	va = tee_mm_get_smem(mmv);
+	if (flags & MAF_GUARD_HEAD)
+		va += SMALL_PAGE_SIZE;
+
+	mmp = phys_mem_alloc_flags(pcount * SMALL_PAGE_SIZE, flags);
+	if (!mmp)
+		goto err_free_mmv;
+	pa = tee_mm_get_smem(mmp);
+	assert(pa);
+
+	res = core_mmu_map_contiguous_pages(va, pa, pcount, memtype);
+	if (res)
+		goto err;
+
+	if (flags & MAF_ZERO_INIT)
+		memset((void *)va, 0, pcount * SMALL_PAGE_SIZE);
+
+	return va;
+err:
+	tee_mm_free(mmp);
+err_free_mmv:
+	tee_mm_free(mmv);
+	return 0;
+}
diff --git a/core/mm/phys_mem.c b/core/mm/phys_mem.c
index 25b522fd4c8..e6164367809 100644
--- a/core/mm/phys_mem.c
+++ b/core/mm/phys_mem.c
@@ -86,26 +86,26 @@ tee_mm_entry_t *nex_phys_mem_mm_find(paddr_t addr)
 }
 
 static tee_mm_entry_t *mm_alloc(tee_mm_pool_t *p0, tee_mm_pool_t *p1,
-				size_t size)
+				size_t size, uint32_t flags)
 {
 	tee_mm_entry_t *mm = NULL;
 
 	if (p0)
-		mm = tee_mm_alloc(p0, size);
+		mm = tee_mm_alloc_flags(p0, size, flags);
 	if (!mm && p1)
-		mm = tee_mm_alloc(p1, size);
+		mm = tee_mm_alloc_flags(p1, size, flags);
 
 	return mm;
 }
 
 tee_mm_entry_t *nex_phys_mem_core_alloc(size_t size)
 {
-	return mm_alloc(nex_core_pool, NULL, size);
+	return mm_alloc(nex_core_pool, NULL, size, MAF_NULL);
 }
 
 tee_mm_entry_t *nex_phys_mem_ta_alloc(size_t size)
 {
-	return mm_alloc(nex_ta_pool, nex_core_pool, size);
+	return mm_alloc(nex_ta_pool, nex_core_pool, size, MAF_NULL);
 }
 
 static tee_mm_entry_t *mm_alloc2(tee_mm_pool_t *p0, tee_mm_pool_t *p1,
@@ -199,12 +199,12 @@ tee_mm_entry_t *phys_mem_core_alloc(size_t size)
 	 * should normally be able to use one pool only, but if we have two
 	 * make sure to use both even for core allocations.
 	 */
-	return mm_alloc(core_pool, ta_pool, size);
+	return mm_alloc(core_pool, ta_pool, size, MAF_NULL);
 }
 
 tee_mm_entry_t *phys_mem_ta_alloc(size_t size)
 {
-	return mm_alloc(ta_pool, core_pool, size);
+	return mm_alloc(ta_pool, core_pool, size, MAF_NULL);
 }
 
 tee_mm_entry_t *phys_mem_alloc2(paddr_t base, size_t size)
@@ -222,3 +222,21 @@ void phys_mem_stats(struct pta_stats_alloc *stats, bool reset)
 }
 #endif /*CFG_WITH_STATS*/
 #endif /*CFG_NS_VIRTUALIZATION*/
+
+tee_mm_entry_t *phys_mem_alloc_flags(size_t size, uint32_t flags)
+{
+	tee_mm_pool_t *core = nex_core_pool;
+	tee_mm_pool_t *ta = nex_ta_pool;
+
+#if defined(CFG_NS_VIRTUALIZATION)
+	if (!(flags & MAF_NEX)) {
+		core = core_pool;
+		ta = ta_pool;
+	}
+#endif
+
+	if (flags & MAF_CORE_MEM)
+		return mm_alloc(core, NULL, size, flags);
+	else
+		return mm_alloc(ta, core, size, flags);
+}
diff --git a/core/mm/sub.mk b/core/mm/sub.mk
index d9e3a3f27e9..4f5d6bb6719 100644
--- a/core/mm/sub.mk
+++ b/core/mm/sub.mk
@@ -11,3 +11,4 @@ ifneq ($(CFG_CORE_FFA),y)
 srcs-$(CFG_CORE_DYN_SHM) += mobj_dyn_shm.c
 endif
 srcs-y += boot_mem.c
+srcs-y += page_alloc.c
diff --git a/core/mm/tee_mm.c b/core/mm/tee_mm.c
index 0596b724b25..99a0884f1ee 100644
--- a/core/mm/tee_mm.c
+++ b/core/mm/tee_mm.c
@@ -12,30 +12,6 @@
 #include <trace.h>
 #include <util.h>
 
-static void *pmalloc(tee_mm_pool_t *pool, size_t size)
-{
-	if (pool->flags & TEE_MM_POOL_NEX_MALLOC)
-		return nex_malloc(size);
-	else
-		return malloc(size);
-}
-
-static void *pcalloc(tee_mm_pool_t *pool, size_t num_el, size_t size)
-{
-	if (pool->flags & TEE_MM_POOL_NEX_MALLOC)
-		return nex_calloc(num_el, size);
-	else
-		return calloc(num_el, size);
-}
-
-static void pfree(tee_mm_pool_t *pool, void *ptr)
-{
-	if (pool->flags & TEE_MM_POOL_NEX_MALLOC)
-		nex_free(ptr);
-	else
-		free(ptr);
-}
-
 bool tee_mm_init(tee_mm_pool_t *pool, paddr_t lo, paddr_size_t size,
 		 uint8_t shift, uint32_t flags)
 {
@@ -58,7 +34,9 @@ bool tee_mm_init(tee_mm_pool_t *pool, paddr_t lo, paddr_size_t size,
 		.flags = flags,
 	};
 
-	pool->entry = pcalloc(pool, 1, sizeof(tee_mm_entry_t));
+	pool->entry = malloc_flags(pool->flags | MAF_ZERO_INIT, NULL,
+				   MALLOC_DEFAULT_ALIGNMENT,
+				   sizeof(tee_mm_entry_t));
 	if (pool->entry == NULL)
 		return false;
 
@@ -78,7 +56,7 @@ void tee_mm_final(tee_mm_pool_t *pool)
 
 	while (pool->entry->next != NULL)
 		tee_mm_free(pool->entry->next);
-	pfree(pool, pool->entry);
+	free_flags(pool->flags, pool->entry);
 	pool->entry = NULL;
 }
 
@@ -141,19 +119,23 @@ static inline void update_max_allocated(tee_mm_pool_t *pool __unused)
 }
 #endif /* CFG_WITH_STATS */
 
-tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, size_t size)
+tee_mm_entry_t *tee_mm_alloc_flags(tee_mm_pool_t *pool, size_t size,
+				   uint32_t flags)
 {
-	size_t psize;
-	tee_mm_entry_t *entry;
-	tee_mm_entry_t *nn;
-	size_t remaining;
-	uint32_t exceptions;
+	size_t psize = 0;
+	tee_mm_entry_t *entry = NULL;
+	tee_mm_entry_t *nn = NULL;
+	size_t remaining = 0;
+	uint32_t exceptions = 0;
 
 	/* Check that pool is initialized */
 	if (!pool || !pool->entry)
 		return NULL;
 
-	nn = pmalloc(pool, sizeof(tee_mm_entry_t));
+	flags &= ~MAF_NEX;	/* This flag must come from pool->flags */
+	flags |= pool->flags;
+	nn = malloc_flags(flags, NULL, MALLOC_DEFAULT_ALIGNMENT,
+			  sizeof(tee_mm_entry_t));
 	if (!nn)
 		return NULL;
 
@@ -223,7 +205,7 @@ tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, size_t size)
 	return nn;
 err:
 	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
-	pfree(pool, nn);
+	free_flags(flags, nn);
 	return NULL;
 }
 
@@ -264,7 +246,8 @@ tee_mm_entry_t *tee_mm_alloc2(tee_mm_pool_t *pool, paddr_t base, size_t size)
 	if ((base + size) < base || base < pool->lo)
 		return NULL;
 
-	mm = pmalloc(pool, sizeof(tee_mm_entry_t));
+	mm = malloc_flags(pool->flags, NULL, MALLOC_DEFAULT_ALIGNMENT,
+			  sizeof(tee_mm_entry_t));
 	if (!mm)
 		return NULL;
 
@@ -299,7 +282,7 @@ tee_mm_entry_t *tee_mm_alloc2(tee_mm_pool_t *pool, paddr_t base, size_t size)
 	return mm;
 err:
 	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
-	pfree(pool, mm);
+	free_flags(pool->flags, mm);
 	return NULL;
 }
 
@@ -324,7 +307,7 @@ void tee_mm_free(tee_mm_entry_t *p)
 	entry->next = entry->next->next;
 	cpu_spin_unlock_xrestore(&p->pool->lock, exceptions);
 
-	pfree(p->pool, p);
+	free_flags(p->pool->flags, p);
 }
 
 size_t tee_mm_get_bytes(const tee_mm_entry_t *mm)
diff --git a/lib/libutils/isoc/include/malloc_flags.h b/lib/libutils/isoc/include/malloc_flags.h
index 58b1df3b085..42b3adb1a4d 100644
--- a/lib/libutils/isoc/include/malloc_flags.h
+++ b/lib/libutils/isoc/include/malloc_flags.h
@@ -22,9 +22,25 @@
  * avoid needless translation of one class of flags to another class of
  * flags.
  */
-#define MAF_NULL	0x0	/* Passed if no flags are needed */
-#define MAF_ZERO_INIT	0x1	/* Zero initialize the allocated buffer */
-#define MAF_NEX		0x2	/* Allocate from nexus heap */
-#define MAF_FREE_WIPE	0x4	/* Free wipes allocated buffer */
+#define MAF_NULL	0x00	/* Passed if no flags are needed */
+#define MAF_ZERO_INIT	0x01	/* Zero initialize the allocated buffer */
+#define MAF_NEX		0x02	/* Allocate from nexus heap */
+#define MAF_FREE_WIPE	0x04	/* Free wipes allocated buffer */
+/*
+ * Used by tee_mm_init() to indicate that the pool should allocate
+ * from high address to low address.
+ */
+#define MAF_HI_ALLOC	0x10
+/*
+ * Used by phys_mem_alloc_flags() to indicate whether physical memory
+ * should be allocated from the Core or TA physical memory pool.
+ */
+#define MAF_CORE_MEM	0x20
+/*
+ * Used by virt_page_alloc() to indicate whether the allocated memory
+ * should be guarded by an unmapped page at the beginning and end.
+ */
+#define MAF_GUARD_HEAD	0x40
+#define MAF_GUARD_TAIL	0x80
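+
+/*
+ * For example,
+ *	virt_page_alloc(2, MAF_ZERO_INIT | MAF_GUARD_HEAD | MAF_GUARD_TAIL)
+ * allocates and maps two zero-initialized pages with an unmapped guard
+ * page before and after the mapping.
+ */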
 
 #endif /*__MALLOC_FLAGS_H*/