Add dynamic vaspace #7285

Merged: 8 commits, Mar 12, 2025
2 changes: 2 additions & 0 deletions core/arch/arm/arm.mk
@@ -27,6 +27,8 @@ endif

CFG_MMAP_REGIONS ?= 13
CFG_RESERVED_VASPACE_SIZE ?= (1024 * 1024 * 10)
CFG_NEX_DYN_VASPACE_SIZE ?= (1024 * 1024)
CFG_TEE_DYN_VASPACE_SIZE ?= (1024 * 1024)

ifeq ($(CFG_ARM64_core),y)
ifeq ($(CFG_ARM32_core),y)
10 changes: 10 additions & 0 deletions core/arch/arm/kernel/boot.c
@@ -33,6 +33,7 @@
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/page_alloc.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
@@ -955,6 +956,15 @@ static void init_primary(unsigned long pageable_part)
*/
assert(va && va <= boot_cached_mem_end);
boot_cached_mem_end = va;

/*
* This is needed to enable virt_page_alloc() now that
* boot_mem_alloc() can't be used any longer.
*/
if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
nex_page_alloc_init();
else
page_alloc_init();
}

if (IS_ENABLED(CFG_WITH_PAGER)) {
2 changes: 2 additions & 0 deletions core/arch/arm/kernel/virtualization.c
@@ -18,6 +18,7 @@
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/page_alloc.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <platform_config.h>
@@ -327,6 +328,7 @@ TEE_Result virt_guest_created(uint16_t guest_id)
malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
phys_mem_init(0, 0, tee_mm_get_smem(prtn->ta_ram),
tee_mm_get_bytes(prtn->ta_ram));
page_alloc_init();
/* Initialize threads */
thread_init_threads();
/* Do the preinitcalls */
3 changes: 2 additions & 1 deletion core/arch/arm/mm/core_mmu_lpae.c
@@ -221,7 +221,8 @@
#define MAX_XLAT_TABLES (XLAT_TABLE_TEE_EXTRA + \
XLAT_TABLE_VIRTUALIZATION_EXTRA + \
XLAT_TABLE_ASLR_EXTRA + \
XLAT_TABLE_USER_EXTRA)
XLAT_TABLE_USER_EXTRA + \
IS_ENABLED(CFG_DYN_CONFIG))
#endif /*!MAX_XLAT_TABLES*/

#if (CORE_MMU_BASE_TABLE_LEVEL == 0)
2 changes: 2 additions & 0 deletions core/arch/riscv/riscv.mk
@@ -14,6 +14,8 @@ include mk/cc-option.mk

CFG_MMAP_REGIONS ?= 13
CFG_RESERVED_VASPACE_SIZE ?= (1024 * 1024 * 10)
CFG_NEX_DYN_VASPACE_SIZE ?= (1024 * 1024)
CFG_TEE_DYN_VASPACE_SIZE ?= (1024 * 1024)

ifeq ($(CFG_RV64_core),y)
CFG_KERN_LINKER_FORMAT ?= elf64-littleriscv
17 changes: 15 additions & 2 deletions core/include/mm/core_mmu.h
@@ -65,6 +65,8 @@
* MEM_AREA_INIT_RAM_RX: init private read-only/executable memory (secure)
* MEM_AREA_NEX_RAM_RO: nexus private read-only/non-executable memory (secure)
* MEM_AREA_NEX_RAM_RW: nexus private r/w/non-executable memory (secure)
* MEM_AREA_NEX_DYN_VASPACE: nexus private dynamic memory map (secure)
* MEM_AREA_TEE_DYN_VASPACE: core private dynamic memory (secure)
* MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE)
* MEM_AREA_TEE_ASAN: core address sanitizer RAM (secure, reserved to TEE)
* MEM_AREA_IDENTITY_MAP_RX: core identity mapped r/o executable memory (secure)
@@ -94,6 +96,8 @@ enum teecore_memtypes {
MEM_AREA_INIT_RAM_RX,
MEM_AREA_NEX_RAM_RO,
MEM_AREA_NEX_RAM_RW,
MEM_AREA_NEX_DYN_VASPACE,
MEM_AREA_TEE_DYN_VASPACE,
MEM_AREA_TEE_COHERENT,
MEM_AREA_TEE_ASAN,
MEM_AREA_IDENTITY_MAP_RX,
@@ -128,6 +132,8 @@ static inline const char *teecore_memtype_name(enum teecore_memtypes type)
[MEM_AREA_INIT_RAM_RX] = "INIT_RAM_RX",
[MEM_AREA_NEX_RAM_RO] = "NEX_RAM_RO",
[MEM_AREA_NEX_RAM_RW] = "NEX_RAM_RW",
[MEM_AREA_NEX_DYN_VASPACE] = "NEX_DYN_VASPACE",
[MEM_AREA_TEE_DYN_VASPACE] = "TEE_DYN_VASPACE",
[MEM_AREA_TEE_ASAN] = "TEE_ASAN",
[MEM_AREA_IDENTITY_MAP_RX] = "IDENTITY_MAP_RX",
[MEM_AREA_TEE_COHERENT] = "TEE_COHERENT",
@@ -500,8 +506,15 @@ static inline size_t core_mmu_get_block_offset(
*/
static inline bool core_mmu_is_dynamic_vaspace(struct tee_mmap_region *mm)
{
return mm->type == MEM_AREA_RES_VASPACE ||
mm->type == MEM_AREA_SHM_VASPACE;
switch (mm->type) {
case MEM_AREA_RES_VASPACE:
case MEM_AREA_SHM_VASPACE:
case MEM_AREA_NEX_DYN_VASPACE:
case MEM_AREA_TEE_DYN_VASPACE:
return true;
default:
return false;
}
}

/*
18 changes: 18 additions & 0 deletions core/include/mm/page_alloc.h
@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (c) 2024, Linaro Limited
*/

#ifndef __MM_PAGE_ALLOC_H
#define __MM_PAGE_ALLOC_H

#include <malloc_flags.h>
#include <types_ext.h>
#include <util.h>

void nex_page_alloc_init(void);
void page_alloc_init(void);

vaddr_t virt_page_alloc(size_t count, uint32_t flags);

#endif /*__MM_PAGE_ALLOC_H*/
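
For reference, a minimal usage sketch of the new interface (hypothetical caller, not part of this patch; MAF_ZERO_INIT is assumed from malloc_flags.h, as used by page_alloc.c further down):

#include <malloc_flags.h>
#include <mm/page_alloc.h>

static void *alloc_scratch_pages(size_t count)
{
	/* Allocate count zero-initialized pages from the core (TEE) pool */
	vaddr_t va = virt_page_alloc(count, MAF_ZERO_INIT);

	if (!va)
		return NULL;	/* Out of VA space, physical pages or map entries */
	return (void *)va;
}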
7 changes: 7 additions & 0 deletions core/include/mm/phys_mem.h
@@ -67,4 +67,11 @@ static inline void phys_mem_stats(struct pta_stats_alloc *stats, bool reset)
#endif
#endif

/*
* MAF_NEX selects nexus physical memory
* MAF_CORE_MEM selects core physical memory
* The flags are passed on to the underlying implementation, tee_mm_alloc_flags().
*/
tee_mm_entry_t *phys_mem_alloc_flags(size_t size, uint32_t flags);

#endif /*__MM_PHYS_MEM_H*/
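
A rough sketch of how the flags pick the backing pool (illustration only; SMALL_PAGE_SIZE comes from mm/core_mmu.h and MAF_CORE_MEM is assumed to be the non-nexus selector named in the comment above):

#include <malloc_flags.h>
#include <mm/core_mmu.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>

static paddr_t grab_one_page(bool nexus)
{
	uint32_t maf = nexus ? MAF_NEX : MAF_CORE_MEM;
	tee_mm_entry_t *mmp = phys_mem_alloc_flags(SMALL_PAGE_SIZE, maf);

	if (!mmp)
		return 0;
	/* tee_mm_get_smem() returns the start (physical) address of the entry */
	return tee_mm_get_smem(mmp);
}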
14 changes: 10 additions & 4 deletions core/include/mm/tee_mm.h
@@ -11,11 +11,11 @@
#include <types_ext.h>

/* Define to indicate default pool initiation */
#define TEE_MM_POOL_NO_FLAGS 0
#define TEE_MM_POOL_NO_FLAGS MAF_NULL
/* Flag to indicate that memory is allocated from hi address to low address */
#define TEE_MM_POOL_HI_ALLOC (1u << 0)
#define TEE_MM_POOL_HI_ALLOC MAF_HI_ALLOC
/* Flag to indicate that pool should use nex_malloc instead of malloc */
#define TEE_MM_POOL_NEX_MALLOC (1u << 1)
#define TEE_MM_POOL_NEX_MALLOC MAF_NEX

struct _tee_mm_entry_t {
struct _tee_mm_pool_t *pool;
@@ -68,12 +68,18 @@ bool tee_mm_init(tee_mm_pool_t *pool, paddr_t lo, paddr_size_t size,
/* Kill managed memory area*/
void tee_mm_final(tee_mm_pool_t *pool);

tee_mm_entry_t *tee_mm_alloc_flags(tee_mm_pool_t *pool, size_t size,
uint32_t flags);

/*
* Allocates size number of bytes in the paged virtual address space
* Returns a handle to the memory. The handle is used as an input to
* the tee_mm_free function.
*/
tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, size_t size);
static inline tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, size_t size)
{
return tee_mm_alloc_flags(pool, size, MAF_NULL);
}

/* Allocate supplied memory range if it's free */
tee_mm_entry_t *tee_mm_alloc2(tee_mm_pool_t *pool, paddr_t base, size_t size);
30 changes: 24 additions & 6 deletions core/mm/core_mmu.c
@@ -842,6 +842,8 @@ uint32_t core_mmu_type_to_attr(enum teecore_memtypes t)
case MEM_AREA_TEE_RAM_RW:
case MEM_AREA_NEX_RAM_RO: /* This has to be r/w during init runtime */
case MEM_AREA_NEX_RAM_RW:
case MEM_AREA_NEX_DYN_VASPACE:
case MEM_AREA_TEE_DYN_VASPACE:
case MEM_AREA_TEE_ASAN:
return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged;
case MEM_AREA_TEE_COHERENT:
@@ -1204,6 +1206,14 @@ static void collect_mem_ranges(struct memory_map *mem_map)

add_va_space(mem_map, MEM_AREA_RES_VASPACE, CFG_RESERVED_VASPACE_SIZE);
add_va_space(mem_map, MEM_AREA_SHM_VASPACE, SHM_VASPACE_SIZE);
if (IS_ENABLED(CFG_DYN_CONFIG)) {
if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
add_va_space(mem_map, MEM_AREA_NEX_DYN_VASPACE,
ROUNDUP(CFG_NEX_DYN_VASPACE_SIZE,
CORE_MMU_PGDIR_SIZE));
add_va_space(mem_map, MEM_AREA_TEE_DYN_VASPACE,
CFG_TEE_DYN_VASPACE_SIZE);
}
}

static void assign_mem_granularity(struct memory_map *mem_map)
@@ -1554,6 +1564,8 @@ static void check_mem_map(struct memory_map *mem_map)
case MEM_AREA_RES_VASPACE:
case MEM_AREA_SHM_VASPACE:
case MEM_AREA_PAGER_VASPACE:
case MEM_AREA_NEX_DYN_VASPACE:
case MEM_AREA_TEE_DYN_VASPACE:
break;
default:
EMSG("Uhandled memtype %d", m->type);
@@ -1934,16 +1946,19 @@ static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr,

void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm)
{
struct core_mmu_table_info tbl_info;
unsigned int idx;
struct core_mmu_table_info tbl_info = { };
unsigned int idx = 0;
vaddr_t vaddr = mm->va;
paddr_t paddr = mm->pa;
ssize_t size_left = mm->size;
unsigned int level;
bool table_found;
uint32_t old_attr;
uint32_t attr = mm->attr;
unsigned int level = 0;
bool table_found = false;
uint32_t old_attr = 0;

assert(!((vaddr | paddr) & SMALL_PAGE_MASK));
if (!paddr)
attr = 0;
Contributor

Should there be some kind of warning? Maybe a DMSG().

Contributor Author

No, this is normal. The alternative is to make sure that struct tee_mmap_region without an assigned physical address never has the attr field set. I didn't think it was worth the effort to test for that in the other place(s).
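
To make the point concrete, a simplified sketch (hypothetical helper, for illustration only) of the life cycle of a dynamic vaspace region:

#include <assert.h>
#include <mm/core_mmu.h>

/*
 * At boot, MEM_AREA_*_DYN_VASPACE regions are collected with pa == 0,
 * so core_mmu_map_region() writes invalid (attr == 0) entries for them.
 */
static void map_dyn_region_at_boot(struct mmu_partition *prtn,
				   struct tee_mmap_region *mm)
{
	assert(core_mmu_is_dynamic_vaspace(mm) && !mm->pa);
	core_mmu_map_region(prtn, mm);
	/*
	 * Backing pages arrive later: virt_page_alloc() allocates
	 * physical memory and installs valid entries with
	 * core_mmu_map_contiguous_pages(), so no warning is needed here.
	 */
}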


while (size_left > 0) {
level = CORE_MMU_BASE_TABLE_LEVEL;
@@ -1982,7 +1997,7 @@ void core_mmu_map_region(struct mmu_partition *prtn, struct tee_mmap_region *mm)
if (old_attr)
panic("Page is already mapped");

core_mmu_set_entry(&tbl_info, idx, paddr, mm->attr);
core_mmu_set_entry(&tbl_info, idx, paddr, attr);
/*
* Dynamic vaspace regions don't have a physical
* address initially but we need to allocate and
@@ -2582,6 +2597,7 @@ static void *phys_to_virt_tee_ram(paddr_t pa, size_t len)
mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RO, pa, len);
if (!mmap)
mmap = find_map_by_type_and_pa(MEM_AREA_TEE_RAM_RX, pa, len);

/*
* Note that MEM_AREA_INIT_RAM_RO and MEM_AREA_INIT_RAM_RX are only
* used with pager and not needed here.
@@ -2607,6 +2623,8 @@ void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len)
va = phys_to_virt_tee_ram(pa, len);
break;
case MEM_AREA_SHM_VASPACE:
case MEM_AREA_NEX_DYN_VASPACE:
case MEM_AREA_TEE_DYN_VASPACE:
/* Find VA from PA in dynamic SHM is not yet supported */
va = NULL;
break;
97 changes: 97 additions & 0 deletions core/mm/page_alloc.c
@@ -0,0 +1,97 @@
// SPDX-License-Identifier: BSD-2-Clause
/*
* Copyright (c) 2024, Linaro Limited
*/

#include <kernel/boot.h>
#include <kernel/panic.h>
#include <malloc.h>
#include <mm/core_mmu.h>
#include <mm/page_alloc.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <string.h>
#include <types_ext.h>

static tee_mm_pool_t core_virt_nex_pool __nex_bss;
static tee_mm_pool_t core_virt_tee_pool;

static void init_virt_pool(tee_mm_pool_t *pool, uint32_t flags,
enum teecore_memtypes memtype)
{
vaddr_t start = 0;
vaddr_t end = 0;

core_mmu_get_mem_by_type(memtype, &start, &end);
if (!start || !end)
panic();

if (!tee_mm_init(pool, start, end - start, SMALL_PAGE_SHIFT, flags))
panic();
}

void nex_page_alloc_init(void)
{
init_virt_pool(&core_virt_nex_pool, TEE_MM_POOL_NEX_MALLOC,
MEM_AREA_NEX_DYN_VASPACE);
}

void page_alloc_init(void)
{
init_virt_pool(&core_virt_tee_pool, TEE_MM_POOL_NO_FLAGS,
MEM_AREA_TEE_DYN_VASPACE);
}

vaddr_t virt_page_alloc(size_t count, uint32_t flags)
{
enum teecore_memtypes memtype = 0;
TEE_Result res = TEE_SUCCESS;
tee_mm_pool_t *pool = NULL;
tee_mm_entry_t *mmv = NULL;
tee_mm_entry_t *mmp = NULL;
size_t vcount = count;
size_t pcount = count;
vaddr_t va = 0;
paddr_t pa = 0;

if (IS_ENABLED(CFG_NS_VIRTUALIZATION) && (flags & MAF_NEX)) {
pool = &core_virt_nex_pool;
memtype = MEM_AREA_NEX_DYN_VASPACE;
} else {
pool = &core_virt_tee_pool;
memtype = MEM_AREA_TEE_DYN_VASPACE;
}

if (flags & MAF_GUARD_HEAD)
vcount++;
if (flags & MAF_GUARD_TAIL)
vcount++;

/* Guard page(s) requested above are included in the VA range but left unmapped */
mmv = tee_mm_alloc_flags(pool, vcount * SMALL_PAGE_SIZE, flags);
if (!mmv)
return 0;
va = tee_mm_get_smem(mmv);
if (flags & MAF_GUARD_HEAD)
va += SMALL_PAGE_SIZE;

mmp = phys_mem_alloc_flags(pcount * SMALL_PAGE_SIZE, flags);
if (!mmp)
goto err_free_mmv;
pa = tee_mm_get_smem(mmp);
assert(pa);

res = core_mmu_map_contiguous_pages(va, pa, pcount, memtype);
if (res)
goto err;

if (flags & MAF_ZERO_INIT)
memset((void *)va, 0, pcount * SMALL_PAGE_SIZE);

return va;
err:
tee_mm_free(mmp);
err_free_mmv:
tee_mm_free(mmv);
return 0;
}
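
A short usage sketch for the guard flags (hypothetical caller; MAF_GUARD_HEAD and MAF_GUARD_TAIL are assumed from malloc_flags.h, as the code above implies):

#include <malloc_flags.h>
#include <mm/page_alloc.h>

static vaddr_t alloc_guarded_buffer(size_t pages)
{
	/*
	 * The returned VA points past an unmapped head guard page and
	 * another unmapped page follows the buffer, so overruns fault
	 * instead of silently corrupting neighbouring allocations.
	 */
	return virt_page_alloc(pages, MAF_GUARD_HEAD | MAF_GUARD_TAIL |
				      MAF_ZERO_INIT);
}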