+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#include <assert.h>
+#include <kernel/boot.h>
+#include <kernel/panic.h>
+#include <malloc.h>
+#include <mm/core_mmu.h>
+#include <mm/page_alloc.h>
+#include <mm/phys_mem.h>
+#include <mm/tee_mm.h>
+#include <string.h>
+#include <types_ext.h>
+
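+/*
+ * Virtual address space pools for dynamic core mappings. With
+ * CFG_NS_VIRTUALIZATION, nexus (MAF_NEX) allocations use a separate
+ * pool kept in nexus memory (__nex_bss).
+ */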
+static tee_mm_pool_t core_virt_nex_pool __nex_bss;
+static tee_mm_pool_t core_virt_tee_pool;
+
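+/* Initialize a virtual address pool covering the given memory area */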
+static void init_virt_pool(tee_mm_pool_t *pool, uint32_t flags,
+			   enum teecore_memtypes memtype)
+{
+	vaddr_t start = 0;
+	vaddr_t end = 0;
+
+	core_mmu_get_mem_by_type(memtype, &start, &end);
+	if (!start || !end)
+		panic();
+
+	if (!tee_mm_init(pool, start, end - start, SMALL_PAGE_SHIFT, flags))
+		panic();
+}
+
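+/* Set up the pool used for nexus (MAF_NEX) allocations */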
+void nex_page_alloc_init(void)
+{
+	init_virt_pool(&core_virt_nex_pool, TEE_MM_POOL_NEX_MALLOC,
+		       MEM_AREA_NEX_DYN_VASPACE);
+}
+
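+/* Set up the default pool used for all other core page allocations */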
+void page_alloc_init(void)
+{
+	init_virt_pool(&core_virt_tee_pool, TEE_MM_POOL_NO_FLAGS,
+		       MEM_AREA_TEE_DYN_VASPACE);
+}
+
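+/*
+ * Allocate and map @count physically contiguous pages of core memory.
+ * Returns the virtual address of the mapping, or 0 on failure.
+ */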
+vaddr_t virt_page_alloc(size_t count, uint32_t flags)
+{
+	enum teecore_memtypes memtype = 0;
+	TEE_Result res = TEE_SUCCESS;
+	tee_mm_pool_t *pool = NULL;
+	tee_mm_entry_t *mmv = NULL;
+	tee_mm_entry_t *mmp = NULL;
+	size_t vcount = count;
+	size_t pcount = count;
+	vaddr_t va = 0;
+	paddr_t pa = 0;
+
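+	/* Nexus allocations get their own VA pool and memory type */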
+	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) && (flags & MAF_NEX)) {
+		pool = &core_virt_nex_pool;
+		memtype = MEM_AREA_NEX_DYN_VASPACE;
+	} else {
+		pool = &core_virt_tee_pool;
+		memtype = MEM_AREA_TEE_DYN_VASPACE;
+	}
+
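+	/*
+	 * Guard pages consume virtual space only: they are never mapped,
+	 * so an overrun of the allocation faults instead of corrupting a
+	 * neighbouring allocation.
+	 */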
+	if (flags & MAF_GUARD_HEAD)
+		vcount++;
+	if (flags & MAF_GUARD_TAIL)
+		vcount++;
+
+	/* Reserve virtual space, including any unmapped guard pages */
+	mmv = tee_mm_alloc_flags(pool, vcount * SMALL_PAGE_SIZE, flags);
+	if (!mmv)
+		return 0;
+	va = tee_mm_get_smem(mmv);
+	if (flags & MAF_GUARD_HEAD)
+		va += SMALL_PAGE_SIZE;
+
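+	/* Allocate the physically contiguous pages backing the mapping */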
+	mmp = phys_mem_alloc_flags(pcount * SMALL_PAGE_SIZE, flags);
+	if (!mmp)
+		goto err_free_mmv;
+	pa = tee_mm_get_smem(mmp);
+	assert(pa);
+
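+	/* Map only the payload pages, leaving guard pages unmapped */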
+	res = core_mmu_map_contiguous_pages(va, pa, pcount, memtype);
+	if (res)
+		goto err;
+
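+	/* The mapping is live, so the pages can be cleared if requested */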
+	if (flags & MAF_ZERO_INIT)
+		memset((void *)va, 0, pcount * SMALL_PAGE_SIZE);
+
+	return va;
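+/* Unwind: release the physical pages first, then the virtual range */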
+err:
+	tee_mm_free(mmp);
+err_free_mmv:
+	tee_mm_free(mmv);
+	return 0;
+}