// vmregion.cc
#include "core/vmregion.h"
#include <ebl/assert.h>
#include <ebl/memory.h>
#include <ebl/status.h>
#include <ebl/util.h>
#include "core/mem.h"
#include "core/vm.h"
#include "ebl/type_traits.h"
#include "x86-64/types.h"
using namespace core;
using ebl::MakeRefPtr;
using ebl::RefPtr;
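
// Carve a new child region VMR out of the first hole in this VMR large enough
// to hold `size` bytes at the requested alignment, and link it into children_
// in sorted order. Fails if `flags` requests capabilities this VMR lacks.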
Result<RefPtr<VmRegion>> VmRegion::allocate_vmr_compact(size_t size,
                                                        uint8_t align_pow2,
                                                        VmRegionFlags flags) {
    // Ensure child VMR capability is a subset of parent VMR capability.
    if((flags.capability & flags_.capability) != flags.capability) {
        return E::PERMISSION_DENIED;
    }
    // Ensure this is a region VMR.
    if(flags.type != VmRegionType::REGION) {
        return E::INVALID_ARGUMENT;
    }
    bool found_hole = false;
    vaddr_t new_hole_base;
    size_t new_hole_size;
    // The child VMR to insert before, to keep sorted order of children_.
    VmRegion* child = nullptr;
    // TODO: Currently, this is just a first-fit allocator.
    const vaddr_t align = ebl::max(1U << align_pow2, arch::page_size);
    assert(size % arch::page_size == 0, "Size is not page-aligned!");
    foreach_hole([&](VmRegion* x, vaddr_t hole_base, size_t hole_size) -> bool {
        auto aligned_base = ebl::align_up(hole_base, align);
        if(aligned_base + size > hole_base + hole_size) return true;
        // Found it.
        new_hole_base = aligned_base;
        new_hole_size = size;
        found_hole = true;
        child = x;
        return false;
    });
    if(!found_hole) return E::ALLOCATION_FAILED;
    // Allocate the new VMR and insert it into the children_ list.
    VmRegionFlags new_flags{};
    new_flags.type = VmRegionType::REGION;
    new_flags.capability = flags.capability;
    new_flags.is_root = 0;
    auto result = MakeRefPtr<VmRegion>(new_hole_base, new_hole_size, new_flags, aspace_);
    if(!result) return result.status();
    auto new_vmr = result.unwrap();
    new_vmr->parent_ = this;
    if(child == nullptr) {
        children_.push_back(new_vmr);
    } else {
        children_.insert_before(child, new_vmr);
    }
    return new_vmr;
}
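
// Sparse region allocation is not implemented yet; this stub discards its
// arguments and reports NOT_IMPLEMENTED.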
Result<RefPtr<VmRegion>> VmRegion::allocate_vmr_sparse(size_t size,
                                                       uint8_t align_pow2,
                                                       VmRegionFlags flags) {
    (void)size;
    (void)align_pow2;
    (void)flags;
    return E::NOT_IMPLEMENTED;
}
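
// Mapping a VmObject's pages into this region is not implemented yet; stub.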
Result<RefPtr<VmRegion>> VmRegion::map_pages(vaddr_t offset,
                                             size_t size,
                                             VmRegionFlags flags,
                                             RefPtr<VmObject> object,
                                             vaddr_t vmo_offset,
                                             arch::mmu_flags mmu_flags) {
    (void)offset;
    (void)size;
    (void)flags;
    (void)mmu_flags;
    (void)object;
    (void)vmo_offset;
    return E::NOT_IMPLEMENTED;
}
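
// Changing the MMU protection flags of a mapped range is not implemented yet; stub.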
Result<void> VmRegion::protect(vaddr_t addr, vaddr_t size, arch::mmu_flags flags) {
    (void)addr;
    (void)size;
    (void)flags;
    return E::NOT_IMPLEMENTED;
}
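
// Recursively destroy and unlink all children, then detach this VMR from its
// parent. Returns OK, or the status of the most recent child destroy that failed.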
Result<void> VmRegion::destroy() {
    E status = E::OK;
    while(!children_.empty()) {
        auto child = children_.pop_front();
        child->parent_ = nullptr;
        auto res = child->destroy();
        // A failed child destroy should not stop the loop; remember its status.
        if(!res) status = res.status();
    }
    if(parent_ != nullptr) {
        parent_->children_.remove(this);
    }
    return status;
}
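
// Return the first child in children_ (kept in sorted order) whose base address
// lies at or above base_ + offset, or nullptr if none does.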
RefPtr<VmRegion> VmRegion::find_child_above(vaddr_t offset) {
    for(auto child : children_) {
        if(child->base_ >= this->base_ + offset)
            return child;
    }
    return nullptr;
}
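
// Check whether the first child at or above `offset` starts inside the range
// [base_ + offset, base_ + offset + size).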
bool VmRegion::does_overlap_child(vaddr_t offset, size_t size) {
    auto end = this->base_ + offset + size;
    auto child = find_child_above(offset);
    if(!child) return false;
    // `end` is exclusive, so a child starting exactly at `end` does not overlap.
    return child->base_ < end;
}