HermitOS / kernel · Commits

Commit 88e516d5

Move map_heap to arch

Authored 2 years ago by Martin Kröning
Parent: a455cbfa
Showing 3 changed files with 38 additions and 22 deletions:

- src/arch/aarch64/mm/paging.rs: 15 additions, 0 deletions
- src/arch/x86_64/mm/paging.rs: 15 additions, 0 deletions
- src/mm/mod.rs: 8 additions, 22 deletions
src/arch/aarch64/mm/paging.rs (+15 −0)

```diff
@@ -592,6 +592,21 @@ pub fn map<S: PageSize>(
 	root_pagetable.map_pages(range, physical_address, flags);
 }
 
+pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) {
+	let flags = {
+		let mut flags = PageTableEntryFlags::empty();
+		flags.normal().writable().execute_disable();
+		flags
+	};
+
+	let virt_addrs = (0..count).map(|n| virt_addr + n * S::SIZE as usize);
+
+	for virt_addr in virt_addrs {
+		let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize).unwrap();
+		map::<S>(virt_addr, phys_addr, 1, flags);
+	}
+}
+
 pub fn unmap<S: PageSize>(virtual_address: VirtAddr, count: usize) {
 	trace!(
 		"Unmapping virtual address {:#X} ({} pages)",
```
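For illustration, here is a minimal standalone sketch of the allocate-and-map loop that the new `map_heap` implements. Plain `usize` addresses stand in for Hermit's `VirtAddr`/`PhysAddr` types, and `allocate_aligned` is a stub, not the real physical-memory allocator:

```rust
// Sketch of map_heap's structure under the assumptions above.
const PAGE_SIZE: usize = 4096; // assuming 4 KiB base pages

// Hypothetical stand-in for physicalmem::allocate_aligned.
fn allocate_aligned(_size: usize, _align: usize) -> Option<usize> {
    Some(0x10_0000) // dummy physical frame for the sketch
}

fn map_heap(virt_addr: usize, count: usize) {
    // One page-aligned virtual address per page to map, mirroring the
    // (0..count).map(|n| virt_addr + n * S::SIZE) iterator in the diff.
    let virt_addrs = (0..count).map(|n| virt_addr + n * PAGE_SIZE);

    for virt_addr in virt_addrs {
        // Back each virtual page with a freshly allocated physical frame;
        // like the diff, this unwraps (panics) if allocation fails.
        let phys_addr = allocate_aligned(PAGE_SIZE, PAGE_SIZE).unwrap();
        println!("map {virt_addr:#x} -> {phys_addr:#x}");
    }
}

fn main() {
    map_heap(0x4000_0000, 4);
}
```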
src/arch/x86_64/mm/paging.rs (+15 −0)

```diff
@@ -190,6 +190,21 @@ pub fn map<S: PageSize>(
 	apic::ipi_tlb_flush();
 }
 
+pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) {
+	let flags = {
+		let mut flags = PageTableEntryFlags::empty();
+		flags.normal().writable().execute_disable();
+		flags
+	};
+
+	let virt_addrs = (0..count).map(|n| virt_addr + n * S::SIZE as usize);
+
+	for virt_addr in virt_addrs {
+		let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize).unwrap();
+		map::<S>(virt_addr, phys_addr, 1, flags);
+	}
+}
+
 unsafe fn recursive_page_table() -> RecursivePageTable<'static> {
 	let level_4_table_addr = 0xFFFF_FFFF_FFFF_F000;
 	let level_4_table_ptr = ptr::from_exposed_addr_mut(level_4_table_addr);
```
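The trailing context shows x86_64's recursive page-table access: with the last level-4 entry pointing at the level-4 table itself, the table becomes visible at the fixed virtual address 0xFFFF_FFFF_FFFF_F000. A quick standalone check of that address arithmetic (a sketch, not Hermit's code; it assumes recursive index 511, which the constant implies):

```rust
// Why a recursively mapped level-4 table appears at 0xFFFF_FFFF_FFFF_F000:
// the virtual address with all four table indices set to 511 and page
// offset 0 walks through the self-referencing entry back to the PML4 frame.
fn recursive_l4_addr(i: u64) -> u64 {
    let addr = (i << 39) | (i << 30) | (i << 21) | (i << 12);
    // Sign-extend bit 47 to make the address canonical on x86_64.
    (((addr as i64) << 16) >> 16) as u64
}

fn main() {
    assert_eq!(recursive_l4_addr(511), 0xFFFF_FFFF_FFFF_F000);
}
```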
src/mm/mod.rs (+8 −22)

```diff
@@ -51,24 +51,10 @@ pub fn task_heap_end() -> VirtAddr {
 	unsafe { HEAP_END_ADDRESS }
 }
 
-fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) {
-	let flags = {
-		let mut flags = PageTableEntryFlags::empty();
-		flags.normal().writable().execute_disable();
-		flags
-	};
-
-	let virt_addrs = (0..count).map(|n| virt_addr + n * S::SIZE as usize);
-
-	for virt_addr in virt_addrs {
-		let phys_addr = arch::mm::physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize).unwrap();
-		arch::mm::paging::map::<S>(virt_addr, phys_addr, 1, flags);
-	}
-}
-
 #[cfg(target_os = "none")]
 pub fn init() {
+	use crate::arch::mm::paging;
+
 	// Calculate the start and end addresses of the 2 MiB page(s) that map the kernel.
 	unsafe {
 		KERNEL_START_ADDRESS = env::get_base_address().align_down_to_large_page();
@@ -182,7 +168,7 @@ pub fn init() {
 	// try to map a huge page
 	let mut counter = if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
-		map_heap::<HugePageSize>(virt_addr, 1);
+		paging::map_heap::<HugePageSize>(virt_addr, 1);
 		HugePageSize::SIZE as usize
 	} else {
 		0
@@ -190,13 +176,13 @@ pub fn init() {
 	if counter == 0 && has_2mib_pages {
 		// fall back to large pages
-		map_heap::<LargePageSize>(virt_addr, 1);
+		paging::map_heap::<LargePageSize>(virt_addr, 1);
 		counter = LargePageSize::SIZE as usize;
 	}
 
 	if counter == 0 {
 		// fall back to normal pages, but map at least the size of a large page
-		map_heap::<BasePageSize>(
+		paging::map_heap::<BasePageSize>(
 			virt_addr,
 			LargePageSize::SIZE as usize / BasePageSize::SIZE as usize,
 		);
@@ -219,21 +205,21 @@ pub fn init() {
 		&& align_down!(map_addr.as_usize(), HugePageSize::SIZE as usize) == 0
 	{
 		let size = align_down!(map_size, HugePageSize::SIZE as usize);
-		map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize);
+		paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize);
 		map_size -= size;
 		map_addr += size;
 	}
 
 	if has_2mib_pages && map_size > LargePageSize::SIZE as usize {
 		let size = align_down!(map_size, LargePageSize::SIZE as usize);
-		map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize);
+		paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize);
 		map_size -= size;
 		map_addr += size;
 	}
 
 	if map_size > BasePageSize::SIZE as usize {
 		let size = align_down!(map_size, BasePageSize::SIZE as usize);
-		map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize);
+		paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize);
 		map_size -= size;
 		map_addr += size;
 	}
```
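The call sites in `init()` split the heap range greedily by page size: as much as possible with 1 GiB pages, then 2 MiB, then 4 KiB. A standalone sketch of that split, ignoring the feature checks (`has_1gib_pages`, `has_2mib_pages`) and the address-alignment handling in the real code; the page sizes are the usual x86_64 ones, assumed here for illustration:

```rust
// Greedy page-size split, as in init(): count how many pages of each size
// cover a range of map_size bytes, largest pages first.
const HUGE: usize = 1 << 30; // 1 GiB
const LARGE: usize = 1 << 21; // 2 MiB
const BASE: usize = 1 << 12; // 4 KiB

// Same rounding as Hermit's align_down! macro for power-of-two alignments.
fn align_down(value: usize, alignment: usize) -> usize {
    value & !(alignment - 1)
}

fn split(mut map_size: usize) -> (usize, usize, usize) {
    let huge = align_down(map_size, HUGE) / HUGE;
    map_size -= huge * HUGE;
    let large = align_down(map_size, LARGE) / LARGE;
    map_size -= large * LARGE;
    let base = map_size / BASE;
    (huge, large, base)
}

fn main() {
    // A heap of 3 GiB + 10 MiB + 12 KiB splits into 3 huge, 5 large,
    // and 3 base pages.
    assert_eq!(split(3 * HUGE + 5 * LARGE + 3 * BASE), (3, 5, 3));
}
```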