Rewrote memory management again and added support for x64 and forking.

This commit is contained in:
Jonas 'Sortie' Termansen 2011-10-02 15:58:08 +02:00
parent 23c144fe3f
commit 66192d1e86
17 changed files with 1017 additions and 1001 deletions

View File

@ -177,10 +177,10 @@ namespace Maxsi
{
Memory::Set(Bins, 0, sizeof(Bins));
#ifdef SORTIX_KERNEL
Wilderness = (byte*) Sortix::VirtualMemory::heapUpper;
Wilderness = (byte*) Sortix::Memory::HEAPUPPER;
#else
// TODO: This is very 32-bit specific!
Wilderness = (byte*) 0x80000000;
Wilderness = (byte*) 0x80000000UL;
#endif
HeapStart = Wilderness;
WildernessSize = 0;
@ -199,7 +199,7 @@ namespace Maxsi
#ifdef SORTIX_KERNEL
// Check if the wilderness would grow larger than the kernel memory area.
if ( ( ((uintptr_t) Wilderness) - Sortix::VirtualMemory::heapLower ) < NeededSize ) { return false; }
if ( ( ((uintptr_t) Wilderness) - Sortix::Memory::HEAPLOWER ) < NeededSize ) { return false; }
#endif
// Figure out where the new wilderness will be.
@ -235,8 +235,13 @@ namespace Maxsi
// Get a raw unused physical page.
addr_t Page = Sortix::Page::Get();
if ( Page == 0 )
// Map the physical page to a virtual one.
addr_t VirtualAddr = NewWilderness + 4096 * PagesLeft;
if ( Page == 0 || !Sortix::Memory::MapKernel(Page, VirtualAddr) )
{
if ( Page != 0 ) { Sortix::Page::Put(Page); }
// If none is available, simply let the allocation fail
// and free everything we have allocated so far.
while ( PagesLeft < NumPages )
@ -244,16 +249,11 @@ namespace Maxsi
PagesLeft++;
addr_t OldVirtual = NewWilderness + 4096 * PagesLeft;
addr_t OldPage = Sortix::VirtualMemory::UnmapKernel(OldVirtual);
addr_t OldPage = Sortix::Memory::UnmapKernel(OldVirtual);
Sortix::Page::Put(OldPage);
}
return false;
}
// Map the physical page to a virtual one.
addr_t VirtualAddr = NewWilderness + 4096 * PagesLeft;
Sortix::VirtualMemory::MapKernel(VirtualAddr, Page);
}
}
#endif

View File

@ -26,7 +26,8 @@ endif
ifdef X86FAMILY
CPUOBJS:=$(CPUOBJS) \
$(CPU)/memorymanagement-asm.o \
$(CPU)/memorymanagement.o \
x86-family/memorymanagement.o \
$(CPU)/interrupt.o \
$(CPU)/gdt.o \
x86-family/x86-family.o
@ -51,7 +52,6 @@ time.o \
log.o \
panic.o \
keyboard.o \
memorymanagement.o \
scheduler.o \
syscall.o \
sound.o \

View File

@ -35,6 +35,15 @@ namespace Sortix
{
namespace ELF
{
// This works around an optimizer bug I ran into, where the memcpy below
// somehow gets executed before the memory is mapped. Somehow, when I
// tried to debug it, it suddenly worked. So here's some deep magic that
// somehow fixes my code.
void PreventHazardousCodeReordering()
{
Log::Print("");
}
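If the bug above really is the optimizer reordering the memcpy across the mapping calls, the opaque Log::Print call works because the compiler must assume it reads and writes memory. A lighter alternative, sketched here under that assumption (this is not what the commit does), would be a pure compiler barrier:
// Hypothetical alternative: an empty asm statement with a "memory" clobber
// forbids GCC from moving or caching memory accesses across it, and emits
// no instructions at all.
static inline void CompilerBarrier()
{
asm volatile("" ::: "memory");
}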
addr_t Construct32(Process* process, const void* file, size_t filelen)
{
if ( filelen < sizeof(Header32) ) { return 0; }
@ -85,22 +94,24 @@ namespace Sortix
return 0;
}
if ( !VirtualMemory::MapRangeUser(mapto, mapbytes) )
if ( !Memory::MapRangeUser(mapto, mapbytes) )
{
return 0;
}
// Insert our newly allocated memory into the process's segment
// list such that it can be reclaimed later.
if ( process->segments ) { process->segments->prev = segment;}
if ( process->segments ) { process->segments->prev = segment; }
segment->next = process->segments;
process->segments = segment;
PreventHazardousCodeReordering();
// Copy as much data as possible and memset the rest to 0.
byte* memdest = (byte*) virtualaddr;
byte* memsource = (byte*) ( (addr_t)file + pht->offset);
Memory::Copy(memdest, memsource, pht->filesize);
Memory::Set(memdest + pht->filesize, 0, pht->memorysize - pht->filesize);
byte* memsource = (byte*) ( ((addr_t)file) + pht->offset);
Maxsi::Memory::Copy(memdest, memsource, pht->filesize);
Maxsi::Memory::Set(memdest + pht->filesize, 0, pht->memorysize - pht->filesize);
}
return entry;

View File

@ -34,6 +34,8 @@ namespace Sortix
{
namespace Interrupt
{
const bool debugexception = true;
size_t numknownexceptions = 19;
const char* exceptions[] =
{ "Divide by zero", "Debug", "Non maskable interrupt", "Breakpoint",
@ -59,8 +61,18 @@ namespace Sortix
const char* message = ( regs->int_no < numknownexceptions )
? exceptions[regs->int_no] : "Unknown";
if ( debugexception )
{
Log::PrintF("cs=0x%x, eax=0x%zx, ebx=0x%zx, ecx=0x%zx, "
"edx=0x%zx, esi=0x%zx, edi=0x%zx, esp=0x%zx, "
"useresp=0x%zx, test=0x%zx\n",
regs->cs, regs->eax, regs->ebx, regs->ecx,
regs->edx, regs->esi, regs->edi, regs->esp,
regs->useresp, regs->useresp);
}
// Halt and catch fire if we are the kernel.
if ( (regs->cs & (0x4-1)) == 0 || regs->int_no == 13 )
if ( (regs->cs & (0x4-1)) == 0 )
{
PanicF("Unhandled CPU Exception id %zu '%s' at eip=0x%zx "
"(cr2=0x%p, err_code=0x%p)", regs->int_no, message,

View File

@ -176,8 +176,8 @@ namespace Sortix
PCI::Init();
#endif
// Initialize the paging.
Page::Init(BootInfo);
// Initialize the paging and virtual memory.
Memory::Init(BootInfo);
uint8_t* initrd = NULL;
size_t initrdsize = 0;
@ -206,9 +206,6 @@ namespace Sortix
if ( BootInfo == NULL ) { Panic("kernel.cpp: The bootinfo structure was NULL. Is your bootloader multiboot compliant?"); }
// Initialize virtual memory. TODO: This is not fully working yet.
VirtualMemory::Init();
// Initialize the kernel heap.
Maxsi::Memory::Init();
@ -224,11 +221,11 @@ namespace Sortix
Thread::Entry initstart = RunApplication;
// Create an address space for the first process.
addr_t addrspace = VirtualMemory::CreateAddressSpace();
addr_t addrspace = Memory::Fork();
// Use the new address space!
VirtualMemory::SwitchAddressSpace(addrspace);
Memory::SwitchAddressSpace(addrspace);
// Create the first process!
Process* process = new Process(addrspace);
if ( process == 0 ) { Panic("kernel.cpp: Could not allocate the first process!"); }

View File

@ -1,650 +0,0 @@
/******************************************************************************
COPYRIGHT(C) JONAS 'SORTIE' TERMANSEN 2011.
This file is part of Sortix.
Sortix is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
Sortix is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with Sortix. If not, see <http://www.gnu.org/licenses/>.
memorymanagement.cpp
Handles memory for the x86 architecture.
******************************************************************************/
#include "platform.h"
#include <libmaxsi/memory.h>
#include "log.h"
#include "panic.h"
#include "multiboot.h"
#include "memorymanagement.h"
using namespace Maxsi;
namespace Sortix
{
const addr_t KernelStart = 0x000000UL;
const size_t KernelLength = 0x200000UL;
const size_t KernelLeadingPages = KernelLength / 0x1000UL;
namespace Page
{
struct UnallocPage // Must be like this due to assembly.
{
size_t Magic;
void* Next;
size_t ContinuousPages;
};
// Refers to private assembly functions.
addr_t GetPrivate();
void PutPrivate(addr_t page);
void Fragmentize();
UnallocPage* volatile UnallocatedPage; // Must have this name and namespace due to assembly.
size_t pagesTotal;
size_t pagesUsed;
size_t pagesFree;
const size_t UnallocPageMagic = 0xABBAACDC; // Must be this value due to assembly
// Creates the Great Linked List of All Linked Lists!
void Init(multiboot_info_t* BootInfo)
{
UnallocatedPage = NULL;
pagesTotal = 0;
if ( !( BootInfo->flags & MULTIBOOT_INFO_MEM_MAP ) )
{
Panic("memorymanagement.cpp: The memory map flag was't set in the multiboot structure. Are your bootloader multiboot compliant?");
}
for (
multiboot_memory_map_t* MMap = (multiboot_memory_map_t*) BootInfo->mmap_addr;
(uintptr_t) MMap < BootInfo->mmap_addr + BootInfo->mmap_length;
MMap = (multiboot_memory_map_t *) ((uintptr_t) MMap + MMap->size + sizeof(MMap->size))
)
{
// Check that we can use this kind of RAM.
if ( MMap->type != 1 ) { continue; }
//Log::PrintF("RAM at 0x%64x\t of length 0x%64zx\n", MMap->addr, MMap->len);
// The kernel's code may split this memory area into multiple pieces.
struct { uintptr_t Base; size_t Length; } Entries[2]; nat Num = 1;
// Attempt to crop the entry so that we only map what we can address.
Entries[0].Base = (uintptr_t) MMap->addr;
Entries[0].Length = MMap->len;
#ifdef PLATFORM_X86
// Figure out if the memory area is addressable (are our pointers big enough?)
if ( 0xFFFFFFFF < MMap->addr ) { continue; }
if ( 0xFFFFFFFF < MMap->addr + MMap->len ) { Entries[0].Length = 0xFFFFFFFF - MMap->addr; }
#endif
// Detect if this memory is completely covered by the kernel.
if ( KernelStart <= Entries[0].Base && Entries[0].Base + Entries[0].Length <= KernelStart + KernelLength ) { continue; }
// Detect if this memory is partially covered by the kernel (from somewhere in the memory to somewhere else in the memory)
else if ( Entries[0].Base <= KernelStart && KernelStart + KernelLength <= Entries[0].Base + Entries[0].Length )
{
Entries[1].Base = KernelStart + KernelLength;
Entries[1].Length = (Entries[0].Base + Entries[0].Length) - Entries[1].Base;
Entries[0].Length = KernelStart - Entries[0].Base;
Num = 2;
}
// Detect if this memory is partially covered by the kernel (from the left to somewhere in the memory)
else if ( Entries[0].Base <= KernelStart + KernelLength && KernelStart + KernelLength <= Entries[0].Base + Entries[0].Length )
{
Entries[0].Length = (Entries[0].Base + Entries[0].Length) - (KernelStart + KernelLength);
Entries[0].Base = KernelStart + KernelLength;
}
// Detect if this memory is partially covered by the kernel (from somewhere in the memory to the right)
else if ( Entries[0].Base <= KernelStart && KernelStart <= Entries[0].Base + Entries[0].Length )
{
Entries[0].Length = KernelStart - Entries[0].Base;
}
for ( nat I = 0; I < Num; I++ )
{
// Align our entries on page boundaries.
uintptr_t NewBase = (Entries[I].Base + 0xFFF) / 0x1000 * 0x1000;
Entries[I].Length = (Entries[I].Base + Entries[I].Length ) - NewBase;
Entries[I].Length /= 0x1000;
Entries[I].Base = NewBase;
if ( Entries[I].Length == 0 ) { continue; }
#ifdef PLATFORM_X64
Log::Print("Halt: CPU X64 cannot continue as the virtual memory isn't disabled (kernel bug) and the code is about to access non-mapped memory.\n");
Log::Print("Sorry, it simply isn't possible to fully boot Sortix in x64 mode yet.\n");
Log::Print("X64 may be working when Sortix 0.5 comes out, or try the git master.\n");
while(true);
#endif
UnallocPage* Page = (UnallocPage*) Entries[I].Base;
Page->Magic = UnallocPageMagic;
Page->Next = UnallocatedPage;
Page->ContinuousPages = Entries[I].Length - 1;
pagesTotal += Entries[I].Length;
UnallocatedPage = Page;
}
}
if ( pagesTotal == 0 ) { Panic("memorymanagement.cpp: no RAM was available for paging"); }
// Alright, time to make our linked list into a lot of small entries.
// This speeds up the system when it's fully up and running. It only
// takes a few milliseconds to run this operation on my laptop.
Fragmentize();
pagesFree = pagesTotal;
pagesUsed = 0;
ASSERT(pagesFree + pagesUsed == pagesTotal);
#ifndef PLATFORM_SERIAL
//Log::PrintF("%zu pages are available for paging (%zu MiB RAM)\n", PagesTotal, PagesTotal >> 8 /* * 0x1000 / 1024 / 1024*/);
#endif
}
addr_t Get()
{
addr_t result = GetPrivate();
if ( result != 0 )
{
ASSERT(pagesFree > 0);
pagesUsed++;
pagesFree--;
}
else
{
ASSERT(pagesFree == 0);
}
ASSERT(pagesFree + pagesUsed == pagesTotal);
return result;
}
void Put(addr_t page)
{
pagesFree++;
pagesUsed--;
PutPrivate(page);
}
void Insert(addr_t page)
{
pagesFree++;
pagesTotal++;
PutPrivate(page);
}
void GetStats(size_t* pagesize, size_t* numfree, size_t* numused)
{
*pagesize = 4096UL;
*numfree = pagesFree;
*numused = pagesUsed;
}
}
namespace VirtualMemory
{
const size_t TABLE_PRESENT = (1<<0);
const size_t TABLE_WRITABLE = (1<<1);
const size_t TABLE_USER_SPACE = (1<<2);
const size_t TABLE_RESERVED1 = (1<<3); // Used internally by the CPU.
const size_t TABLE_RESERVED2 = (1<<4); // Used internally by the CPU.
const size_t TABLE_ACCESSED = (1<<5);
const size_t TABLE_DIRTY = (1<<6);
const size_t TABLE_RESERVED3 = (1<<7); // Used internally by the CPU.
const size_t TABLE_RESERVED4 = (1<<8); // Used internally by the CPU.
const size_t TABLE_AVAILABLE1 = (1<<9);
const size_t TABLE_AVAILABLE2 = (1<<10);
const size_t TABLE_AVAILABLE3 = (1<<11);
const size_t TABLE_FLAGS = (0xFFFUL); // Bits used for the flags.
const size_t TABLE_ADDRESS = (~0xFFFUL); // Bits used for the address.
const size_t DIR_PRESENT = (1<<0);
const size_t DIR_WRITABLE = (1<<1);
const size_t DIR_USER_SPACE = (1<<2);
const size_t DIR_WRITE_THROUGH = (1<<3);
const size_t DIR_DISABLE_CACHE = (1<<4);
const size_t DIR_ACCESSED = (1<<5);
const size_t DIR_RESERVED1 = (1<<6);
const size_t DIR_4MIB_PAGES = (1<<7);
const size_t DIR_RESERVED2 = (1<<8);
const size_t DIR_AVAILABLE1 = (1<<9);
const size_t DIR_AVAILABLE2 = (1<<10);
const size_t DIR_AVAILABLE3 = (1<<11);
const size_t DIR_FLAGS = (0xFFFUL); // Bits used for the flags.
const size_t DIR_ADDRESS = (~0xFFFUL); // Bits used for the address.
const size_t ENTRIES = 4096 / sizeof(addr_t);
struct Table
{
addr_t page[ENTRIES];
};
struct Dir
{
addr_t table[ENTRIES];
};
#ifdef PLATFORM_X86
// These structures are always virtually mapped to these addresses.
Table* const makingTable = (Table*) 0xFFBFC000UL;
Dir* const makingDir = (Dir*) 0xFFBFD000UL;
Dir* const kernelDir = (Dir*) 0xFFBFE000UL;
Dir* const currentDir = (Dir*) 0xFFBFF000UL;
Table* const currentTables = (Table*) 0xFFC00000UL;
#endif
#ifdef PLATFORM_X64
// TODO: These are dummy values!
const Dir* currentDir = (Dir*) 0xFACEB00C;
const Table* currentTables = (Table*) 0xFACEB00C;
#endif
addr_t currentDirPhysical;
#ifdef PLATFORM_X86
Table* BootstrapCreateTable(Dir* dir, addr_t where);
void BootstrapMap(Dir* dir, addr_t where, addr_t physical);
void BootstrapMapStructures(Dir* dir);
addr_t SwitchDirectory(addr_t dir);
addr_t CreateDirectory();
#endif
void Init()
{
#ifdef PLATFORM_X86
// Initialize variables.
currentDirPhysical = 0;
// Allocate a page we can use for our kernel page directory.
Dir* dirphys = (Dir*) Page::Get();
if ( dirphys == NULL ) { Panic("memorymanagement.cpp: Could not allocate page dir"); }
Memory::Set(dirphys, 0, sizeof(Dir));
// Identity map the kernel.
for ( addr_t ptr = KernelStart; ptr < KernelStart + KernelLength; ptr += 0x1000UL )
{
BootstrapMap(dirphys, ptr, ptr);
}
// Create every table used in the kernel half. We do it now such that
// any copies of the kernel dir never gets out of date.
for ( addr_t ptr = 0x80000000UL; ptr != 0UL; ptr += ENTRIES * 0x1000UL )
{
BootstrapCreateTable(dirphys, ptr);
}
// Map the paging structures themselves.
BootstrapMapStructures(dirphys);
// We have now created a minimal virtual environment where the kernel
// is mapped, the paging structures are ready, and the paging
// structures are mapped. We are now ready to enable pages.
// Switch the current dir - this enables paging.
SwitchDirectory((addr_t) dirphys);
// Hello, virtual world!
#else
#warning "Virtual Memory is not available on this arch"
while(true);
#endif
}
#ifdef PLATFORM_X86
inline addr_t GetTableId(addr_t where) { return where / (4096UL * ENTRIES); }
inline addr_t GetPageId(addr_t where) { return ( where / 4096UL ) % ENTRIES; }
Table* BootstrapCreateTable(Dir* dir, addr_t where)
{
size_t tableid = GetTableId(where);
addr_t tabledesc = dir->table[tableid];
if ( tabledesc != 0 )
{
return (Table*) (tabledesc & TABLE_ADDRESS);
}
else
{
addr_t tablepage = Page::Get();
if ( tablepage == 0 )
{
PanicF("memorymanagement.cpp: Could not allocate bootstrap page table for 0x%p", where);
}
Memory::Set((void*) tablepage, 0, sizeof(Table));
tabledesc = tablepage | TABLE_PRESENT | TABLE_WRITABLE;
dir->table[tableid] = tabledesc;
ASSERT((Table*) tablepage == BootstrapCreateTable(dir, where));
return (Table*) tablepage;
}
}
void BootstrapMap(Dir* dir, addr_t where, addr_t physical)
{
Table* table = BootstrapCreateTable(dir, where);
size_t pageid = GetPageId(where);
table->page[pageid] = physical | TABLE_PRESENT | TABLE_WRITABLE;
}
void BootstrapMapStructures(Dir* dir)
{
// Map the dir itself.
BootstrapMap(dir, (addr_t) kernelDir, (addr_t) dir);
BootstrapMap(dir, (addr_t) currentDir, (addr_t) dir);
// Map the tables.
for ( size_t i = 0; i < ENTRIES; i++ )
{
addr_t tabledesc = dir->table[i];
if ( tabledesc == 0 ) { continue; }
addr_t mapto = (addr_t) &(currentTables[i]);
addr_t mapfrom = (tabledesc & TABLE_ADDRESS);
BootstrapMap(dir, mapto, mapfrom);
}
}
#endif
#ifdef PLATFORM_X86
addr_t Lookup(addr_t where)
{
// Make sure we are only mapping kernel-approved pages.
size_t tableid = GetTableId(where);
addr_t tabledesc = currentDir->table[tableid];
if ( !(tabledesc & DIR_PRESENT) ) { return 0; }
size_t pageid = GetPageId(where);
return currentTables[tableid].page[pageid];
}
// Enables paging and flushes the Translation Lookaside Buffer (TLB).
void Flush()
{
asm volatile("mov %0, %%cr3":: "r"(currentDirPhysical));
size_t cr0;
asm volatile("mov %%cr0, %0": "=r"(cr0));
cr0 |= 0x80000000UL; // Enable paging!
asm volatile("mov %0, %%cr0":: "r"(cr0));
}
addr_t CreateAddressSpace()
{
return CreateDirectory();
}
addr_t SwitchAddressSpace(addr_t addrspace)
{
return SwitchDirectory(addrspace);
}
addr_t SwitchDirectory(addr_t dir)
{
// Don't switch if we are already there.
if ( dir == currentDirPhysical ) { return currentDirPhysical; }
addr_t previous = currentDirPhysical;
asm volatile("mov %0, %%cr3":: "r"(dir));
currentDirPhysical = dir;
Flush();
return previous;
}
addr_t CreateDirectory()
{
// Allocate the three pages we need: one for the new pagedir,
// and two for the tables covering the last 8 MiB of the pagedir.
addr_t newdir = Page::Get();
if ( newdir == 0 ) { return 0; }
addr_t newstructmap1 = Page::Get();
if ( newstructmap1 == 0 ) { Page::Put(newdir); return 0; }
addr_t newstructmap2 = Page::Get();
if ( newstructmap2 == 0 ) { Page::Put(newdir); Page::Put(newstructmap1); return 0; }
// Map the new pagedir, clone the kernel dir, and change the last
// 8 MiB, such that we can map the new page structures there.
MapKernel((addr_t) makingDir, newdir);
Memory::Copy(makingDir, kernelDir, sizeof(Dir));
makingDir->table[1024-2] = newstructmap1 | DIR_PRESENT | DIR_WRITABLE;
makingDir->table[1024-1] = newstructmap2 | DIR_PRESENT | DIR_WRITABLE;
// Build the new page structures.
MapKernel((addr_t) makingTable, newstructmap1);
Memory::Set(makingTable, 0, sizeof(Table));
makingTable->page[1024-2] = currentTables[1024-2].page[1024-2];
makingTable->page[1024-1] = newdir | TABLE_PRESENT | TABLE_WRITABLE;
// Build the new page structures.
MapKernel((addr_t) makingTable, newstructmap2);
for ( size_t i = 0; i < 1024-2; i++ )
{
makingTable->page[i] = currentTables[1024-1].page[i];
}
makingTable->page[1024-2] = newstructmap1 | TABLE_PRESENT | TABLE_WRITABLE;
makingTable->page[1024-1] = newstructmap2 | TABLE_PRESENT | TABLE_WRITABLE;
return newdir;
}
void MapKernel(addr_t where, addr_t physical)
{
// Make sure we are only mapping kernel-approved pages.
size_t tableid = GetTableId(where);
addr_t tabledesc = currentDir->table[tableid];
ASSERT(tabledesc != 0);
ASSERT((tabledesc & DIR_USER_SPACE) == 0);
size_t pageid = GetPageId(where);
addr_t pagedesc = physical | TABLE_PRESENT | TABLE_WRITABLE;
currentTables[tableid].page[pageid] = pagedesc;
ASSERT(Lookup(where) == pagedesc);
// TODO: Only update the single page!
Flush();
}
addr_t UnmapKernel(addr_t where)
{
// Make sure we are only unmapping kernel-approved pages.
size_t tableid = GetTableId(where);
addr_t tabledesc = currentDir->table[tableid];
ASSERT(tabledesc != 0);
ASSERT((tabledesc & DIR_USER_SPACE) == 0);
size_t pageid = GetPageId(where);
addr_t result = currentTables[tableid].page[pageid];
ASSERT((result & TABLE_PRESENT) != 0);
result &= TABLE_ADDRESS;
currentTables[tableid].page[pageid] = 0;
// TODO: Only update the single page!
Flush();
return result;
}
Table* CreateUserTable(addr_t where, bool maycreate)
{
size_t tableid = GetTableId(where);
addr_t tabledesc = currentDir->table[tableid];
Table* table = &(currentTables[tableid]);
if ( tabledesc == 0 )
{
ASSERT(maycreate);
addr_t tablepage = Page::Get();
if ( tablepage == 0 ) { return NULL; }
tabledesc = tablepage | TABLE_PRESENT | TABLE_WRITABLE | TABLE_USER_SPACE;
currentDir->table[tableid] = tabledesc;
MapKernel((addr_t) table, tablepage);
// TODO: Only update the single page!
Flush();
addr_t lookup = Lookup((addr_t) table) & TABLE_ADDRESS;
ASSERT(lookup == tablepage);
Memory::Set(table, 0, sizeof(Table));
}
// Make sure we only touch dirs permitted for use by user-space!
ASSERT((tabledesc & TABLE_USER_SPACE) != 0);
return table;
}
bool MapUser(addr_t where, addr_t physical)
{
// Make sure we are only mapping user-space-approved pages.
Table* table = CreateUserTable(where, true);
if ( table == NULL ) { return false; }
size_t pageid = GetPageId(where);
addr_t pagedesc = physical | TABLE_PRESENT | TABLE_WRITABLE | TABLE_USER_SPACE;
table->page[pageid] = pagedesc;
Flush();
ASSERT(Lookup(where) == pagedesc);
// TODO: Only update the single page!
Flush();
return true;
}
addr_t UnmapUser(addr_t where)
{
// Make sure we are only unmapping user-space-approved pages.
Table* table = CreateUserTable(where, false);
ASSERT(table != NULL);
size_t pageid = GetPageId(where);
addr_t pagedesc = table->page[pageid];
ASSERT((pagedesc & TABLE_PRESENT) != 0);
addr_t result = pagedesc & TABLE_ADDRESS;
table->page[pageid] = 0;
// TODO: Only update the single page!
Flush();
return result;
}
bool MapRangeKernel(addr_t where, size_t bytes)
{
for ( addr_t page = where; page < where + bytes; page += 4096UL )
{
addr_t physicalpage = Page::Get();
if ( physicalpage == 0 )
{
while ( where < page )
{
page -= 4096UL;
physicalpage = UnmapKernel(page);
Page::Put(physicalpage);
}
return false;
}
MapKernel(page, physicalpage);
}
return true;
}
void UnmapRangeKernel(addr_t where, size_t bytes)
{
for ( addr_t page = where; page < where + bytes; page += 4096UL )
{
addr_t physicalpage = UnmapKernel(page);
Page::Put(physicalpage);
}
}
bool MapRangeUser(addr_t where, size_t bytes)
{
for ( addr_t page = where; page < where + bytes; page += 4096UL )
{
addr_t physicalpage = Page::Get();
if ( physicalpage == 0 || !MapUser(page, physicalpage) )
{
while ( where < page )
{
page -= 4096UL;
physicalpage = UnmapUser(page);
Page::Put(physicalpage);
}
return false;
}
}
return true;
}
void UnmapRangeUser(addr_t where, size_t bytes)
{
for ( addr_t page = where; page < where + bytes; page += 4096UL )
{
addr_t physicalpage = UnmapUser(page);
Page::Put(physicalpage);
}
}
#else
#warning "Virtual Memory is not available on this arch"
addr_t Lookup(addr_t where) { while(true); return 0; }
void Flush() { while(true); }
addr_t CreateAddressSpace() { while(true); return 0; }
addr_t SwitchAddressSpace(addr_t addrspace) { while(true); return 0; }
addr_t SwitchDirectory(addr_t dir) { while(true); return 0; }
addr_t CreateDirectory() { while(true); return 0; }
void MapKernel(addr_t where, addr_t physical) { while(true); }
addr_t UnmapKernel(addr_t where) { while(true); return 0; }
Table* CreateUserTable(addr_t where, bool maycreate) { while(true); return NULL; }
bool MapUser(addr_t where, addr_t physical) { while(true); return false; }
addr_t UnmapUser(addr_t where) { while(true); return 0; }
bool MapRangeKernel(addr_t where, size_t bytes) { while(true); return false; }
void UnmapRangeKernel(addr_t where, size_t bytes) { while(true); }
bool MapRangeUser(addr_t where, size_t bytes) { while(true); return false; }
void UnmapRangeUser(addr_t where, size_t bytes) { while(true); }
#endif
}
}

View File

@ -18,7 +18,7 @@
with Sortix. If not, see <http://www.gnu.org/licenses/>.
memorymanagement.h
Handles memory for the x86 architecture.
Functions that allow modification of virtual memory.
******************************************************************************/
@ -30,328 +30,41 @@ typedef struct multiboot_info multiboot_info_t;
namespace Sortix
{
// This is the physical page allocator API. It splits the physical memory
// of the local machine into chunks known as pages. Each page is usually
// 4096 bytes (depending on your CPU). Pages are page-aligned, meaning they
// are all located at a multiple of the page size (such as 4096 bytes).
//
// A long list of memory addresses is used to store and retrieve page
// addresses from. To allocate a physical page of memory, simply call the
// Page::Get() function. When you are done using it, you can free it using
// the Page::Put() function, which makes the page available for other uses.
//
// To use a physical page, the CPU must be in physical mode. Since it is
// undesirable to be in physical mode, using the physical page requires
// using the virtual memory API (see below).
//
// This API completely bypasses the memory swapping system.
//
// If you just want memory for use by the kernel, allocate it using 'new'.
namespace Page
{
// Initializes the paging system. Accepts a multiboot structure
// containing the layout of the physical memory in this machine.
void Init(multiboot_info_t* bootinfo);
// Allocates a physical page and returns its physical address, or
// returns 0 if no page currently is available in the system.
addr_t Get();
// Deallocates a physical page allocated using Get(), which lets the
// system reuse the page for other purposes. The page must have been
// allocated using Get().
void Put(addr_t page);
// Inserts a physical page into the paging system. This page must not
// have been allocated using Get() and must have been allocated safely
// through other means (such as information provided by the bootloader).
void Insert(addr_t page);
// Rounds a memory address down to nearest page.
inline addr_t AlignDown(addr_t page) { return page & ~(0xFFFUL); }
// Rounds a memory address up to nearest page.
inline addr_t AlignUp(addr_t page) { return AlignDown(page + 0xFFFUL); }
// Retrieves statistics about the current page usage in the system, how
// big pages are, how many are free, and how many are used. Each
// parameter must be a legal pointer to the locations wherein the stats
// will be stored. This function always succeeds.
void GetStats(size_t* pagesize, size_t* numfree, size_t* numused);
}
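To make the contract above concrete, here is a minimal, hypothetical usage sketch of the allocator (Get() returning 0 is the only failure signal):
// Sketch only: allocate one physical page and hand it back afterwards.
addr_t page = Sortix::Page::Get();
if ( page == 0 ) { /* out of memory: fail gracefully */ }
// The page must be mapped (see the virtual memory API below) before use.
Sortix::Page::Put(page); // free it once nothing refers to it anymore
// The alignment helpers are plain arithmetic, for example:
// Sortix::Page::AlignDown(0x1234UL) == 0x1000UL, AlignUp(0x1234UL) == 0x2000UL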
// This is the virtual memory API. Virtual memory is a clever way to make the
// RAM just as you want it. In effect, it makes the RAM completely empty
// (there is no RAM). You can then add memory where you desire. You can take
// a physical page and put it in several places, and you can even add
// permissions to it (read-only, read-write, kernel-only). Naturally, the
// amount of physical pages is a limit (out of memory), but using swapping
// and a not-RAM storage unit, we could have potentially much more memory.
//
// There can exist several virtual address spaces, and it is possible for
// them to share physical pages. Each process in the system has its own
// virtual address space, but the kernel is always mapped in each address
// space. While the kernel can access all of the virtual address space,
// user-space programs can only access what they are allowed, cannot access
// the kernel's memory and cannot access each other's address spaces. This
// prevents programs from tampering with each other and from bringing the
// whole system down.
//
// Sortix has several conventions for the layout of the virtual address
// space. The kernel uses the top of the address space, and user-space is
// generally allowed to use the bottom and the middle for things such as
// program code, variables, the stack, and the heap.
//
// To access physical memory, you must allocate a physical page of memory
// and map it to a virtual address. You can then modify the memory through
// a pointer to that address.
//
// You should select the correct set of functions when writing new code.
// Using the functions incorrectly, using the wrong functions, or mixing
// incompatible functions can lead to gruesome bugs.
//
// For convenience, the functions have been grouped. Combining (un)mapping
// functions from different groups is bad style and possibly buggy. Assertions
// may be present to detect bad combinations.
//
// If you modify kernel virtual pages, then the effects will be shared across
// all virtual address spaces.
//
// If you modify user-space virtual pages, then the effects will be limited
// to the current process and its personal virtual address space.
//
// If you just want memory for use by the kernel, allocate it using 'new'.
namespace VirtualMemory
namespace Memory
{
// Initializes the virtual memory system. This bootstraps the kernel
// paging system (if needed) such that the initial kernel's virtual
// address space is created and the current process's page structures
// are mapped to conventional locations.
// This system depends on the physical page allocator being initialized.
void Init();
// Creates a new, fresh address space only containing the kernel memory
// regions, and a fresh user-space frontier of nothingness. The current
// address space remains active and is not modified (besides some kernel
// pages used for this purpose). Returns the physical address of the new
// top level paging structure, which is an opaque identifier as it is
// not identity mapped. Returns 0 if insufficient memory was available.
addr_t CreateAddressSpace();
// Switches the current address space to the virtual address space
// 'addrspace', which must be the result of CreateAddressSpace() or
// another function that creates or copies address spaces.
void Init(multiboot_info_t* bootinfo);
void InvalidatePage(addr_t addr);
void Flush();
addr_t Fork();
addr_t SwitchAddressSpace(addr_t addrspace);
// =====================================================================
// Function Group 1.
// Mapping a single kernel page.
//
// These functions allow you to map a single physical page for use by
// the kernel.
//
// Usage:
//
// addr_t physicalpage = Page::Get();
// if ( physicalpage == 0 ) { /* handle error */ }
//
// const addr_t mapto = 0xF00;
// VirtualMemory::MapKernel(mapto, physicalpage);
//
// /* access the memory */
// char* mem = (char*) mapto;
// mem[0] = 'K';
//
// ...
//
// addr_t physpage = VirtualMemory::UnmapKernel(mapto);
//
// /* when physpage is no longer referred, free it */
// Page::Put(physpage);
// Maps the physical page 'physical' to the virtual address 'where',
// with read and write flags set, but only accessible to the kernel.
// 'where' must be a virtual address in the range available to the
// kernel, and must not currently be used. Given legal input, this
// function will always succeed - illegal input is a gruesome bug.
// 'where' must be page aligned. The effect of this function will be
// shared in all addresses spaces - it is a global change.
// You are allowed to map the same physical page multiple times, even
// with Group 2 functions, just make sure to call the proper Unmap
// functions for each virtual address you map it to, and don't free the
// physical page until it is no longer referred to.
// The virtual page 'where' will point to 'physical' instantly after
// this function returns.
void MapKernel(addr_t where, addr_t physical);
// This function is equivalent to unmapping 'where', then mapping 'where' to
// 'newphysical'; it returns the previous physical page 'where' pointed to.
// The same requirements as for MapKernel and UnmapKernel apply.
addr_t RemapKernel(addr_t where, addr_t newphysical);
// Unmaps the virtual address 'where' such that it no longer points to
// a valid physical address. Accessing the page at the virtual address
// 'where' will result in an access violation (panicking/crashing the
// kernel). 'where' must be a virtual address already mapped in the
// kernel virtual memory ranges. 'where' must be page aligned.
// 'where' must already be a legal kernel virtual page.
// Returns the address of the physical memory page 'where' points to
// before being cleared. Before returning the physical page to the page
// allocator, make sure that it is not used by other virtual pages.
addr_t UnmapKernel(addr_t where);
// =====================================================================
// Function Group 2.
// Mapping a single user-space page.
//
// These functions allow you to map a single physical page for use by
// user-space programs.
//
// Usage:
//
// addr_t physicalpage = Page::Get();
// if ( physicalpage == 0 ) { /* handle error */ }
//
// const addr_t mapto = 0xF00;
// if ( !VirtualMemory::MapUser(mapto, physicalpage) )
// {
// Page::Put(physicalpage);
// /* handle error */
// }
//
// /* let user-space use memory safely */
//
// addr_t physpage = VirtualMemory::UnmapUser(mapto);
//
// /* when physpage is no longer referred, free it */
// Page::Put(physpage);
// Maps the physical page 'physical' to the virtual address 'where',
// with read and write flags set, accessible to both kernel and user-
// space. 'where' must be a virtual address not in the kernel ranges,
// that is, available to userspace. 'where' must be page aligned and
// not currently used. Returns false if insufficient memory is available
// and returns with the address space left unaltered. Illegal input is
// also a gruesome bug. This function only changes the address space of
// the current process.
// You are allowed to map the same physical page multiple times, even
// with Group 1 functions, just make sure to call the proper Unmap
// functions for each virtual address you map it to, and don't free the
// physical page until it is no longer referred to.
// The virtual page 'where' will point to 'physical' instantly after
// this function returns.
bool MapUser(addr_t where, addr_t physical);
// This function is equivalent to unmapping 'where', then mapping 'where' to
// 'newphysical'; it returns the previous physical page 'where' pointed to.
// The same requirements as for MapUser and UnmapUser apply.
addr_t RemapUser(addr_t where, addr_t newphysical);
// Unmaps the virtual address 'where' such that it no longer points to
// a valid physical address. Accessing the page at the virtual address
// 'where' will result in an access violation (panicking/crashing the
// program/kernel). 'where' must be a virtual address already mapped
// outside the kernel virtual memory ranges. 'where' must be page
// aligned. 'where' must already be a legal user-space virtual page.
// Returns the address of the physical memory page 'where' points to
// before being cleared. Before returning the physical page to the page
// allocator, make sure that it is not used by other virtual pages.
addr_t UnmapUser(addr_t where);
// =====================================================================
// Function Group 3.
// Mapping a range of kernel pages.
//
// These functions allow you to specify a range of virtual pages that
// shall be usable by the kernel. Memory will be allocated accordingly.
//
// Usage:
//
// const addr_t mapto = 0x4000UL;
// size_t numpages = 8;
// size_t bytes = numpages * 4096UL;
// if ( !VirtualMemory::MapRangeKernel(mapto, bytes) )
// {
// /* handle error */
// }
//
// /* use memory here */
// ...
//
// VirtualMemory::UnmapRangeKernel(mapto, bytes);
// Allocates the needed pages and maps them to 'where'. Returns false if
// not enough memory is available. 'where' must be page aligned. 'bytes'
// need not be page aligned and is rounded up to nearest page. The
// region of 'where' and onwards must not be currently mapped.
// The memory will only be readable and writable by the kernel.
// The memory will be available the instant the function returns.
// The whole region must be within the kernel virtual page ranges.
bool MapRangeKernel(addr_t where, size_t bytes);
// Deallocates the selected pages, and unmaps the selected region.
// 'where' must be page aligned. 'bytes' need not be page aligned and
// is rounded up to nearest page. Each page in the region must have been
// allocated by MapRangeKernel. You need not free a whole region at once
// and you may even combine pages from adjacent regions.
void UnmapRangeKernel(addr_t where, size_t bytes);
// =====================================================================
// Function Group 4.
// Mapping a range of user-space pages.
//
// These functions allow you to specify a range of virtual pages that
// shall be usable by the user-space. Memory will be allocated
// accordingly.
//
// Usage:
//
// const addr_t mapto = 0x4000UL;
// size_t numpages = 8;
// size_t bytes = numpages * 4096UL;
// if ( !VirtualMemory::MapRangeUser(mapto, bytes) )
// {
// /* handle error */
// }
//
// /* let user-space use memory here */
// ...
//
// VirtualMemory::UnmapRangeUser(mapto, bytes);
// Allocates the needed pages and maps them to 'where'. Returns false if
// not enough memory is available. 'where' must be page aligned. 'bytes'
// need not be page aligned and is rounded up to nearest page. The
// region of 'where' and onwards must not be currently mapped.
// The memory will be readable and writable by user-space.
// The memory will be available the instant the function returns.
// The whole region must be outside the kernel virtual page ranges.
bool MapRangeUser(addr_t where, size_t bytes);
// Deallocates the selected pages, and unmaps the selected region.
// 'where' must be page aligned. 'bytes' need not be page aligned and
// is rounded up to nearest page. Each page in the region must have been
// allocated by MapRangeUser. You need not free a whole region at once
// and you may even combine pages from adjacent regions.
void UnmapRangeUser(addr_t where, size_t bytes);
bool MapKernel(addr_t physical, addr_t mapto);
bool MapUser(addr_t physical, addr_t mapto);
addr_t UnmapKernel(addr_t mapto);
addr_t UnmapUser(addr_t mapto);
#ifdef PLATFORM_X86
// Define where the kernel heap is located, used by the heap code.
const addr_t heapLower = 0x80000000UL;
const addr_t heapUpper = 0xFF800000UL;
// Physical pages may be safely temporarily mapped to this address and a
// good dozen of pages onwards. Beware that this is only meant to be
// a temporary place to put memory.
const addr_t tempaddr = 0xFF800000UL;
#if defined(PLATFORM_X86)
const addr_t HEAPLOWER = 0x80000000UL;
const addr_t HEAPUPPER = 0xFF800000UL;
#elif defined(PLATFORM_X64)
// This isn't supported yet, so just use random values.
const addr_t heapLower = 0x80000000UL;
const addr_t heapUpper = 0xFF800000UL;
const addr_t tempaddr = 0xFF800000UL;
const addr_t HEAPLOWER = 0xFFFF800000000000UL;
const addr_t HEAPUPPER = 0xFFFFFE8000000000UL;
#endif
}
}
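Note the argument order of the new API: the Map functions now take the physical page first and the destination virtual address second (the old VirtualMemory API took 'where' first), and kernel mappings can now fail, returning bool. A minimal sketch of the new convention with a hypothetical kernel virtual address, mirroring the updated call sites in this commit:
addr_t phys = Page::Get();
const addr_t mapto = 0x80000000UL; // hypothetical page-aligned kernel address
if ( phys != 0 && Memory::MapKernel(phys, mapto) )
{
Memory::InvalidatePage(mapto); // callers now invalidate explicitly
// ... access the memory through 'mapto' ...
Page::Put(Memory::UnmapKernel(mapto));
}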

View File

@ -70,7 +70,7 @@ namespace Sortix
ProcessSegment* tmp = segments;
while ( tmp != NULL )
{
VirtualMemory::UnmapRangeUser(tmp->position, tmp->size);
Memory::UnmapRangeUser(tmp->position, tmp->size);
ProcessSegment* todelete = tmp;
tmp = tmp->next;
delete todelete;

View File

@ -202,7 +202,7 @@ namespace Sortix
AllocatedThreadId = 1;
// Create an address space for the idle process.
addr_t noopaddrspace = VirtualMemory::CreateAddressSpace();
addr_t noopaddrspace = Memory::Fork();
// Create the noop process.
Process* noopprocess = new Process(noopaddrspace);
@ -225,7 +225,8 @@ namespace Sortix
uintptr_t MapTo = 0x80000000;
VirtualMemory::MapKernel(MapTo, (uintptr_t) KernelStackPage);
Memory::MapKernel((addr_t) KernelStackPage, MapTo);
Memory::InvalidatePage(KernelStackPage);
GDT::SetKernelStack((size_t*) (MapTo+4096));
}
@ -271,9 +272,9 @@ namespace Sortix
uintptr_t StackPos = 0x80000000UL;
uintptr_t MapTo = StackPos - 4096UL;
addr_t OldAddrSpace = VirtualMemory::SwitchAddressSpace(Process->GetAddressSpace());
addr_t OldAddrSpace = Memory::SwitchAddressSpace(Process->GetAddressSpace());
VirtualMemory::MapUser(MapTo, PhysStack);
Memory::MapUser(PhysStack, MapTo);
size_t* Stack = (size_t*) StackPos;
// Prepare the parameters for the entry function (C calling convention).
@ -290,7 +291,7 @@ namespace Sortix
thread->SetState(Thread::State::RUNNABLE);
// Avoid side effects by restoring the old address space.
VirtualMemory::SwitchAddressSpace(OldAddrSpace);
Memory::SwitchAddressSpace(OldAddrSpace);
#endif
@ -354,7 +355,7 @@ namespace Sortix
}
// If applicable, switch the virtual address space.
VirtualMemory::SwitchAddressSpace(NextThread->GetProcess()->GetAddressSpace());
Memory::SwitchAddressSpace(NextThread->GetProcess()->GetAddressSpace());
currentThread = NextThread;

View File

@ -72,17 +72,17 @@ namespace Sortix
// TODO: Check if mapto collides with any other memory section!
if ( !VirtualMemory::MapUser(mapto, page) )
if ( !Memory::MapUser(page, mapto) )
{
Page::Put(page); R->eax = 0; return;
}
Memory::Set(userframe, 0, sizeof(UserFrame));
Maxsi::Memory::Set(userframe, 0, sizeof(UserFrame));
DevVGAFrame* frame = new DevVGAFrame();
if ( frame == NULL )
{
VirtualMemory::UnmapUser(mapto);
Memory::UnmapUser(mapto);
Page::Put(page); R->eax = 0; return;
}
@ -90,7 +90,7 @@ namespace Sortix
if ( fd < 0 )
{
delete frame;
VirtualMemory::UnmapUser(mapto);
Memory::UnmapUser(mapto);
Page::Put(page); R->eax = 0; return;
}
@ -142,31 +142,33 @@ namespace Sortix
if ( currentframe->process != process )
{
VirtualMemory::SwitchAddressSpace(currentframe->process->GetAddressSpace());
Memory::SwitchAddressSpace(currentframe->process->GetAddressSpace());
}
// Remap the pages in the owning process.
// TODO: Check if userframe is actually user-space writable!
VirtualMemory::UnmapUser((addr_t) currentframe->userframe);
VirtualMemory::MapUser((addr_t) currentframe->userframe, currentframe->physical);
Memory::UnmapUser((addr_t) currentframe->userframe);
Memory::MapUser(currentframe->physical, (addr_t) currentframe->userframe);
Memory::InvalidatePage((addr_t) frame->userframe);
// Restore the contents of this frame to the VGA framebuffer.
Memory::Copy(currentframe->userframe, vga, sizeof(UserFrame));
Maxsi::Memory::Copy(currentframe->userframe, vga, sizeof(UserFrame));
if ( currentframe->process != process )
{
VirtualMemory::SwitchAddressSpace(process->GetAddressSpace());
Memory::SwitchAddressSpace(process->GetAddressSpace());
}
currentframe->onscreen = false;
}
// Now move the contents of this frame to the VGA framebuffer.
Memory::Copy(vga, frame->userframe, sizeof(UserFrame));
Maxsi::Memory::Copy(vga, frame->userframe, sizeof(UserFrame));
// Remap the pages such that the current process now uses the vga.
VirtualMemory::UnmapUser((addr_t) frame->userframe);
VirtualMemory::MapUser((addr_t) frame->userframe, (addr_t) vga);
Memory::UnmapUser((addr_t) frame->userframe);
Memory::MapUser((addr_t) vga, (addr_t) frame->userframe);
Memory::InvalidatePage((addr_t) frame->userframe);
frame->onscreen = true;
currentframe = frame;
@ -201,7 +203,7 @@ namespace Sortix
DevVGAFrame::~DevVGAFrame()
{
if ( process != NULL ) { ASSERT(CurrentProcess() == process); }
if ( userframe != NULL ) { VirtualMemory::UnmapUser((addr_t) userframe); }
if ( userframe != NULL ) { Memory::UnmapUser((addr_t) userframe); Memory::InvalidatePage((addr_t) userframe); }
if ( physical != 0 ) { Page::Put(physical); }
}

View File

@ -58,24 +58,29 @@ multiboot_entry:
# Store the magic value.
mov %eax, 0x100004
# Clear the first 4096*4 bytes following 0x1000.
# Clear the first $0xE000 bytes following 0x1000.
movl $0x1000, %edi
mov %edi, %cr3
xorl %eax, %eax
movl $4096, %ecx
movl $0xE000, %ecx
rep stosl
movl %cr3, %edi
# Set the initial page tables.
# Page-Map Level 4
movl $0x2003, (%edi)
addl $0x1000, %edi
# Page-Directory Pointer Table
movl $0x3003, (%edi)
addl $0x1000, %edi
# Page-Directory
movl $0x4003, (%edi)
addl $0x1000, %edi
# Page-Table
# Memory map the first 2 MiB.
movl $0x3, %ebx
movl $512, %ecx
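For clarity, each entry value above is a physical table address OR'd with flag bits: $0x2003 points at the table at 0x2000 with bits 0-1 set (present | writable), and likewise $0x3003 and $0x4003. The loop the last two lines set up presumably stores %ebx into 512 consecutive page-table entries while stepping it by $0x1000, producing 0x0003, 0x1003, 0x2003, ... and thus identity mapping the first 2 MiB as 4 KiB pages.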

View File

@ -0,0 +1,110 @@
/******************************************************************************
COPYRIGHT(C) JONAS 'SORTIE' TERMANSEN 2011.
This file is part of Sortix.
Sortix is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
Sortix is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with Sortix. If not, see <http://www.gnu.org/licenses/>.
memorymanagement.cpp
Handles memory for the x64 architecture.
******************************************************************************/
#include "platform.h"
#include <libmaxsi/memory.h>
#include "multiboot.h"
#include "panic.h"
#include "../memorymanagement.h"
#include "x86-family/memorymanagement.h"
namespace Sortix
{
namespace Page
{
extern size_t stackused;
extern size_t stacklength;
}
namespace Memory
{
extern addr_t currentdir;
void InitCPU()
{
// The x64 boot code already set up virtual memory and identity
// mapped the first 2 MiB. This code finishes the job such that
// virtual memory is fully usable and manageable.
// boot.s already initialized everything from 0x1000UL to 0xE000UL
// to zeroes. Since these structures are already in use, zeroing them
// again here would be very dangerous.
PML* const BOOTPML4 = (PML* const) 0x01000UL;
PML* const BOOTPML3 = (PML* const) 0x06000UL;
PML* const BOOTPML2 = (PML* const) 0x07000UL;
PML* const BOOTPML1 = (PML* const) 0x08000UL;
// First order of business is to map the virtual memory structures
// to the pre-defined locations in the virtual address space.
addr_t flags = PML_PRESENT | PML_WRITABLE;
// Fractal map the PML1s.
BOOTPML4->entry[511] = (addr_t) BOOTPML4 | flags;
// Fractal map the PML2s.
BOOTPML4->entry[510] = (addr_t) BOOTPML3 | flags | PML_FORK;
BOOTPML3->entry[511] = (addr_t) BOOTPML4 | flags;
// Fractal map the PML3s.
BOOTPML3->entry[510] = (addr_t) BOOTPML2 | flags | PML_FORK;
BOOTPML2->entry[511] = (addr_t) BOOTPML4 | flags;
// Fractal map the PML4s.
BOOTPML2->entry[510] = (addr_t) BOOTPML1 | flags | PML_FORK;
BOOTPML1->entry[511] = (addr_t) BOOTPML4 | flags;
// Add some predefined room for forking address spaces.
PML* const FORKPML2 = (PML* const) 0x09000UL;
PML* const FORKPML1 = (PML* const) 0x0A000UL;
BOOTPML3->entry[0] = (addr_t) FORKPML2 | flags | PML_FORK;
BOOTPML2->entry[0] = (addr_t) FORKPML1 | flags | PML_FORK;
currentdir = (addr_t) BOOTPML4;
// The virtual memory structures are now available on the predefined
// locations. This means the virtual memory code is bootstrapped. Of
// course, we still have no physical page allocator, so that's the
// next step.
PML* const PHYSPML3 = (PML* const) 0x0B000UL;
PML* const PHYSPML2 = (PML* const) 0x0C000UL;
PML* const PHYSPML1 = (PML* const) 0x0D000UL;
PML* const PHYSPML0 = (PML* const) 0x0E000UL;
BOOTPML4->entry[509] = (addr_t) PHYSPML3 | flags;
PHYSPML3->entry[0] = (addr_t) PHYSPML2 | flags;
PHYSPML2->entry[0] = (addr_t) PHYSPML1 | flags;
PHYSPML1->entry[0] = (addr_t) PHYSPML0 | flags;
Page::stackused = 0;
Page::stacklength = 4096UL / sizeof(addr_t);
// The physical memory allocator should now be ready for use. Next
// up, the calling function will fill up the physical allocator with
// plenty of nice physical pages. (see Page::InitPushRegion)
}
}
}

View File

@ -0,0 +1,56 @@
/******************************************************************************
COPYRIGHT(C) JONAS 'SORTIE' TERMANSEN 2011.
This file is part of Sortix.
Sortix is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
Sortix is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with Sortix. If not, see <http://www.gnu.org/licenses/>.
memorymanagement.h
Handles memory for the x64 architecture.
******************************************************************************/
#ifndef SORTIX_X64_MEMORYMANAGEMENT_H
#define SORTIX_X64_MEMORYMANAGEMENT_H
namespace Sortix
{
namespace Memory
{
const size_t TOPPMLLEVEL = 4;
const size_t ENTRIES = 4096UL / sizeof(addr_t);
const size_t TRANSBITS = 9;
PML* const PMLS[TOPPMLLEVEL + 1] =
{
(PML* const) 0x0,
(PML* const) 0xFFFFFF8000000000UL,
(PML* const) 0xFFFFFF7FC0000000UL,
(PML* const) 0xFFFFFF7FBFE00000UL,
(PML* const) 0xFFFFFF7FBFDFF000UL,
};
PML* const FORKPML = (PML* const) 0xFFFFFF0000000000UL;
}
namespace Page
{
addr_t* const STACK = (addr_t* const) 0xFFFFFE8000000000UL;
const size_t MAXSTACKSIZE = (512UL*1024UL*1024UL*1024UL);
const size_t MAXSTACKLENGTH = MAXSTACKSIZE / sizeof(addr_t);
}
}
#endif
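These window addresses follow from the self-references InitCPU installs. A quick derivation sketch for PMLS[1], assuming the top-level slot 511 points back at the PML4 itself as InitCPU arranges:
// An address whose PML4 index is 511 resolves one level short, so page
// tables show up as plain data pages in this window. Its base is the
// canonical form of index 511 with zeroes below:
//   511UL << 39                    == 0x0000FF8000000000UL
//   sign-extended (canonical) form == 0xFFFFFF8000000000UL == PMLS[1]
// PMLS[2] = 0xFFFFFF7FC0000000UL likewise decodes to indices (510, 511),
// matching the entry[510]/entry[511] chain built in InitCPU.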

View File

@ -0,0 +1,544 @@
/******************************************************************************
COPYRIGHT(C) JONAS 'SORTIE' TERMANSEN 2011.
This file is part of Sortix.
Sortix is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
Sortix is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with Sortix. If not, see <http://www.gnu.org/licenses/>.
memorymanagement.cpp
Handles memory for the x86 family of architectures.
******************************************************************************/
#include "platform.h"
#include <libmaxsi/memory.h>
#include "multiboot.h"
#include "panic.h"
#include "../memorymanagement.h"
#include "memorymanagement.h"
namespace Sortix
{
const addr_t KERNELEND = 0x200000UL;
namespace Page
{
void InitPushRegion(addr_t position, size_t length);
size_t pagesnotonstack;
size_t stackused;
size_t stacklength;
}
namespace Memory
{
addr_t currentdir;
void InitCPU();
void AllocateKernelPMLs();
void Init(multiboot_info_t* bootinfo)
{
Page::pagesnotonstack = 0;
if ( !( bootinfo->flags & MULTIBOOT_INFO_MEM_MAP ) )
{
Panic("memorymanagement.cpp: The memory map flag was't set in "
"the multiboot structure. Are your bootloader multiboot "
"specification compliant?");
}
// Initialize CPU-specific things.
InitCPU();
typedef const multiboot_memory_map_t* mmap_t;
// Loop over every detected memory region.
for (
mmap_t mmap = (mmap_t) bootinfo->mmap_addr;
(addr_t) mmap < bootinfo->mmap_addr + bootinfo->mmap_length;
mmap = (mmap_t) ((addr_t) mmap + mmap->size + sizeof(mmap->size))
)
{
// Check that we can use this kind of RAM.
if ( mmap->type != 1 ) { continue; }
// The kernel's code may split this memory area into multiple pieces.
addr_t base = (addr_t) mmap->addr;
size_t length = mmap->len;
#ifdef PLATFORM_X86
// Figure out if the memory area is addressable (are our pointers big enough?)
if ( 0xFFFFFFFFULL < mmap->addr ) { continue; }
if ( 0xFFFFFFFFULL < mmap->addr + mmap->len ) { length = 0x100000000ULL - mmap->addr; }
#endif
// Detect if this memory is completely covered by the kernel.
if ( base + length <= KERNELEND ) { continue; }
// Detect if this memory is partially covered by the kernel.
if ( base <= KERNELEND && KERNELEND <= base + length )
{
length = (base + length) - KERNELEND;
base = KERNELEND;
}
Page::InitPushRegion(base, length);
}
// If the physical allocator couldn't handle the vast amount of
// physical pages, it may decide to drop some. This shouldn't happen
// until the pebibyte era of RAM.
if ( 0 < Page::pagesnotonstack )
{
Log::PrintF("%zu bytes of RAM aren't used due to technical"
"restrictions.\n", Page::pagesnotonstack * 0x1000UL);
}
// Finish allocating the top level PMLs for the kernel's use.
AllocateKernelPMLs();
}
// Prepare the non-forkable kernel PMLs such that forking the kernel
// address space will always keep the kernel mapped.
void AllocateKernelPMLs()
{
const addr_t flags = PML_PRESENT | PML_WRITABLE;
PML* const pml = PMLS[TOPPMLLEVEL];
size_t start = ENTRIES / 2;
size_t end = ENTRIES;
for ( size_t i = start; i < end; i++ )
{
if ( pml->entry[i] & PML_PRESENT ) { continue; }
addr_t page = Page::Get();
if ( !page ) { Panic("out of memory allocating boot PMLs"); }
pml->entry[i] = page | flags;
}
}
}
namespace Page
{
void ExtendStack()
{
// This call will always succeed; if it couldn't, then the stack
// wouldn't be full, and thus this function wouldn't have been called.
addr_t page = Get();
// This call will also succeed, since there are plenty of physical
// pages available and it might need some.
Memory::MapKernel(page, (addr_t) (STACK + stacklength));
// TODO: This may not be needed during the boot process!
//Memory::InvalidatePage((addr_t) (STACK + stacklength));
stacklength += 4096UL / sizeof(addr_t);
}
void InitPushRegion(addr_t position, size_t length)
{
// Align our entries on page boundaries.
addr_t newposition = Page::AlignUp(position);
length = Page::AlignDown((position + length) - newposition);
position = newposition;
while ( length )
{
if ( unlikely(stackused == stacklength) )
{
if ( stackused == MAXSTACKLENGTH )
{
pagesnotonstack += length / 4096UL;
return;
}
ExtendStack();
}
STACK[stackused++] = position;
length -= 4096UL;
position += 4096UL;
}
}
addr_t Get()
{
// TODO: Set out of memory errno here!
if ( unlikely(stackused == 0) ) { return 0; }
return STACK[--stackused];
}
void Put(addr_t page)
{
ASSERT(stackused < MAXSTACKLENGTH);
STACK[stackused++] = page;
}
}
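The allocator above is a LIFO of free physical page addresses kept in an array at the fixed virtual address STACK, grown on demand by ExtendStack(). A hypothetical round trip:
// Get() pops STACK[--stackused]; Put() pushes STACK[stackused++].
addr_t page = Sortix::Page::Get(); // 0 means out of physical memory
if ( page != 0 )
{
// ... map and use the page ...
Sortix::Page::Put(page);
}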
namespace Memory
{
void InvalidatePage(addr_t /*addr*/)
{
// TODO: Actually just call the instruction.
Flush();
}
// Flushes the Translation Lookaside Buffer (TLB).
void Flush()
{
asm volatile("mov %0, %%cr3":: "r"(currentdir));
}
addr_t SwitchAddressSpace(addr_t addrspace)
{
// Don't switch if we are already there.
if ( addrspace == currentdir ) { return currentdir; }
addr_t previous = currentdir;
// Switch and flush the TLB.
asm volatile("mov %0, %%cr3":: "r"(addrspace));
currentdir = addrspace;
return previous;
}
bool MapRangeKernel(addr_t where, size_t bytes)
{
for ( addr_t page = where; page < where + bytes; page += 4096UL )
{
addr_t physicalpage = Page::Get();
if ( physicalpage == 0 )
{
while ( where < page )
{
page -= 4096UL;
physicalpage = UnmapKernel(page);
Page::Put(physicalpage);
}
return false;
}
MapKernel(physicalpage, page);
}
return true;
}
void UnmapRangeKernel(addr_t where, size_t bytes)
{
for ( addr_t page = where; page < where + bytes; page += 4096UL )
{
addr_t physicalpage = UnmapKernel(page);
Page::Put(physicalpage);
}
}
bool MapRangeUser(addr_t where, size_t bytes)
{
for ( addr_t page = where; page < where + bytes; page += 4096UL )
{
addr_t physicalpage = Page::Get();
if ( physicalpage == 0 || !MapUser(physicalpage, page) )
{
while ( where < page )
{
page -= 4096UL;
physicalpage = UnmapUser(page);
Page::Put(physicalpage);
}
return false;
}
}
return true;
}
void UnmapRangeUser(addr_t where, size_t bytes)
{
for ( addr_t page = where; page < where + bytes; page += 4096UL )
{
addr_t physicalpage = UnmapUser(page);
Page::Put(physicalpage);
}
}
template <bool userspace, bool invalidate>
bool Map(addr_t physical, addr_t mapto)
{
const addr_t userflags = userspace ? (PML_USERSPACE | PML_FORK) : 0;
const addr_t flags = userflags | PML_PRESENT | PML_WRITABLE;
const size_t MASK = (1<<TRANSBITS)-1;
size_t pmlchildid[TOPPMLLEVEL + 1];
for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
{
pmlchildid[i] = (mapto >> (12+(i-1)*TRANSBITS)) & MASK;
}
size_t offset = 0;
for ( size_t i = TOPPMLLEVEL; i > 1; i-- )
{
size_t childid = pmlchildid[i];
PML* pml = PMLS[i] + offset;
addr_t& entry = pml->entry[childid];
if ( !(entry & PML_PRESENT) )
{
// TODO: Possible memory leak when page allocation fails.
addr_t page = Page::Get();
if ( page == 0 ) { return false; }
entry = page | flags;
// Invalidate the new PML and reset it to zeroes.
addr_t pmladdr = (addr_t) (PMLS[i-1] + childid);
InvalidatePage(pmladdr);
Maxsi::Memory::Set((void*) pmladdr, 0, sizeof(PML));
}
else if ( userspace && !(entry & PML_USERSPACE) )
{
PanicF("attempted to map physical page %p to virtual page "
"%p with userspace permissions, but the virtual page"
"wasn't in an userspace PML[%zu]. This is a bug in the"
"code calling this function", physical, mapto, i-1);
}
offset = offset * ENTRIES + childid;
}
(PMLS[1] + offset)->entry[pmlchildid[1]] = physical | flags;
if ( invalidate )
{
InvalidatePage(mapto);
}
return true;
}
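To see the index arithmetic at work, take a hypothetical x64 address built from known indices (TOPPMLLEVEL = 4, TRANSBITS = 9, MASK = 511):
// mapto = (1UL<<39) | (2UL<<30) | (3UL<<21) | (4UL<<12) == 0x8080604000
//   pmlchildid[4] = (mapto >> 39) & 511 == 1   (PML4 entry)
//   pmlchildid[3] = (mapto >> 30) & 511 == 2   (PML3 entry)
//   pmlchildid[2] = (mapto >> 21) & 511 == 3   (PML2 entry)
//   pmlchildid[1] = (mapto >> 12) & 511 == 4   (PML1 entry)
// Each level consumes TRANSBITS bits above the 12-bit page offset, while
// 'offset' accumulates the path so that PMLS[i] + offset addresses the
// correct PML inside the fractal window.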
template <bool userspace, bool invalidate>
addr_t Unmap(addr_t mapto)
{
const size_t MASK = (1<<TRANSBITS)-1;
size_t pmlchildid[TOPPMLLEVEL + 1];
for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
{
pmlchildid[i] = (mapto >> (12+(i-1)*TRANSBITS)) & MASK;
}
size_t offset = 0;
for ( size_t i = TOPPMLLEVEL; i > 1; i-- )
{
size_t childid = pmlchildid[i];
PML* pml = PMLS[i] + offset;
addr_t& entry = pml->entry[childid];
if ( !(entry & PML_PRESENT) )
{
PanicF("attempted to unmap virtual page %p with userspace, "
" but the virtual page wasn't mapped. This is a bug "
"in the code calling this function", mapto);
}
else if ( userspace && !(entry & PML_USERSPACE) )
{
PanicF("attempted to unmap virtual page %p it wasn't in an"
"userspace PML[%zu]. This is a bug in the code"
"calling this function", mapto, i-1);
}
offset = offset * ENTRIES + childid;
}
addr_t& entry = (PMLS[1] + offset)->entry[pmlchildid[1]];
addr_t result = entry & PML_ADDRESS;
entry = 0;
// TODO: If all the entries in PML[N] are not-present, then who
// unmaps its entry from PML[N-1]?
if ( invalidate )
{
Flush();
}
return result;
}
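// Thin non-template wrappers so that callers need not spell out the
// template arguments. Note that all four pass invalidate = false; TLB
// maintenance is left to the caller (see the explicit InvalidatePage
// calls in Fork below).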
bool MapKernel(addr_t physical, addr_t mapto)
{
return Map<false, false>(physical, mapto);
}
bool MapUser(addr_t physical, addr_t mapto)
{
return Map<true, false>(physical, mapto);
}
addr_t UnmapKernel(addr_t mapto)
{
return Unmap<false, false>(mapto);
}
addr_t UnmapUser(addr_t mapto)
{
return Unmap<true, false>(mapto);
}
// TODO: Copying every frame eagerly is wasteful in many cases. It'd
// be nice to upgrade this to a copy-on-demand (copy-on-write) scheme.
addr_t Fork()
{
addr_t newtoppmladdr = Page::Get();
if ( newtoppmladdr == 0 ) { return 0; }
// This is either bad code or very clever code and probably is both.
size_t positionstack[TOPPMLLEVEL+1];
positionstack[TOPPMLLEVEL] = 0;
size_t level = TOPPMLLEVEL;
size_t pmloffset = 0;
bool failure = false;
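// positionstack is an explicit recursion stack: positionstack[level]
// holds the index of the next entry to visit in the PML currently
// being forked at that level, so the loop below walks the whole
// paging tree without using actual recursion.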
// This call always succeeds.
MapKernel(newtoppmladdr, (addr_t) (FORKPML + level));
InvalidatePage((addr_t) (FORKPML + level));
while ( positionstack[TOPPMLLEVEL] < ENTRIES )
{
const size_t pos = positionstack[level];
if ( pos == ENTRIES )
{
//Log::PrintF(" done with level\n");
(positionstack[++level])++;
pmloffset /= ENTRIES;
continue;
}
addr_t entry = (PMLS[level] + pmloffset)->entry[pos];
// If the entry should be forked, fork it!
if ( likely(entry & PML_FORK) )
{
// Pop an unused physical page off the allocator's stack.
addr_t phys = Page::Get();
if ( unlikely(phys == 0) )
{
//Log::PrintF(" out of memory!\n");
// Oh no. Out of memory! We'll have to undo everything
// we just did. Argh!
failure = true;
break;
}
// Map the destination page.
addr_t destaddr = (addr_t) (FORKPML + level-1);
MapKernel(phys, destaddr);
InvalidatePage(destaddr);
// Set its entry in the owner.
addr_t flags = entry & PML_FLAGS;
(FORKPML + level)->entry[pos] = phys | flags;
if ( level == 1 )
{
//Log::PrintF(" copy\n");
// Determine the source page's virtual address: page table
// number (pmloffset) times ENTRIES, plus the entry index,
// counted in pages.
const void* src = (const void*) ((pmloffset * ENTRIES + pos) * 4096UL);
// Determine the destination page's address.
void* dest = (void*) (FORKPML + level - 1);
Maxsi::Memory::Copy(dest, src, sizeof(PML));
}
else
{
//Log::PrintF(" recurse\n");
// Fork the PML recursively!
pmloffset = pmloffset * ENTRIES + pos;
positionstack[--level] = 0;
continue;
}
}
// If this entry should be linked, link it.
else
{
//Log::PrintF(" link\n");
FORKPML[level].entry[pos] = entry;
}
positionstack[level]++;
}
//Log::PrintF("Fork: Loop Terminated\n");
if ( !failure )
{
// Now, the new top pml needs to have its fractal memory fixed.
const addr_t flags = PML_PRESENT | PML_WRITABLE;
addr_t mapto;
addr_t childaddr;
(FORKPML + TOPPMLLEVEL)->entry[ENTRIES-1] = newtoppmladdr | flags;
childaddr = (FORKPML + TOPPMLLEVEL)->entry[ENTRIES-2] & PML_ADDRESS;
for ( size_t i = TOPPMLLEVEL-1; i > 0; i-- )
{
mapto = (addr_t) (FORKPML + i);
MapKernel(childaddr, mapto);
InvalidatePage(mapto);
(FORKPML + i)->entry[ENTRIES-1] = newtoppmladdr | flags;
childaddr = (FORKPML + i)->entry[ENTRIES-2] & PML_ADDRESS;
}
//Log::PrintF("Fork: Done\n");
return newtoppmladdr;
}
// The fork failed, so we'll have to clean up the new address space
// and free all the pages we forked so far. It'd be nice to detect
// that early on, but doing so seems impractical or inefficient.
// Let's just do the dirty work and clean up.
// TODO: Fix this error condition by deleting the new pages.
Panic("Out of memory during fork. This isn't supported yet.");
return 0;
}
}
}

View File

@ -0,0 +1,57 @@
/******************************************************************************
COPYRIGHT(C) JONAS 'SORTIE' TERMANSEN 2011.
This file is part of Sortix.
Sortix is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
Sortix is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with Sortix. If not, see <http://www.gnu.org/licenses/>.
memorymanagement.h
Handles memory for the x86 family of architectures.
******************************************************************************/
#ifndef SORTIX_X86_FAMILY_MEMORYMANAGEMENT_H
#define SORTIX_X86_FAMILY_MEMORYMANAGEMENT_H
namespace Sortix
{
struct PML
{
addr_t entry[4096 / sizeof(addr_t)];
};
namespace Memory
{
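// These constants mirror the x86 hardware page-table entry layout:
// bit 0 marks the entry present, bit 1 writable, bit 2 reachable from
// user mode, and bits 9-11 are ignored by the MMU, leaving them free
// for the kernel's own bookkeeping (PML_FORK below). For example, an
// entry of 0x00403007 splits into the frame 0x00403000 (entry &
// PML_ADDRESS) and the flags 0x007 (entry & PML_FLAGS): present,
// writable, and userspace.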
const addr_t PML_PRESENT = (1<<0);
const addr_t PML_WRITABLE = (1<<1);
const addr_t PML_USERSPACE = (1<<2);
const addr_t PML_AVAILABLE1 = (1<<9);
const addr_t PML_AVAILABLE2 = (1<<10);
const addr_t PML_AVAILABLE3 = (1<<11);
const addr_t PML_FORK = PML_AVAILABLE1;
const addr_t PML_FLAGS = (0xFFFUL); // Bits used for the flags.
const addr_t PML_ADDRESS = (~0xFFFUL); // Bits used for the address.
}
}
#ifdef PLATFORM_X86
#include "../x86/memorymanagement.h"
#endif
#ifdef PLATFORM_X64
#include "../x64/memorymanagement.h"
#endif
#endif

View File

@ -0,0 +1,104 @@
/******************************************************************************
COPYRIGHT(C) JONAS 'SORTIE' TERMANSEN 2011.
This file is part of Sortix.
Sortix is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
Sortix is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with Sortix. If not, see <http://www.gnu.org/licenses/>.
memorymanagement.cpp
Handles memory for the x86 architecture.
******************************************************************************/
#include "platform.h"
#include <libmaxsi/memory.h>
#include "multiboot.h"
#include "panic.h"
#include "../memorymanagement.h"
#include "x86-family/memorymanagement.h"
namespace Sortix
{
namespace Page
{
extern size_t stackused;
extern size_t stacklength;
}
namespace Memory
{
extern addr_t currentdir;
void InitCPU()
{
PML* const BOOTPML2 = (PML* const) 0x01000UL;
PML* const BOOTPML1 = (PML* const) 0x02000UL;
PML* const FORKPML1 = (PML* const) 0x03000UL;
PML* const IDENPML1 = (PML* const) 0x04000UL;
// Initialize the six memory structures (0x1000-0x6FFF) with zeroes.
Maxsi::Memory::Set(BOOTPML2, 0, 0x6000UL);
// Identity map the first 4 MiB.
addr_t flags = PML_PRESENT | PML_WRITABLE;
BOOTPML2->entry[0] = ((addr_t) IDENPML1) | flags;
for ( size_t i = 0; i < ENTRIES; i++ )
{
IDENPML1->entry[i] = (i * 4096UL) | flags;
}
// Next order of business is to map the virtual memory structures
// to the pre-defined locations in the virtual address space.
// Fractal map the PML1s.
BOOTPML2->entry[1023] = (addr_t) BOOTPML2 | flags;
// Fractal map the PML2s.
BOOTPML2->entry[1022] = (addr_t) BOOTPML1 | flags | PML_FORK;
BOOTPML1->entry[1023] = (addr_t) BOOTPML2 | flags;
// Add some predefined room for forking address spaces.
BOOTPML1->entry[0] = (addr_t) FORKPML1 | flags | PML_FORK;
// The virtual memory structures are now available on the predefined
// locations. This means the virtual memory code is bootstrapped. Of
// course, we still have no physical page allocator, so that's the
// next step.
PML* const PHYSPML1 = (PML* const) 0x05000UL;
PML* const PHYSPML0 = (PML* const) 0x06000UL;
BOOTPML2->entry[1021] = (addr_t) PHYSPML1 | flags;
PHYSPML1->entry[0] = (addr_t) PHYSPML0 | flags;
// Alright, enable virtual memory!
SwitchAddressSpace((addr_t) BOOTPML2);
size_t cr0;
asm volatile("mov %%cr0, %0": "=r"(cr0));
cr0 |= 0x80000000UL; /* Enable paging! */
asm volatile("mov %0, %%cr0":: "r"(cr0));
Page::stackused = 0;
Page::stacklength = 4096UL / sizeof(addr_t);
// The physical memory allocator should now be ready for use. Next
// up, the calling function will fill up the physical allocator with
// plenty of nice physical pages. (see Page::InitPushRegion)
}
}
}

View File

@ -0,0 +1,54 @@
/******************************************************************************
COPYRIGHT(C) JONAS 'SORTIE' TERMANSEN 2011.
This file is part of Sortix.
Sortix is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
Sortix is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with Sortix. If not, see <http://www.gnu.org/licenses/>.
memorymanagement.h
Handles memory for the x86 architecture.
******************************************************************************/
#ifndef SORTIX_X86_MEMORYMANAGEMENT_H
#define SORTIX_X86_MEMORYMANAGEMENT_H
namespace Sortix
{
namespace Memory
{
const size_t TOPPMLLEVEL = 2;
const size_t ENTRIES = 4096UL / sizeof(addr_t);
const size_t TRANSBITS = 10;
PML* const PMLS[TOPPMLLEVEL + 1] =
{
(PML* const) 0x0,
(PML* const) 0xFFC00000UL,
(PML* const) 0xFFBFF000UL,
};
PML* const FORKPML = (PML* const) 0xFF800000UL;
}
namespace Page
{
addr_t* const STACK = (addr_t* const) 0xFF400000UL;
const size_t MAXSTACKSIZE = (4UL*1024UL*1024UL);
const size_t MAXSTACKLENGTH = MAXSTACKSIZE / sizeof(addr_t);
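// On x86, where addr_t is 4 bytes, this is 1048576 slots; with one
// slot per 4 KiB frame the stack can track up to 4 GiB of physical
// memory, the whole 32-bit physical address space.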
}
}
#endif