Refactored virtual memory management, making it less buggy.

uintptr_t is now replaced with addr_t when referring to physical memory
addresses in Sortix. Many bugs in the previous memory management code have been
fixed. The new interface is less flexible, but should prove more solid now that
the nasty internals are hidden away. The new interface design should also make
the code more 64-bit friendly.
commit 66c058fba1
parent 9b79673dcb
Jonas 'Sortie' Termansen, 2011-08-07 00:18:41 +02:00

9 changed files with 361 additions and 375 deletions
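In concrete terms, the new mapping interface looks like this from calling code. This is a minimal sketch: only the Page and VirtualMemory declarations come from the new header below; the surrounding function and the example address are illustrative.

    #include <sortix/memorymanagement.h>

    void Example()
    {
        using namespace Sortix;
        addr_t page = Page::Get();                    // physical page, 0 on failure
        if ( page == 0 ) { return; }
        addr_t where = 0x80001000UL;                  // some kernel-half virtual address
        VirtualMemory::MapKernel(where, page);        // flags and TLB flush handled inside
        // ... use the memory at 'where' ...
        Page::Put(VirtualMemory::UnmapKernel(where)); // unmapping returns the physical page
    }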


@@ -36,15 +36,6 @@
 #include <sortix/memorymanagement.h>
 #include <sortix/panic.h>
-namespace Sortix
-{
-    namespace VirtualMemory
-    {
-        extern uintptr_t KernelHalfStart;
-        extern uintptr_t KernelHalfEnd;
-    }
-}
 #endif
 #define IsGoodChunkPosition(Chunk) ((uintptr_t) Wilderness + WildernessSize <= (uintptr_t) (Chunk) && (uintptr_t) (Chunk) <= (uintptr_t) HeapStart)
@@ -188,7 +179,7 @@ namespace Maxsi
     {
         Memory::Set(Bins, 0, sizeof(Bins));
 #ifdef SORTIX_KERNEL
-        Wilderness = (byte*) Sortix::VirtualMemory::KernelHalfEnd;
+        Wilderness = (byte*) Sortix::VirtualMemory::heapUpper;
 #else
         // TODO: This is very 32-bit specific!
         Wilderness = (byte*) 0x80000000;
@@ -210,7 +201,7 @@ namespace Maxsi
 #ifdef SORTIX_KERNEL
         // Check if the wilderness would grow larger than the kernel memory area.
-        if ( ( ((uintptr_t) Wilderness) - Sortix::VirtualMemory::KernelHalfStart ) < NeededSize ) { return false; }
+        if ( ( ((uintptr_t) Wilderness) - Sortix::VirtualMemory::heapLower ) < NeededSize ) { return false; }
 #endif
         // Figure out how where the new wilderness will be.
@@ -244,9 +235,9 @@
         PagesLeft--;
         // Get a raw unused physical page.
-        void* Page = Sortix::Page::Get();
-        if ( Page == NULL )
+        addr_t Page = Sortix::Page::Get();
+        if ( Page == 0 )
         {
             // If none is available, simply let the allocation fail
             // and unallocate everything we did allocate so far.
@@ -254,21 +245,17 @@
             {
                 PagesLeft++;
-                uintptr_t OldVirtual = NewWilderness + 4096 * PagesLeft;
-                void* OldPage = Sortix::VirtualMemory::LookupAddr(OldVirtual);
-                Sortix::VirtualMemory::Map(0, OldVirtual, 0);
+                addr_t OldVirtual = NewWilderness + 4096 * PagesLeft;
+                addr_t OldPage = Sortix::VirtualMemory::UnmapKernel(OldVirtual);
                 Sortix::Page::Put(OldPage);
             }
-            // Flash the TLB to restore everything safely.
-            Sortix::VirtualMemory::Flush();
             return false;
         }
         // Map the physical page to a virtual one.
-        uintptr_t VirtualAddr = NewWilderness + 4096 * PagesLeft;
-        Sortix::VirtualMemory::Map((uintptr_t) Page, VirtualAddr, TABLE_PRESENT | TABLE_WRITABLE);
+        addr_t VirtualAddr = NewWilderness + 4096 * PagesLeft;
+        Sortix::VirtualMemory::MapKernel(VirtualAddr, Page);
     }
     // Now flush the TLB such that the new pages can be safely used.
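These hunks are the heap's grow-the-wilderness path: it maps freshly allocated physical pages one at a time and, if Page::Get() runs dry midway, walks back over the pages it already mapped. Since UnmapKernel() now returns the physical page it removed, the rollback no longer needs LookupAddr() plus a manual Flush(). A condensed sketch of that allocate-or-roll-back pattern, with illustrative names:

    bool MapPages(addr_t start, size_t count)
    {
        for ( size_t i = 0; i < count; i++ )
        {
            addr_t page = Sortix::Page::Get();
            if ( page == 0 )
            {
                // Roll back what was mapped so far; UnmapKernel hands back
                // exactly the physical page that Page::Put needs.
                for ( size_t j = 0; j < i; j++ )
                {
                    addr_t old = Sortix::VirtualMemory::UnmapKernel(start + 4096UL * j);
                    Sortix::Page::Put(old);
                }
                return false;
            }
            Sortix::VirtualMemory::MapKernel(start + 4096UL * i, page);
        }
        return true;
    }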


@@ -34,7 +34,7 @@ const char* exceptions[] = { "Divide by zero", "Debug", "Non maskable interrupt"
     if ( Regs->int_no < 32 )
     {
         const char* message = ( Regs->int_no < numknownexceptions ) ? exceptions[Regs->int_no] : "Unknown";
-        Sortix::PanicF("Unhandled CPU Exception id %zu '%s' at eip=0x%zx (cr2=0x%p)", Regs->int_no, message, Regs->eip, Regs->cr2);
+        Sortix::PanicF("Unhandled CPU Exception id %zu '%s' at eip=0x%zx (cr2=0x%p, err_code=0x%p)", Regs->int_no, message, Regs->eip, Regs->cr2, Regs->err_code);
     }
     if ( interrupt_handlers[Regs->int_no] != NULL )
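The err_code added to the panic message is most telling for page faults (exception 14), where it is an architectural x86 bit field. A hypothetical decoder, not part of this commit:

    // Bit meanings per the x86 architecture (page-fault error code):
    void DescribePageFault(uint32_t err)
    {
        Sortix::Log::PrintF("%s during %s from %s mode\n",
            (err & 1) ? "protection violation" : "non-present page", // bit 0
            (err & 2) ? "write" : "read",                            // bit 1
            (err & 4) ? "user" : "kernel");                          // bit 2
    }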


@@ -241,23 +241,21 @@
     Thread::Entry initstart = RunApplication;
+    // TODO: Create a new page directory here for the first process!
     if ( initrd != NULL )
     {
-        void* loadptr = (void*) 0x400000;
-        uintptr_t loadint = (uintptr_t) loadptr;
+        addr_t loadat = 0x400000UL;
 #ifdef PLATFORM_VIRTUAL_MEMORY
         ASSERT(initrdsize <= 4096);
-        void* apppageptr = Page::Get();
-        uintptr_t apppageint = (uintptr_t) apppageptr;
-        uintptr_t flags = TABLE_PRESENT | TABLE_WRITABLE | TABLE_USER_SPACE;
-        VirtualMemory::Map(apppageint, loadint, flags);
-        VirtualMemory::Flush();
+        addr_t apppage = Page::Get();
+        VirtualMemory::MapUser(loadat, apppage);
 #endif
-        Memory::Copy(loadptr, initrd, initrdsize);
-        initstart = (Thread::Entry) loadptr;
+        Memory::Copy((void*) loadat, initrd, initrdsize);
+        initstart = (Thread::Entry) loadat;
     }
     if ( Scheduler::CreateThread(NULL, initstart) == NULL )
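Stripped of the type changes, this hunk replaces a three-step mapping with one call; MapUser() applies TABLE_PRESENT | TABLE_WRITABLE | TABLE_USER_SPACE and flushes the TLB itself:

    // Before:
    VirtualMemory::Map(apppageint, loadint, TABLE_PRESENT | TABLE_WRITABLE | TABLE_USER_SPACE);
    VirtualMemory::Flush();

    // After:
    VirtualMemory::MapUser(loadat, apppage);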


@@ -39,9 +39,9 @@ using namespace Maxsi;
 namespace Sortix
 {
-    const uintptr_t KernelStart = 0x000000;
-    const size_t KernelLength = 0x200000;
-    const size_t KernelLeadingPages = KernelLength / 0x1000;
+    const addr_t KernelStart = 0x000000UL;
+    const size_t KernelLength = 0x200000UL;
+    const size_t KernelLeadingPages = KernelLength / 0x1000UL;
     namespace Page
     {
@@ -163,329 +163,367 @@ namespace Sortix
     namespace VirtualMemory
     {
+        const size_t TABLE_PRESENT = (1<<0);
+        const size_t TABLE_WRITABLE = (1<<1);
+        const size_t TABLE_USER_SPACE = (1<<2);
+        const size_t TABLE_RESERVED1 = (1<<3); // Used internally by the CPU.
+        const size_t TABLE_RESERVED2 = (1<<4); // Used internally by the CPU.
+        const size_t TABLE_ACCESSED = (1<<5);
+        const size_t TABLE_DIRTY = (1<<6);
+        const size_t TABLE_RESERVED3 = (1<<7); // Used internally by the CPU.
+        const size_t TABLE_RESERVED4 = (1<<8); // Used internally by the CPU.
+        const size_t TABLE_AVAILABLE1 = (1<<9);
+        const size_t TABLE_AVAILABLE2 = (1<<10);
+        const size_t TABLE_AVAILABLE3 = (1<<11);
+        const size_t TABLE_FLAGS = (0xFFFUL); // Bits used for the flags.
+        const size_t TABLE_ADDRESS = (~0xFFFUL); // Bits used for the address.
+        const size_t DIR_PRESENT = (1<<0);
+        const size_t DIR_WRITABLE = (1<<1);
+        const size_t DIR_USER_SPACE = (1<<2);
+        const size_t DIR_WRITE_THROUGH = (1<<3);
+        const size_t DIR_DISABLE_CACHE = (1<<4);
+        const size_t DIR_ACCESSED = (1<<5);
+        const size_t DIR_RESERVED1 = (1<<6);
+        const size_t DIR_4MIB_PAGES = (1<<7);
+        const size_t DIR_RESERVED2 = (1<<8);
+        const size_t DIR_AVAILABLE1 = (1<<9);
+        const size_t DIR_AVAILABLE2 = (1<<10);
+        const size_t DIR_AVAILABLE3 = (1<<11);
+        const size_t DIR_FLAGS = (0xFFFUL); // Bits used for the flags.
+        const size_t DIR_ADDRESS = (~0xFFFUL); // Bits used for the address.
+        const size_t ENTRIES = 4096 / sizeof(addr_t);
+        struct Table
+        {
+            addr_t page[ENTRIES];
+        };
+        struct Dir
+        {
+            addr_t table[ENTRIES];
+        };
 #ifdef PLATFORM_X86
         // These structures are always virtually mapped to these addresses.
-        Dir* CurrentDir = (Dir*) 0xFFBFF000;
-        Table* CurrentTables = (Table*) 0xFFC00000;
-        uintptr_t KernelHalfStart = 0x80000000;
-        uintptr_t KernelHalfEnd = 0xFFBFF000;
+        Table* const makingTable = (Table*) 0xFFBFC000UL;
+        Dir* const makingDir = (Dir*) 0xFFBFD000UL;
+        Dir* const kernelDir = (Dir*) 0xFFBFE000UL;
+        Dir* const currentDir = (Dir*) 0xFFBFF000UL;
+        Table* const currentTables = (Table*) 0xFFC00000UL;
 #endif
 #ifdef PLATFORM_X64
         // TODO: These are dummy values!
-        Dir* CurrentDir = (Dir*) 0xFACEB00C;
-        Table* CurrentTables = (Table*) 0xFACEB00C;
-        uintptr_t KernelHalfStart = 0xFACEB00C;
-        uintptr_t KernelHalfEnd = 0xFACEB00C;
+        const Dir* currentDir = (Dir*) 0xFACEB00C;
+        const Table* currentTables = (Table*) 0xFACEB00C;
 #endif
-        const size_t PointersPerPage = 4096 / sizeof(uintptr_t);
-        size_t KernelLeadingPages;
-        Dir* CurrentDirPhys;
-        Dir* KernelDirPhys;
-        bool Virgin;
-#define ENABLE_PAGING() \
-    { \
-        size_t cr0; \
-        asm volatile("mov %%cr0, %0": "=r"(cr0)); \
-        cr0 |= 0x80000000UL; /* Enable paging! */ \
-        asm volatile("mov %0, %%cr0":: "r"(cr0)); \
-    }
-#define DISABLE_PAGING() \
-    { \
-        size_t cr0; \
-        asm volatile("mov %%cr0, %0": "=r"(cr0)); \
-        cr0 &= ~(0x80000000UL); /* Disable paging! */ \
-        asm volatile("mov %0, %%cr0":: "r"(cr0)); \
-    }
-        // Internally used functions
-        void IdentityPhys(uintptr_t Addr);
-        void FixupPhys(Dir* D);
-        Table* GetTablePhys(uintptr_t Addr);
-        void DebugTable(char* Base, Table* T)
-        {
-#ifdef PLATFORM_X86
-            Log::PrintF("-- Recursing to table at 0x%p spanning [0x%p - 0x%p] --\n", T, Base, Base + 1024 * 0x1000 - 1);
-            for ( size_t I = 0; I < 1024; I++ )
-            {
-                uintptr_t Entry = T->Page[I];
-                if ( Entry == 0 ) { continue; }
-                Log::PrintF("[0x%p] -> [0x%p] [Flags=0x%x]\n", Base + I * 0x1000, Entry & TABLE_ADDRESS, Entry & TABLE_FLAGS);
-                while ( true )
-                {
-                    __asm__ ( "hlt" );
-                    //uint32_t Key = GKeyboard->HackGetKey();
-                    //if ( Key == '\n' ) { break; }
-                    //if ( Key == 0xFFFFFFFF - 25 ) { I += 23; break; }
-                }
-            }
-#else
-#warning "Virtual Memory is not available on this arch"
-            while(true);
-#endif
-        }
-        void DebugDir(Dir* D)
-        {
-#ifdef PLATFORM_X86
-            Log::PrintF("-- Start of debug of page dir at 0x%p --\n", D);
-            if ( (uintptr_t) D & 1 ) { Log::PrintF("-- SOMETHING IS WRONG! --\n", D); while ( true ) { __asm__ ( "hlt" ); } }
-            for ( size_t I = 0; I < 1024; I++ )
-            {
-                if ( !(D->Table[I] & DIR_PRESENT) ) { continue; }
-                Table* T = (Table*) (D->Table[I] & DIR_ADDRESS);
-                DebugTable((char*) (I * 0x40000), T);
-            }
-            Log::PrintF("-- End of debug of page dir at 0x%p --\n", D);
-#else
-#warning "Virtual Memory is not available on this arch"
-            while(true);
-#endif
-        }
+        addr_t currentDirPhysical;
+#ifdef PLATFORM_X86
+        Table* BootstrapCreateTable(Dir* dir, addr_t where);
+        void BootstrapMap(Dir* dir, addr_t where, addr_t physical);
+        void BootstrapMapStructures(Dir* dir);
+        void SwitchDirectory(addr_t dir);
+        addr_t CreateDirectory();
+#endif
         void Init()
         {
 #ifdef PLATFORM_X86
-            Virgin = true;
-            // Allocate a page dir and reset it.
-            CurrentDirPhys = (Dir*) Page::Get();
-            if ( CurrentDirPhys == NULL ) { Panic("memorymanagement.cpp: Could not allocate page dir"); }
-            Memory::Set(CurrentDirPhys, 0, sizeof(Dir));
-            //while ( true ) { DebugDir(CurrentDirPhys); }
-            // Identity map the kernel.
-            for ( uintptr_t P = KernelStart; P < KernelStart + KernelLength; P += 0x1000 ) { IdentityPhys(P); }
-            GetTablePhys(0x400000UL);
-            GetTablePhys(0x80000000UL - 4096UL);
-            // Initialize all the kernel tables from 0x8000000 to 0xFFFFFFFF here!
-            for ( uintptr_t P = KernelHalfStart; P < KernelHalfEnd; P += 4096 ) { GetTablePhys(P); }
-            // Prepare the page dir for real usage.
-            FixupPhys(CurrentDirPhys);
-            //while ( true ) { DebugDir(CurrentDirPhys); }
-            // Now switch to the initial virtual address space.
-            SwitchDir(CurrentDirPhys);
-            // Remember this page dir as it is our base page dir.
-            KernelDirPhys = CurrentDirPhys;
-            Virgin = false;
+            // Initialize variables.
+            currentDirPhysical = 0;
+            // Allocate a page we can use for our kernel page directory.
+            Dir* dirphys = (Dir*) Page::Get();
+            if ( dirphys == NULL ) { Panic("memorymanagement.cpp: Could not allocate page dir"); }
+            Memory::Set(dirphys, 0, sizeof(Dir));
+            // Identity map the kernel.
+            for ( addr_t ptr = KernelStart; ptr < KernelStart + KernelLength; ptr += 0x1000UL )
+            {
+                BootstrapMap(dirphys, ptr, ptr);
+            }
+            // Create every table used in the kernel half. We do it now such that
+            // any copies of the kernel dir never gets out of date.
+            for ( addr_t ptr = 0x80000000UL; ptr != 0UL; ptr += ENTRIES * 0x1000UL )
+            {
+                BootstrapCreateTable(dirphys, ptr);
+            }
+            // Map the paging structures themselves.
+            BootstrapMapStructures(dirphys);
+            // We have now created a minimal virtual environment where the kernel
+            // is mapped, the paging structures are ready, and the paging
+            // structures are mapped. We are now ready to enable pages.
+            // Switch the current dir - this enables paging.
+            SwitchDirectory((addr_t) dirphys);
+            // Hello, virtual world!
 #else
 #warning "Virtual Memory is not available on this arch"
             while(true);
 #endif
         }
-        Table* GetTablePhys(uintptr_t Addr)
-        {
-#ifdef PLATFORM_X86
-            // Find the desired table entry, if existing.
-            uintptr_t DirIndex = Addr / 0x400000UL; // 4 MiB
-            uintptr_t T = CurrentDirPhys->Table[DirIndex] & DIR_ADDRESS;
-            // If the table doesn't exist, create it.
-            if ( T == NULL )
-            {
-                // Allocate a page.
-                T = (uintptr_t) Page::Get();
-                // Check if anything went wrong.
-                if ( T == NULL ) { Panic("memorymanagement.cpp: Could not allocate page table"); }
-                // Reset the page's contents.
-                Memory::Set((void*) T, 0, sizeof(Table));
-                // Now add some flags
-                uintptr_t Flags = DIR_PRESENT | DIR_WRITABLE | DIR_USER_SPACE;
-                CurrentDirPhys->Table[DirIndex] = T | Flags;
-            }
-            return (Table*) T;
-#else
-#warning "Virtual Memory is not available on this arch"
-            while(true);
-            return NULL;
-#endif
-        }
-        void IdentityPhys(uintptr_t Addr)
-        {
-#ifdef PLATFORM_X86
-            Table* T = GetTablePhys(Addr);
-            uintptr_t Flags = TABLE_PRESENT | TABLE_WRITABLE;
-            size_t TableIndex = (Addr % 0x400000) / 0x1000;
-            T->Page[TableIndex] = Addr | Flags;
-#else
-#warning "Virtual Memory is not available on this arch"
-            while(true);
-#endif
-        }
-        void FixupPhys(Dir* D)
-        {
-#ifdef PLATFORM_X86
-            Table* SecondLastTable = GetTablePhys((uintptr_t) CurrentDir);
-            uintptr_t Flags = TABLE_PRESENT | TABLE_WRITABLE;
-            uintptr_t DirEntry = ((uintptr_t) D) | Flags;
-            SecondLastTable->Page[PointersPerPage-1] = DirEntry;
-            Table* LastTable = GetTablePhys((uintptr_t) CurrentTables);
-            for ( size_t I = 0; I < PointersPerPage; I++ )
-            {
-                LastTable->Page[I] = (D->Table[I] & DIR_ADDRESS) | Flags;
-            }
-#else
-#warning "Virtual Memory is not available on this arch"
-            while(true);
-#endif
-        }
-        void Fixup(Dir* D)
-        {
-#ifdef PLATFORM_X86
-            uintptr_t Flags = TABLE_PRESENT | TABLE_WRITABLE;
-            Table* T = &CurrentTables[PointersPerPage-1];
-            for ( size_t I = 0; I < PointersPerPage; I++ )
-            {
-                T->Page[I] = (D->Table[I] & DIR_ADDRESS) | Flags;
-            }
-#else
-#warning "Virtual Memory is not available on this arch"
-            while(true);
-#endif
-        }
-        void SwitchDir(Dir* PhysicalDirAddr)
-        {
-#ifdef PLATFORM_X86
-            // Set the new page directory.
-            CurrentDirPhys = PhysicalDirAddr;
-            asm volatile("mov %0, %%cr3":: "r"(PhysicalDirAddr));
-            if ( !Virgin )
-            {
-                uintptr_t Entry = ((uintptr_t) PhysicalDirAddr & DIR_ADDRESS) | TABLE_PRESENT | TABLE_WRITABLE;
-                CurrentTables[PointersPerPage-2].Page[PointersPerPage-1] = Entry;
-            }
-            // Reset the paging flag in the cr0 register to enable paging, and flush the paging cache.
-            ENABLE_PAGING();
-#else
-#warning "Virtual Memory is not available on this arch"
-            while(true);
-#endif
-        }
+#ifdef PLATFORM_X86
+        inline addr_t GetTableId(addr_t where) { return where / (4096UL * ENTRIES); }
+        inline addr_t GetPageId(addr_t where) { return ( where / 4096UL ) % ENTRIES; }
+        Table* BootstrapCreateTable(Dir* dir, addr_t where)
+        {
+            size_t tableid = GetTableId(where);
+            addr_t tabledesc = dir->table[tableid];
+            if ( tabledesc != 0 )
+            {
+                return (Table*) (tabledesc & TABLE_ADDRESS);
+            }
+            else
+            {
+                addr_t tablepage = Page::Get();
+                if ( tablepage == 0 )
+                {
+                    PanicF("memorymanagement.cpp: Could not allocate bootstrap page table for 0x%p", where);
+                }
+                Memory::Set((void*) tablepage, 0, sizeof(Table));
+                tabledesc = tablepage | TABLE_PRESENT | TABLE_WRITABLE;
+                dir->table[tableid] = tabledesc;
+                ASSERT((Table*) tablepage == BootstrapCreateTable(dir, where));
+                return (Table*) tablepage;
+            }
+        }
+        void BootstrapMap(Dir* dir, addr_t where, addr_t physical)
+        {
+            Table* table = BootstrapCreateTable(dir, where);
+            size_t pageid = GetPageId(where);
+            table->page[pageid] = physical | TABLE_PRESENT | TABLE_WRITABLE;
+        }
+        void BootstrapMapStructures(Dir* dir)
+        {
+            // Map the dir itself.
+            BootstrapMap(dir, (addr_t) kernelDir, (addr_t) dir);
+            BootstrapMap(dir, (addr_t) currentDir, (addr_t) dir);
+            // Map the tables.
+            for ( size_t i = 0; i < ENTRIES; i++ )
+            {
+                addr_t tabledesc = dir->table[i];
+                if ( tabledesc == 0 ) { continue; }
+                addr_t mapto = (addr_t) &(currentTables[i]);
+                addr_t mapfrom = (tabledesc & TABLE_ADDRESS);
+                BootstrapMap(dir, mapto, mapfrom);
+            }
+        }
+#endif
+#ifdef PLATFORM_X86
+        addr_t Lookup(addr_t where)
+        {
+            // Make sure we are only mapping kernel-approved pages.
+            size_t tableid = GetTableId(where);
+            addr_t tabledesc = currentDir->table[tableid];
+            if ( !(tabledesc & DIR_PRESENT) ) { return 0; }
+            size_t pageid = GetPageId(where);
+            return currentTables[tableid].page[pageid];
+        }
+        // Enables paging and flushes the Translation Lookaside Buffer (TLB).
         void Flush()
         {
-#ifdef PLATFORM_X86
-            Fixup(CurrentDir);
-            ENABLE_PAGING();
-#else
-#warning "Virtual Memory is not available on this arch"
-            while(true);
-#endif
+            asm volatile("mov %0, %%cr3":: "r"(currentDirPhysical));
+            size_t cr0; \
+            asm volatile("mov %%cr0, %0": "=r"(cr0));
+            cr0 |= 0x80000000UL; // Enable paging!
+            asm volatile("mov %0, %%cr0":: "r"(cr0));
         }
-        Dir* NewDir()
+        addr_t CreateAddressSpace()
         {
-#ifdef PLATFORM_X86
-            DISABLE_PAGING();
-            // TODO: Is the stack well defined here?!
-            Dir* Result = (Dir*) Page::Get();
-            if ( Result != NULL )
-            {
-                Memory::Copy(Result, KernelDirPhys, sizeof(Dir));
-            }
-            ENABLE_PAGING();
-            return Result;
-#else
-#warning "Virtual Memory is not available on this arch"
-            while(true);
-            return NULL;
-#endif
+            return CreateDirectory();
         }
-        void Map(uintptr_t Physical, uintptr_t Virtual, uintptr_t Flags)
-        {
-#ifdef PLATFORM_X86
-            // TODO: Possibly validate Physical and Virtual are aligned, and that
-            // flags uses only legal bits. Should the function then Panic?
-            size_t DirIndex = Virtual / 0x400000; // 4 MiB
-            // See if the required table in the dir exists.
-            if ( !(CurrentDir->Table[DirIndex] & DIR_PRESENT) )
-            {
-                Log::PrintF("3-1-1\n");
-                //DISABLE_PAGING();
-                // TODO: Is the stack well defined here?!
-                // This will create the table we need.
-                GetTablePhys(Virtual);
-                //ENABLE_PAGING();
-            }
-            size_t TableIndex = (Virtual % 0x400000) / 0x1000;
-            CurrentTables[DirIndex].Page[TableIndex] = Physical | Flags;
-#else
-#warning "Virtual Memory is not available on this arch"
-            while(true);
-#endif
-        }
-        // Unsafe version of VirtualMemory::Map. This has no error checking, but is faster.
-        void MapUnsafe(uintptr_t Physical, uintptr_t Virtual, uintptr_t Flags)
-        {
-#ifdef PLATFORM_X86
-            size_t DirIndex = Virtual / 0x400000; // 4 MiB
-            size_t TableIndex = (Virtual % 0x400000) / 0x1000;
-            CurrentTables[DirIndex].Page[TableIndex] = Physical | Flags;
-#else
-#warning "Virtual Memory is not available on this arch"
-            while(true);
-#endif
-        }
-        void* LookupAddr(uintptr_t Virtual)
-        {
-#ifdef PLATFORM_X86
-            size_t DirIndex = Virtual / 0x400000; // 4 MiB
-            size_t TableIndex = (Virtual % 0x400000) / 0x1000;
-            return (void*) (CurrentTables[DirIndex].Page[TableIndex] & TABLE_ADDRESS);
-#else
-#warning "Virtual Memory is not available on this arch"
-            while(true);
-            return NULL;
-#endif
-        }
+        void SwitchAddressSpace(addr_t addrspace)
+        {
+            return SwitchDirectory(addrspace);
+        }
+        void SwitchDirectory(addr_t dir)
+        {
+            asm volatile("mov %0, %%cr3":: "r"(dir));
+            currentDirPhysical = dir;
+            Flush();
+        }
+        addr_t CreateDirectory()
+        {
+            // Allocate the thread pages we need, one for the new pagedir,
+            // and two for the last two 8 MiB of the pagedir.
+            addr_t newdir = Page::Get();
+            if ( newdir == 0 ) { return 0; }
+            addr_t newstructmap1 = Page::Get();
+            if ( newdir == 0 ) { Page::Put(newdir); return 0; }
+            addr_t newstructmap2 = Page::Get();
+            if ( newdir == 0 ) { Page::Put(newdir); Page::Put(newstructmap1); return 0; }
+            // Map the new pagedir, clone the kernel dir, and change the last
+            // 8 MiB, such that we can map the new page structures there.
+            MapKernel((addr_t) makingDir, newdir);
+            Memory::Copy(makingDir, kernelDir, sizeof(Dir));
+            makingDir->table[1024-2] = newstructmap1 | DIR_PRESENT | DIR_WRITABLE;
+            makingDir->table[1024-1] = newstructmap2 | DIR_PRESENT | DIR_WRITABLE;
+            // Build the new page structures.
+            MapKernel((addr_t) makingTable, newstructmap1);
+            Memory::Set(makingTable, 0, sizeof(Table));
+            makingTable->page[1024-2] = currentTables[1024-2].page[1024-2];
+            makingTable->page[1024-1] = newdir | TABLE_PRESENT | TABLE_WRITABLE;
+            // Build the new page structures.
+            MapKernel((addr_t) makingTable, newstructmap2);
+            for ( size_t i = 0; i < 1024-2; i++ )
+            {
+                makingTable->page[i] = currentTables[1024-1].page[i];
+            }
+            makingTable->page[1024-2] = newstructmap1 | TABLE_PRESENT | TABLE_WRITABLE;
+            makingTable->page[1024-1] = newstructmap2 | TABLE_PRESENT | TABLE_WRITABLE;
+            return newdir;
+        }
+        void MapKernel(addr_t where, addr_t physical)
+        {
+            // Make sure we are only mapping kernel-approved pages.
+            size_t tableid = GetTableId(where);
+            addr_t tabledesc = currentDir->table[tableid];
+            ASSERT(tabledesc != 0);
+            ASSERT((tabledesc & DIR_USER_SPACE) == 0);
+            size_t pageid = GetPageId(where);
+            addr_t pagedesc = physical | TABLE_PRESENT | TABLE_WRITABLE;
+            currentTables[tableid].page[pageid] = pagedesc;
+            ASSERT(Lookup(where) == pagedesc);
+            // TODO: Only update the single page!
+            Flush();
+        }
+        addr_t UnmapKernel(addr_t where)
+        {
+            // Make sure we are only unmapping kernel-approved pages.
+            size_t tableid = GetTableId(where);
+            addr_t tabledesc = currentDir->table[tableid];
+            ASSERT(tabledesc != 0);
+            ASSERT((tabledesc & DIR_USER_SPACE) == 0);
+            size_t pageid = GetPageId(where);
+            addr_t result = currentTables[tableid].page[pageid];
+            ASSERT((result & TABLE_PRESENT) != 0);
+            result &= TABLE_ADDRESS;
+            currentTables[tableid].page[pageid] = 0;
+            // TODO: Only update the single page!
+            Flush();
+            return result;
+        }
+        Table* CreateUserTable(addr_t where, bool maycreate)
+        {
+            size_t tableid = GetTableId(where);
+            addr_t tabledesc = currentDir->table[tableid];
+            Table* table = &(currentTables[tableid]);
+            if ( tabledesc == 0 )
+            {
+                ASSERT(maycreate);
+                addr_t tablepage = Page::Get();
+                if ( tablepage == 0 ) { return NULL; }
+                tabledesc = tablepage | TABLE_PRESENT | TABLE_WRITABLE | TABLE_USER_SPACE;
+                currentDir->table[tableid] = tabledesc;
+                MapKernel((addr_t) table, tablepage);
+                // TODO: Only update the single page!
+                Flush();
+                addr_t lookup = Lookup((addr_t) table) & TABLE_ADDRESS;
+                ASSERT(lookup == tablepage);
+                Memory::Set(table, 0, sizeof(Table));
+            }
+            // Make sure we only touch dirs permitted for use by user-space!
+            ASSERT((tabledesc & TABLE_USER_SPACE) != 0);
+            return table;
+        }
+        bool MapUser(addr_t where, addr_t physical)
+        {
+            // Make sure we are only mapping user-space-approved pages.
+            Table* table = CreateUserTable(where, true);
+            if ( table == NULL ) { return false; }
+            size_t pageid = GetPageId(where);
+            addr_t pagedesc = physical | TABLE_PRESENT | TABLE_WRITABLE | TABLE_USER_SPACE;
+            table->page[pageid] = pagedesc;
+            Flush();
+            ASSERT(Lookup(where) == pagedesc);
+            // TODO: Only update the single page!
+            Flush();
+            return true;
+        }
+        addr_t UnmapUser(addr_t where)
+        {
+            // Make sure we are only mapping user-space-approved pages.
+            Table* table = CreateUserTable(where, false);
+            ASSERT(table != NULL);
+            size_t pageid = GetPageId(where);
+            addr_t pagedesc = table->page[pageid];
+            ASSERT((pagedesc & TABLE_PRESENT) != 0);
+            addr_t result = pagedesc & TABLE_ADDRESS;
+            table->page[pageid] = 0;
+            // TODO: Only update the single page!
+            Flush();
+            return result;
+        }
+#else
+#warning "Virtual Memory is not available on this arch"
+        void Flush() { while(true); }
+        void SwitchDirectory(addr_t dir) { while(true); }
+        addr_t CreateDirectory() { while(true); return 0; }
+        addr_t UnmapKernel(addr_t where) { while(true); return 0; }
+        addr_t UnmapUser(addr_t where) { while(true); return 0; }
+#endif
     }
 }
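The x86 half of this file leans on the classic self-mapping trick: the page directory's last entries point back at the paging structures, so the directory is always visible at currentDir (0xFFBFF000) and all the page tables appear as one contiguous array at currentTables (0xFFC00000). GetTableId() and GetPageId() just split a virtual address into its 4 MiB directory slot and 4 KiB table slot. A self-contained illustration of the arithmetic, runnable on any host (addr_t pinned to 32 bits so that ENTRIES is 1024, as on x86):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    typedef uint32_t addr_t;
    const size_t ENTRIES = 4096 / sizeof(addr_t); // 1024

    addr_t GetTableId(addr_t where) { return where / (4096UL * ENTRIES); }
    addr_t GetPageId(addr_t where) { return ( where / 4096UL ) % ENTRIES; }

    int main()
    {
        // 0x80001000 = start of the kernel half plus one page.
        assert(GetTableId(0x80001000UL) == 512); // directory slot 512 of 1024
        assert(GetPageId(0x80001000UL) == 1);    // page slot 1 within that table
        // Its page-table entry is therefore visible at virtual address
        // 0xFFC00000 + 512 * 4096 + 1 * sizeof(addr_t), courtesy of the
        // self-mapped directory entries.
    }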


@@ -32,66 +32,27 @@ namespace Sortix
 #ifdef MULTIBOOT_HEADER
         void Init(multiboot_info_t* BootInfo);
 #endif
-        void* Get();
-        void Put(void* Page);
-        void GetNoCache();
-        void PutNoCache(void* Page);
+        addr_t Get();
+        void Put(addr_t Page);
     }
     namespace VirtualMemory
     {
-        // TODO: Convert these to constants!
-#define TABLE_PRESENT (1<<0)
-#define TABLE_WRITABLE (1<<1)
-#define TABLE_USER_SPACE (1<<2)
-#define TABLE_RESERVED1 (1<<3) // Used internally by the CPU.
-#define TABLE_RESERVED2 (1<<4) // Used internally by the CPU.
-#define TABLE_ACCESSED (1<<5)
-#define TABLE_DIRTY (1<<6)
-#define TABLE_RESERVED3 (1<<7) // Used internally by the CPU.
-#define TABLE_RESERVED4 (1<<8) // Used internally by the CPU.
-#define TABLE_AVAILABLE1 (1<<9)
-#define TABLE_AVAILABLE2 (1<<10)
-#define TABLE_AVAILABLE3 (1<<11)
-#define TABLE_FLAGS (0xFFFUL) // Bits used for the flags.
-#define TABLE_ADDRESS (~0xFFFUL) // Bits used for the address.
-#define DIR_PRESENT (1<<0)
-#define DIR_WRITABLE (1<<1)
-#define DIR_USER_SPACE (1<<2)
-#define DIR_WRITE_THROUGH (1<<3)
-#define DIR_DISABLE_CACHE (1<<4)
-#define DIR_ACCESSED (1<<5)
-#define DIR_RESERVED1 (1<<6)
-#define DIR_4MIB_PAGES (1<<7)
-#define DIR_RESERVED2 (1<<8)
-#define DIR_AVAILABLE1 (1<<9)
-#define DIR_AVAILABLE2 (1<<10)
-#define DIR_AVAILABLE3 (1<<11)
-#define DIR_FLAGS (0xFFFUL) // Bits used for the flags.
-#define DIR_ADDRESS (~0xFFFUL) // Bits used for the address.
-        struct Table
-        {
-            uintptr_t Page[4096 / sizeof(uintptr_t)];
-        };
-        struct Dir
-        {
-            uintptr_t Table[4096 / sizeof(uintptr_t*)];
-        };
-        void Init();
-        void SwitchDir(Dir* PhysicalDirAddr);
-        void Map(uintptr_t Physical, uintptr_t Virtual, uintptr_t Flags);
-        void MapUnsafe(uintptr_t Physical, uintptr_t Virtual, uintptr_t Flags);
-        void* LookupAddr(uintptr_t Virtual);
-        void Flush();
-        void Fixup(Dir* Dir);
-        Dir* NewDir();
+        void Init();
+        void Flush();
+        addr_t CreateAddressSpace();
+        void SwitchAddressSpace(addr_t addrspace);
+        void MapKernel(addr_t where, addr_t physical);
+        bool MapUser(addr_t where, addr_t physical);
+        addr_t UnmapKernel(addr_t where);
+        addr_t UnmapUser(addr_t where);
+#ifdef PLATFORM_X86
+        const addr_t heapLower = 0x80000000UL;
+        const addr_t heapUpper = 0xFF800000UL;
+#endif
     }
+    bool ValidateUserString(const char* USER string);
 }
 #endif
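Taken together, the new header reads as a small address-space lifecycle. A hedged sketch of how a caller would use it, assuming only the conventions visible in this commit (0 means failure, and the kernel half is shared between all directories):

    addr_t SetupAddressSpace()
    {
        using namespace Sortix;
        addr_t addrspace = VirtualMemory::CreateAddressSpace();
        if ( addrspace == 0 ) { return 0; } // out of physical pages
        // Loads CR3 and flushes the TLB; kernel-half mappings keep working
        // because Init() pre-created every kernel page table, so cloned
        // directories never go out of date.
        VirtualMemory::SwitchAddressSpace(addrspace);
        return addrspace;
    }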


@@ -38,7 +38,7 @@ namespace Sortix
     {
         if ( longpanic )
         {
-            Log::Print("\e[m\e[31m\e[2J\e[H");
+            Log::Print("\e[m\e[31;40m\e[2J\e[H");
             Log::Print(" _ ");
             Log::Print(" / \\ ");
             Log::Print(" /\\ /\\ / \\ ");


@@ -55,7 +55,7 @@ namespace Sortix
         size_t AllocatedThreadId;
     }
-    Thread::Thread(Process* process, size_t id, size_t* stack, size_t stackLength)
+    Thread::Thread(Process* process, size_t id, addr_t stack, size_t stackLength)
     {
         _process = process;
         _id = id;
@@ -204,17 +204,15 @@
 #endif
         // Allocate and set up a stack for the kernel to use during interrupts.
-        void* KernelStackPage = Page::Get();
-        if ( KernelStackPage == NULL ) { Panic("scheduler.cpp: could not allocate kernel interrupt stack for tss!"); }
+        addr_t KernelStackPage = Page::Get();
+        if ( KernelStackPage == 0 ) { Panic("scheduler.cpp: could not allocate kernel interrupt stack for tss!"); }
 #ifdef PLATFORM_VIRTUAL_MEMORY
         uintptr_t MapTo = 0x80000000;
-        VirtualMemory::Map((uintptr_t) KernelStackPage, MapTo, TABLE_PRESENT | TABLE_WRITABLE);
-        VirtualMemory::Flush();
+        VirtualMemory::MapKernel(MapTo, (uintptr_t) KernelStackPage);
 #endif
-        size_t* KernelStack = ((size_t*) KernelStackPage) + 4096 / sizeof(size_t);
         GDT::SetKernelStack((size_t*) (MapTo+4096));
     }
@@ -244,8 +242,8 @@
         // Allocate a stack for this thread.
         size_t StackLength = StackSize / sizeof(size_t);
-        size_t* PhysStack = (size_t*) Page::Get();
-        if ( PhysStack == NULL )
+        addr_t PhysStack = Page::Get();
+        if ( PhysStack == 0 )
         {
 #ifndef PLATFORM_KERNEL_HEAP
             Page::Put(ThreadPage);
@@ -266,7 +264,7 @@
         uintptr_t StackPos = 0x80000000UL;
         uintptr_t MapTo = StackPos - 4096UL;
-        VirtualMemory::Map((uintptr_t) PhysStack, MapTo, TABLE_PRESENT | TABLE_WRITABLE | TABLE_USER_SPACE);
+        VirtualMemory::MapUser(MapTo, PhysStack);
         VirtualMemory::Flush();
 #else
         uintptr_t StackPos = (uintptr_t) PhysStack + 4096;
@@ -423,7 +421,11 @@
         // TODO: What do we do with the result parameter?
         Thread->~Thread();
         //Log::PrintF("<ExitedThread debug=\"2\" thread=\"%p\"/>\n", Thread);
-        Page::Put(Thread);
+#ifndef PLATFORM_KERNEL_HEAP
+        Page::Put((addr_t) Thread);
+#else
+        delete Thread;
+#endif
         //Log::PrintF("<ExitedThread debug=\"3\" thread=\"%p\"/>\n", Thread);
         if ( Thread == currentThread ) { currentThread = NULL; }
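The #ifndef reflects how Thread objects are allocated when there is no kernel heap: they live in a raw page from Page::Get() and are constructed with placement new, so teardown is an explicit destructor call followed by returning the page. A minimal, self-contained illustration of that pattern (stand-in allocator, not Sortix code):

    #include <new>
    #include <cstdlib>
    #include <cstdint>

    typedef uintptr_t addr_t;

    struct Thread { /* fields elided */ };

    addr_t PageGet() { return (addr_t) std::malloc(4096); } // stand-in for Page::Get()
    void PagePut(addr_t page) { std::free((void*) page); }  // stand-in for Page::Put()

    int main()
    {
        addr_t page = PageGet();
        Thread* thread = new ((void*) page) Thread(); // construct inside the raw page
        thread->~Thread();                            // destroy without deallocating
        PagePut((addr_t) thread);                     // hand the page itself back
    }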


@@ -37,7 +37,7 @@ namespace Sortix
         typedef void* (*Entry)(void* Parameter);
     public:
-        Thread(Process* process, size_t id, size_t* stack, size_t stackLength);
+        Thread(Process* process, size_t id, addr_t stack, size_t stackLength);
         ~Thread();
     public:
@@ -46,7 +46,7 @@
     private:
         size_t _id;
-        size_t* _stack;
+        addr_t _stack;
         size_t _stackLength;
         Process* _process;
         State _state;


@@ -141,9 +141,9 @@ OutOfMem:
     movl $0, %eax
     ret
-.globl _ZN6Sortix4Page3PutEPv
-.type _ZN6Sortix4Page3PutEPv, @function # namespace Sortix { void Paging::Free(void* Page); }
-_ZN6Sortix4Page3PutEPv:
+.globl _ZN6Sortix4Page3PutEm
+.type _ZN6Sortix4Page3PutEm, @function # namespace Sortix { void Paging::Free(void* Page); }
+_ZN6Sortix4Page3PutEm:
     push %esi
     mov _ZN6Sortix4Page15UnallocatedPageE, %eax # Load UnallocPage* Sortix::Page::UnallocatedPage
     mov 0x8(%esp), %edx
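The symbol rename is forced by C++ name mangling: under the Itanium ABI the parameter types are encoded into the symbol name, so changing Put's parameter from void* to addr_t (evidently an unsigned long on this target) changes the symbol the assembly must export:

    _ZN6Sortix4Page3PutEPv  ->  Sortix::Page::Put(void*)          ("Pv" = pointer to void)
    _ZN6Sortix4Page3PutEm   ->  Sortix::Page::Put(unsigned long)  ("m" = unsigned long)

Either can be checked with c++filt, e.g. `c++filt _ZN6Sortix4Page3PutEm`.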