Fix read-only mmap with backing store.

Jonas 'Sortie' Termansen 2015-05-14 15:19:23 +02:00
parent 423fbad835
commit 9acc74de28
4 changed files with 29 additions and 11 deletions
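
The change is easier to follow with the intent spelled out: sys_mmap previously pread the file contents through the caller's I/O context and under the single segment_lock, so populating a mapping the user requested as read-only could fail, and the lock had to be dropped during the copy. The commit adds an outer segment_write_lock that stays held across the copy, keeps the inner segment_lock for the segment list only, and switches the copy to a kernel I/O context. A minimal sketch of the resulting shape follows; the helper name, its parameter list, and the Descriptor pointer type are illustrative assumptions rather than the actual Sortix code, and error handling and segment bookkeeping are omitted.

// Illustrative sketch only: shows the locking and I/O-context pattern this
// commit applies in sys_mmap. The helper name, parameters, and Descriptor
// type are assumptions for the example, not code from the tree.
static void* populate_file_backed_mapping(Process* process, Descriptor* desc,
                                          struct segment new_segment,
                                          size_t aligned_size, off_t offset)
{
	// Outer lock: taken first and held until the mapping is populated,
	// so no other thread can remap or unmap the range in the meantime.
	ScopedLock lock1(&process->segment_write_lock);
	// Inner lock: guards the segment list while the segment is inserted
	// and its pages are mapped.
	ScopedLock lock2(&process->segment_lock);
	// ... insert new_segment and map its pages here ...
	// The copy below writes into user addresses, so the inner lock must
	// be free during it; the outer lock keeps the layout stable.
	lock2.Reset();
	// Use a kernel I/O context so the copy is done with kernel access
	// rights rather than the caller's; this is what lets a PROT_READ-only
	// file-backed mapping be populated.
	ioctx_t kctx; SetupKernelIOCtx(&kctx);
	for ( size_t so_far = 0; so_far < aligned_size; )
	{
		uint8_t* ptr = (uint8_t*) (new_segment.addr + so_far);
		size_t left = aligned_size - so_far;
		off_t pos = offset + so_far;
		ssize_t num_bytes = desc->pread(&kctx, ptr, left, pos);
		if ( num_bytes <= 0 )
			break; // the real code must decide how to handle this
		so_far += (size_t) num_bytes;
	}
	lock1.Reset();
	return (void*) new_segment.addr;
}

The same ordering appears at every other call site in the diff: segment_write_lock is acquired before segment_lock and released after it, in sys_mprotect, sys_munmap, Process::Execute, Process::ResetAddressSpace, and the ELF loader.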

View File

@@ -285,17 +285,20 @@ uintptr_t Load(const void* file_ptr, size_t file_size, Auxiliary* aux)
 	assert(IsUserspaceSegment(&segment));
+	kthread_mutex_lock(&process->segment_write_lock);
 	kthread_mutex_lock(&process->segment_lock);
 	if ( IsSegmentOverlapping(process, &segment) )
 	{
 		kthread_mutex_unlock(&process->segment_lock);
+		kthread_mutex_unlock(&process->segment_write_lock);
 		return errno = EINVAL, 0;
 	}
 	if ( !Memory::MapRange(segment.addr, segment.size, prot, PAGE_USAGE_USER_SPACE) )
 	{
 		kthread_mutex_unlock(&process->segment_lock);
+		kthread_mutex_unlock(&process->segment_write_lock);
 		return errno = EINVAL, 0;
 	}
@@ -303,10 +306,12 @@ uintptr_t Load(const void* file_ptr, size_t file_size, Auxiliary* aux)
 	{
 		Memory::UnmapRange(segment.addr, segment.size, PAGE_USAGE_USER_SPACE);
 		kthread_mutex_unlock(&process->segment_lock);
+		kthread_mutex_unlock(&process->segment_write_lock);
 		return errno = EINVAL, 0;
 	}
 	kthread_mutex_unlock(&process->segment_lock);
+	kthread_mutex_unlock(&process->segment_write_lock);
 	memset((void*) segment.addr, 0, segment.size);
 	memcpy((void*) pheader->p_vaddr, file + pheader->p_offset, pheader->p_filesz);

View File

@@ -153,6 +153,7 @@ public:
 	struct segment* segments;
 	size_t segments_used;
 	size_t segments_length;
+	kthread_mutex_t segment_write_lock;
 	kthread_mutex_t segment_lock;
 public:

View File

@@ -63,6 +63,7 @@ namespace Memory {
 void UnmapMemory(Process* process, uintptr_t addr, size_t size)
 {
+	// process->segment_write_lock is held.
 	// process->segment_lock is held.
 	assert(Page::IsAligned(addr));
 	assert(Page::IsAligned(size));
@@ -133,6 +134,7 @@ void UnmapMemory(Process* process, uintptr_t addr, size_t size)
 bool ProtectMemory(Process* process, uintptr_t addr, size_t size, int prot)
 {
+	// process->segment_write_lock is held.
 	// process->segment_lock is held.
 	assert(Page::IsAligned(addr));
 	assert(Page::IsAligned(size));
@@ -224,6 +226,7 @@ bool ProtectMemory(Process* process, uintptr_t addr, size_t size, int prot)
 bool MapMemory(Process* process, uintptr_t addr, size_t size, int prot)
 {
+	// process->segment_write_lock is held.
 	// process->segment_lock is held.
 	assert(Page::IsAligned(addr));
 	assert(Page::IsAligned(size));
@@ -247,8 +250,8 @@ bool MapMemory(Process* process, uintptr_t addr, size_t size, int prot)
 		return false;
 	}
-	// We have process->segment_lock locked, so we know that the memory in user
-	// space exists and we can safely zero it here.
+	// We have process->segment_write_lock locked, so we know that the memory in
+	// user space exists and we can safely zero it here.
 	// TODO: Another thread is able to see the old contents of the memory before
 	//       we zero it causing potential information leaks.
 	// TODO: SECURITY: Information leak.
@@ -335,7 +338,8 @@ void* sys_mmap(void* addr_ptr, size_t size, int prot, int flags, int fd,
 		return errno = EACCES, MAP_FAILED;
 	}
-	ScopedLock lock(&process->segment_lock);
+	ScopedLock lock1(&process->segment_write_lock);
+	ScopedLock lock2(&process->segment_lock);
 	// Determine where to put the new segment and its protection.
 	struct segment new_segment;
@@ -351,20 +355,18 @@ void* sys_mmap(void* addr_ptr, size_t size, int prot, int flags, int fd,
 		return MAP_FAILED;
 	// The pread will copy to user-space right requires this lock to be free.
-	// TODO: This means another thread can concurrently change this memory
-	//       mapping while the memory-mapped contents are being delivered,
-	//       resulting in an odd mix.
-	lock.Reset();
+	lock2.Reset();
 	// Read the file contents into the newly allocated memory.
 	if ( !(flags & MAP_ANONYMOUS) )
 	{
+		ioctx_t kctx; SetupKernelIOCtx(&kctx);
 		for ( size_t so_far = 0; so_far < aligned_size; )
 		{
 			uint8_t* ptr = (uint8_t*) (new_segment.addr + so_far);
 			size_t left = aligned_size - so_far;
 			off_t pos = offset + so_far;
-			ssize_t num_bytes = desc->pread(&ctx, ptr, left, pos);
+			ssize_t num_bytes = desc->pread(&kctx, ptr, left, pos);
 			if ( num_bytes < 0 )
 			{
 				// TODO: How should this situation be handled? For now we'll
@@ -383,6 +385,8 @@ void* sys_mmap(void* addr_ptr, size_t size, int prot, int flags, int fd,
 		}
 	}
+	lock1.Reset();
+
 	return (void*) new_segment.addr;
 }
@@ -400,7 +404,8 @@ int sys_mprotect(const void* addr_ptr, size_t size, int prot)
 	prot |= PROT_KREAD | PROT_KWRITE | PROT_FORK;
 	Process* process = CurrentProcess();
-	ScopedLock lock(&process->segment_lock);
+	ScopedLock lock1(&process->segment_write_lock);
+	ScopedLock lock2(&process->segment_lock);
 	if ( !Memory::ProtectMemory(process, addr, size, prot) )
 		return -1;
@@ -421,7 +426,8 @@ int sys_munmap(void* addr_ptr, size_t size)
 	size = Page::AlignUp(size);
 	Process* process = CurrentProcess();
-	ScopedLock lock(&process->segment_lock);
+	ScopedLock lock1(&process->segment_write_lock);
+	ScopedLock lock2(&process->segment_lock);
 	Memory::UnmapMemory(process, addr, size);

View File

@@ -149,6 +149,7 @@ Process::Process()
 	segments = NULL;
 	segments_used = 0;
 	segments_length = 0;
+	segment_write_lock = KTHREAD_MUTEX_INITIALIZER;
 	segment_lock = KTHREAD_MUTEX_INITIALIZER;
 	user_timers_lock = KTHREAD_MUTEX_INITIALIZER;
@@ -376,7 +377,8 @@ void Process::LastPrayer()
 void Process::ResetAddressSpace()
 {
-	ScopedLock lock(&segment_lock);
+	ScopedLock lock1(&segment_write_lock);
+	ScopedLock lock2(&segment_lock);
 	assert(Memory::GetAddressSpace() == addrspace);
@@ -798,6 +800,7 @@ void Process::ResetForExecute()
 bool Process::MapSegment(struct segment* result, void* hint, size_t size,
                          int flags, int prot)
 {
+	// process->segment_write_lock is held at this point.
 	// process->segment_lock is held at this point.
 	if ( !size )
@@ -908,6 +911,7 @@ int Process::Execute(const char* programname, const uint8_t* program,
 	struct segment tls_segment;
 	struct segment auxcode_segment;
+	kthread_mutex_lock(&segment_write_lock);
 	kthread_mutex_lock(&segment_lock);
 	if ( !(MapSegment(&arg_segment, stack_hint, arg_size, 0, stack_prot) &&
@@ -917,11 +921,13 @@ int Process::Execute(const char* programname, const uint8_t* program,
 	      MapSegment(&auxcode_segment, auxcode_hint, auxcode_size, 0, auxcode_prot)) )
 	{
 		kthread_mutex_unlock(&segment_lock);
+		kthread_mutex_unlock(&segment_write_lock);
 		ResetForExecute();
 		return errno = ENOMEM, -1;
 	}
 	kthread_mutex_unlock(&segment_lock);
+	kthread_mutex_unlock(&segment_write_lock);
 	char** target_argv = (char**) (arg_segment.addr + 0);
 	char** target_envp = (char**) (arg_segment.addr + argv_size);