
First freebsd attempt: branch linux files.

Copied from Perforce
 Change: 22636
 ServerID: perforce.ravenbrook.com
Nick Barnes 2001-09-24 17:17:56 +01:00
parent d57d192c76
commit dccbb481fd
9 changed files with 1278 additions and 1 deletion

mps/src/fri4gc.gmk Normal file (+25)

@@ -0,0 +1,25 @@
# impl.gmk.lii4gc: BUILD FOR LINUX/INTEL/GCC PLATFORM
#
# $HopeName: MMsrc!lii4gc.gmk(trunk.5) $
# Copyright (C) 2000 Harlequin Limited. All rights reserved.
PFM = fri4gc
PFMDEFS = -D_REENTRANT
MPMPF = mpsliban.c mpsioan.c lockfr.c thfri4.c pthrdext.c vmfr.c \
protfr.c protfri3.c proti3.c prmci3fr.c ssfri3.c span.c
SWPF = than.c vmfr.c protsw.c prmcan.c ssan.c
LIBS = -lm -pthread
include gc.gmk
CC = cc
# Suppress some warnings (SuSE).
# .void: -Wpointer-arith cannot be used because the string.h header does
# arithmetic on void*.
CFLAGSCOMPILER := $(subst -Wpointer-arith,,$(CFLAGSCOMPILER))
include comm.gmk

mps/src/lockfr.c Normal file (+258)

@@ -0,0 +1,258 @@
/* impl.c.lockli: RECURSIVE LOCKS FOR POSIX SYSTEMS
*
* $HopeName$
* Copyright (C) 2000 Harlequin Limited. All rights reserved.
*
* .linux: This implementation currently just supports LinuxThreads
* (platform MPS_OS_LI), Single Unix i/f.
*
* .posix: In fact, the implementation should be reusable for most POSIX
* implementations, but may need some customization for each.
*
* .design: These locks are implemented using mutexes.
*
* .recursive: Mutexes support both non-recursive and recursive locking,
* but the choice must be made at initialization time. This doesn't match
* the API of the MPS Lock module, which chooses at locking time, so all
* locks are made (non-recursive) errorchecking, and recursive locking is
* implemented by checking the error code.
*
* .claims: During use the claims field is updated to remember the number of
* claims acquired on a lock. This field must only be modified
* while we hold the mutex.
*/
#define _XOPEN_SOURCE 500
#include <pthread.h>
#include <semaphore.h>
#include <errno.h>
#include "mpmtypes.h"
#include "lock.h"
#include "config.h"
#ifndef MPS_OS_LI
#error "lockli.c is specific to LinuxThreads but MPS_OS_LI not defined"
#endif
SRCID(lockli, "$HopeName$");
/* LockAttrSetRecursive -- Set mutexattr to permit recursive locking
*
* There's a standard way to do this - but early LinuxThreads doesn't
* quite follow the standard. Some other implementations might not
* either.
*/
#ifdef OLD_LINUXTHREADS
#define LockAttrSetRecursive(attrptr) \
pthread_mutexattr_setkind_np(attrptr, PTHREAD_MUTEX_ERRORCHECK_NP)
#else
#define LockAttrSetRecursive(attrptr) \
pthread_mutexattr_settype(attrptr, PTHREAD_MUTEX_ERRORCHECK)
#endif
/* LockStruct -- the MPS lock structure
*
* .lock.posix: Posix lock structure; uses a mutex.
*/
typedef struct LockStruct {
Sig sig; /* design.mps.sig */
unsigned long claims; /* # claims held by owner */
pthread_mutex_t mut; /* the mutex itself */
} LockStruct;
/* LockSize -- size of a LockStruct */
size_t LockSize(void)
{
return sizeof(LockStruct);
}
/* LockCheck -- check a lock */
Bool LockCheck(Lock lock)
{
CHECKS(Lock, lock);
/* While claims can't be very large, I don't dare to put a limit on it. */
/* There's no way to test the mutex, or check if it's held by somebody. */
return TRUE;
}
/* LockInit -- initialize a lock */
void LockInit(Lock lock)
{
pthread_mutexattr_t attr;
int res;
AVER(lock != NULL);
lock->claims = 0;
res = pthread_mutexattr_init(&attr);
AVER(res == 0);
res = LockAttrSetRecursive(&attr);
AVER(res == 0);
res = pthread_mutex_init(&lock->mut, &attr);
AVER(res == 0);
res = pthread_mutexattr_destroy(&attr);
AVER(res == 0);
lock->sig = LockSig;
AVERT(Lock, lock);
}
/* LockFinish -- finish a lock */
void LockFinish(Lock lock)
{
int res;
AVERT(Lock, lock);
/* Lock should not be finished while held */
AVER(lock->claims == 0);
res = pthread_mutex_destroy(&lock->mut);
AVER(res == 0);
lock->sig = SigInvalid;
}
/* LockClaim -- claim a lock (non-recursive) */
void LockClaim(Lock lock)
{
int res;
AVERT(Lock, lock);
res = pthread_mutex_lock(&lock->mut);
/* pthread_mutex_lock will error if we own the lock already. */
AVER(res == 0);
/* This should be the first claim. Now we own the mutex */
/* it is ok to check this. */
AVER(lock->claims == 0);
lock->claims = 1;
}
/* LockReleaseMPM -- release a lock (non-recursive) */
void LockReleaseMPM(Lock lock)
{
int res;
AVERT(Lock, lock);
AVER(lock->claims == 1); /* The lock should only be held once */
lock->claims = 0; /* Must set this before releasing the lock */
res = pthread_mutex_unlock(&lock->mut);
/* pthread_mutex_unlock will error if we didn't own the lock. */
AVER(res == 0);
}
/* LockClaimRecursive -- claim a lock (recursive) */
void LockClaimRecursive(Lock lock)
{
int res;
AVERT(Lock, lock);
res = pthread_mutex_lock(&lock->mut);
/* pthread_mutex_lock will return: */
/* 0 if we have just claimed the lock */
/* EDEADLK if we own the lock already. */
AVER((res == 0 && lock->claims == 0) ||
(res == EDEADLK && lock->claims > 0));
++lock->claims;
AVER(lock->claims > 0);
}
/* LockReleaseRecursive -- release a lock (recursive) */
void LockReleaseRecursive(Lock lock)
{
int res;
AVERT(Lock, lock);
AVER(lock->claims > 0);
--lock->claims;
if (lock->claims == 0) {
res = pthread_mutex_unlock(&lock->mut);
/* pthread_mutex_unlock will error if we didn't own the lock. */
AVER(res == 0);
}
}
/* Global locks
*
* .global: The two "global" locks are statically allocated normal locks.
*/
static LockStruct globalLockStruct;
static LockStruct globalRecLockStruct;
static Lock globalLock = &globalLockStruct;
static Lock globalRecLock = &globalRecLockStruct;
static pthread_once_t isGlobalLockInit = PTHREAD_ONCE_INIT;
static void globalLockInit(void)
{
LockInit(globalLock);
LockInit(globalRecLock);
}
/* LockClaimGlobalRecursive -- claim the global recursive lock */
void LockClaimGlobalRecursive(void)
{
int res;
/* Ensure the global lock has been initialized */
res = pthread_once(&isGlobalLockInit, globalLockInit);
AVER(res == 0);
LockClaimRecursive(globalRecLock);
}
/* LockReleaseGlobalRecursive -- release the global recursive lock */
void LockReleaseGlobalRecursive(void)
{
LockReleaseRecursive(globalRecLock);
}
/* LockClaimGlobal -- claim the global non-recursive lock */
void LockClaimGlobal(void)
{
int res;
/* Ensure the global lock has been initialized */
res = pthread_once(&isGlobalLockInit, globalLockInit);
AVER(res == 0);
LockClaim(globalLock);
}
/* LockReleaseGlobal -- release the global non-recursive lock */
void LockReleaseGlobal(void)
{
LockReleaseMPM(globalLock);
}
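
The errorcheck-plus-counter scheme described in .recursive is easy to exercise outside the MPS. Below is a minimal standalone sketch assuming only POSIX threads; RecLock and its functions are illustrative names, not MPS code. Build with cc -pthread.

#define _XOPEN_SOURCE 600  /* for PTHREAD_MUTEX_ERRORCHECK */
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

typedef struct {
  unsigned long claims;    /* cf. LockStruct.claims */
  pthread_mutex_t mut;
} RecLock;

static void RecLockInit(RecLock *lock)
{
  pthread_mutexattr_t attr;
  lock->claims = 0;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
  pthread_mutex_init(&lock->mut, &attr);
  pthread_mutexattr_destroy(&attr);
}

static void RecLockClaim(RecLock *lock)
{
  int res = pthread_mutex_lock(&lock->mut);
  /* 0: first claim; EDEADLK: we already own the mutex. */
  assert((res == 0 && lock->claims == 0) ||
         (res == EDEADLK && lock->claims > 0));
  ++lock->claims;
}

static void RecLockRelease(RecLock *lock)
{
  assert(lock->claims > 0);
  if (--lock->claims == 0) {
    int res = pthread_mutex_unlock(&lock->mut);
    assert(res == 0);
  }
}

int main(void)
{
  RecLock lock;
  RecLockInit(&lock);
  RecLockClaim(&lock);
  RecLockClaim(&lock);     /* nested claim succeeds via EDEADLK */
  RecLockRelease(&lock);
  RecLockRelease(&lock);   /* mutex actually unlocked here */
  puts("recursive claims OK");
  return 0;
}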

mps/src/mpstd.h

@@ -160,7 +160,7 @@
/* GCC 2.7.2.1, gcc -E -dM -traditional-cpp and <URL:http://developer.apple.c
 * om/techpubs/macosx/System/Documentation/Developer/YellowBox/Reference/DevT
 * ools/Preprocessor/Preprocessor.[ef].html>
 */
#elif defined(__APPLE__) && defined(__ppc__) && defined(__MACH__) && defined(__GNUC__)
@@ -291,6 +291,20 @@
#define MPS_WORD_SHIFT 5
#define MPS_PF_ALIGN 8 /* @@@@ not tested */
/* GCC 2.95.3, gcc -E -dM
*/
#elif defined(__FreeBSD__) && defined (__i386__) && defined (__GNUC__)
#define MPS_PF_FRI4GC
#define MPS_PF_STRING "fri4gc"
#define MPS_OS_FR
#define MPS_ARCH_I4
#define MPS_BUILD_GC
#define MPS_T_WORD unsigned long
#define MPS_WORD_WIDTH 32
#define MPS_WORD_SHIFT 5
#define MPS_PF_ALIGN 4
#else
#error "Unable to detect target platform"
#endif
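
The new clause keys off compiler-predefined macros. A tiny probe along the same lines shows the detection idiom; this is a hypothetical test program, not part of mpstd.h, and the Linux macro set shown is illustrative.

#include <stdio.h>

int main(void)
{
#if defined(__FreeBSD__) && defined(__i386__) && defined(__GNUC__)
  puts("fri4gc: FreeBSD/Intel/GCC");
#elif defined(__linux__) && defined(__i386__) && defined(__GNUC__)
  puts("lii4gc: Linux/Intel/GCC");
#else
  puts("platform not detected by this sketch");
#endif
  return 0;
}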

mps/src/prmci3fr.c Normal file (+86)

@@ -0,0 +1,86 @@
/* impl.c.prmci3li: PROTECTION MUTATOR CONTEXT INTEL 386 (LINUX)
*
* $HopeName: MMsrc!prmci3li.c(trunk.3) $
* Copyright (C) 1999 Harlequin Limited. All rights reserved.
*
* .purpose: This module implements the part of the protection module
* that decodes the MutatorFaultContext.
*
*
* SOURCES
*
* .source.i486: Intel486 Microprocessor Family Programmer's
* Reference Manual
*
* .source.linux.kernel: Linux kernel source files.
*
*
* ASSUMPTIONS
*
* .assume.regref: The registers in the context can be modified by
* storing into an MRef pointer.
*/
/* prmcli.h will include mpm.h after defining open sesame magic */
#include "prmcli.h"
#include "prmci3.h"
SRCID(prmci3li, "$HopeName$");
/* Prmci3AddressHoldingReg -- return an address of a register in a context */
MRef Prmci3AddressHoldingReg(MutatorFaultContext context, unsigned int regnum)
{
struct sigcontext *scp;
AVER(regnum <= 7);
AVER(regnum >= 0);
scp = context->scp;
/* .source.i486 */
/* .assume.regref */
switch (regnum) {
case 0: return (MRef)&scp->eax;
case 1: return (MRef)&scp->ecx;
case 2: return (MRef)&scp->edx;
case 3: return (MRef)&scp->ebx;
case 4: return (MRef)&scp->esp;
case 5: return (MRef)&scp->ebp;
case 6: return (MRef)&scp->esi;
case 7: return (MRef)&scp->edi;
}
NOTREACHED;
return (MRef)NULL; /* Keep compiler happy. */
}
/* Prmci3DecodeFaultContext -- decode fault to find faulting address and IP */
void Prmci3DecodeFaultContext(MRef *faultmemReturn,
Byte **insvecReturn,
MutatorFaultContext context)
{
struct sigcontext *scp;
scp = context->scp;
/* Assert that this is a page fault exception. The computation of */
/* faultmem depends on this. See .source.i486 (9.9.14). */
AVER(scp->trapno == 14);
/* cr2 contains the address which caused the fault. */
/* See .source.i486 (9.9.14) and */
/* .source.linux.kernel (linux/arch/i386/mm/fault.c). */
*faultmemReturn = (MRef)scp->cr2;
*insvecReturn = (Byte*)scp->eip;
}
/* Prmci3StepOverIns -- modify context to step over instruction */
void Prmci3StepOverIns(MutatorFaultContext context, Size inslen)
{
context->scp->eip += (unsigned long)inslen;
}
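
The register-as-MRef idea in .assume.regref can be shown in isolation. The following sketch substitutes a mock RegContext for struct sigcontext; every name apart from the register fields is invented for illustration.

#include <assert.h>
#include <stdio.h>

typedef unsigned long Word;
typedef Word *MRef;

typedef struct {           /* mock context, NOT struct sigcontext */
  Word eax, ecx, edx, ebx, esp, ebp, esi, edi;
} RegContext;

static MRef AddressHoldingReg(RegContext *ctx, unsigned regnum)
{
  switch (regnum) {        /* encoding as in .source.i486 */
  case 0: return &ctx->eax;
  case 1: return &ctx->ecx;
  case 2: return &ctx->edx;
  case 3: return &ctx->ebx;
  case 4: return &ctx->esp;
  case 5: return &ctx->ebp;
  case 6: return &ctx->esi;
  case 7: return &ctx->edi;
  }
  assert(0);               /* cf. NOTREACHED */
  return NULL;
}

int main(void)
{
  RegContext ctx = {0, 0, 0, 0, 0, 0, 0, 0};
  *AddressHoldingReg(&ctx, 3) = 42;   /* store through the MRef... */
  assert(ctx.ebx == 42);              /* ...fixes ebx in place */
  puts("register fixed via MRef");
  return 0;
}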

mps/src/protfr.c Normal file (+90)

@@ -0,0 +1,90 @@
/* impl.c.protli: PROTECTION FOR LINUX
*
* $HopeName: $
* Copyright (C) 1995,1999 Harlequin Group, all rights reserved
*
*/
#include "mpm.h"
#ifndef MPS_OS_LI
#error "protli.c is Linux specific, but MPS_OS_LI is not set"
#endif
#ifndef PROTECTION
#error "protli.c implements protection, but PROTECTION is not set"
#endif
#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <sys/mman.h>
SRCID(protli, "$HopeName: $");
/* ProtSet -- set protection
*
* This is just a thin veneer on top of mprotect(2).
*/
void ProtSet(Addr base, Addr limit, AccessSet mode)
{
int flags;
int res;
AVER(sizeof(int) == sizeof(Addr)); /* should be redundant; will fail on Alpha */
AVER(base < limit);
AVER(base != 0);
AVER(AddrOffset(base, limit) <= INT_MAX); /* should be redundant */
#if 0
/* .flags.trouble: This less strict version of flags (which allows write
* access unless explicitly told not to) caused mmqa test 37 to fail.
* This might be a bug in MPS, so for now we go with the stricter
* version that matches the Win32 implementation. */
flags = 0;
if((mode & AccessREAD) == 0)
flags |= PROT_READ | PROT_EXEC;
if((mode & AccessWRITE) == 0)
flags |= PROT_WRITE;
#endif
flags = PROT_READ | PROT_WRITE | PROT_EXEC;
if((mode & AccessWRITE) != 0)
flags = PROT_READ | PROT_EXEC;
if((mode & AccessREAD) != 0)
flags = 0;
res = mprotect((void *)base, (size_t)AddrOffset(base, limit), flags);
AVER(res == 0);
}
/* ProtSync -- synchronize protection settings with hardware
*
* This does nothing under Linux.
*/
void ProtSync(Arena arena)
{
NOOP;
}
/* ProtTramp -- protection trampoline
*
* The protection trampoline is trivial under Linux, as there is nothing
* that needs to be done in the dynamic context of the mutator in order
* to catch faults. (Contrast this with Win32 Structured Exception
* Handling.)
*/
void ProtTramp(void **resultReturn, void *(*f)(void *, size_t),
void *p, size_t s)
{
AVER(resultReturn != NULL);
AVER(FUNCHECK(f));
/* Can't check p and s as they are interpreted by the client */
*resultReturn = (*f)(p, s);
}
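
ProtSet's flag computation is the heart of this file. The sketch below reproduces the same truth table against a live page, assuming POSIX mmap/mprotect; the AccessREAD/AccessWRITE values are stand-ins for the MPS definitions.

#define _DEFAULT_SOURCE    /* MAP_ANONYMOUS on glibc */
#include <assert.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define AccessREAD  1u     /* stand-ins for the MPS AccessSet bits */
#define AccessWRITE 2u

static int protFlags(unsigned mode)
{
  /* Same truth table as ProtSet: forbidding reads forbids all
   * access; forbidding writes still allows read and execute. */
  int flags = PROT_READ | PROT_WRITE | PROT_EXEC;
  if (mode & AccessWRITE) flags = PROT_READ | PROT_EXEC;
  if (mode & AccessREAD)  flags = PROT_NONE;
  return flags;
}

int main(void)
{
  size_t page = (size_t)sysconf(_SC_PAGESIZE);
  int res;
  volatile char c;
  char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  assert(p != MAP_FAILED);
  res = mprotect(p, page, protFlags(AccessWRITE));
  assert(res == 0);
  c = p[0];                /* reads still succeed */
  (void)c;
  res = mprotect(p, page, protFlags(0));
  assert(res == 0);
  p[0] = 'x';              /* writable again */
  res = munmap(p, page);
  assert(res == 0);
  puts("protection toggled OK");
  return 0;
}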

mps/src/protfri3.c Normal file (+151)

@@ -0,0 +1,151 @@
/* impl.c.protlii3: PROTECTION FOR LINUX (INTEL 386)
*
* $HopeName: MMsrc!protlii3.c(trunk.3) $
* Copyright (C) 2000 Harlequin Limited. All rights reserved.
*
* SOURCES
*
* .source.i486: Intel486 Microprocessor Family Programmer's
* Reference Manual
*
* .source.linux.kernel: Linux kernel source files.
*/
#include "prmcli.h"
#ifndef MPS_OS_LI
#error "protlii3.c is Linux-specific, but MPS_OS_LI is not set"
#endif
#if !defined(MPS_ARCH_I3) && !defined(MPS_ARCH_I4)
#error "protlii3.c is Intel-specific, but MPS_ARCH_I3 or MPS_ARCH_I4 is not set"
#endif
#ifndef PROTECTION
#error "protlii3.c implements protection, but PROTECTION is not set"
#endif
#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <signal.h>
SRCID(protlii3, "$HopeName: MMsrc!protlii3.c(trunk.3) $");
/* Useful stuff that doesn't appear to be in any header files. */
/* Interrupt number 14 is Page Fault. */
#define TRAPNO_PAGE_FAULT 14
/* Bits in err field of sigcontext for interrupt 14 (page fault) */
#define PAGE_FAULT_ERR_PAGEPROT 0x1
#define PAGE_FAULT_ERR_WRITE 0x2
#define PAGE_FAULT_ERR_USERMODE 0x4
/* The previously-installed signal action, as returned by */
/* sigaction(3). See ProtSetup. */
static struct sigaction sigNext;
typedef void (*__real_lii3_sighandler_t)(int, struct sigcontext);
/* sigHandle -- protection signal handler
*
* This is the signal handler installed by ProtSetup to deal with
* protection faults. It is installed on the SIGSEGV signal.
* It decodes the protection fault details from the signal context
* and passes them to ArenaAccess, which attempts to handle the
* fault and remove its cause. If the fault is handled, then
* the handler returns and execution resumes. If it isn't handled,
* then sigHandle does its best to pass the signal on to the
* previously installed signal handler (sigNext).
*
* .sigh.args: There is no officially documented way of getting the
* sigcontext, but on x86 Linux at least it is passed BY VALUE as a
* second argument to the signal handler. The prototype doesn't
* include this arg.
* See .source.linux.kernel (linux/arch/i386/kernel/signal.c).
*
* .sigh.context: We only know how to handle interrupt 14, where
* context.err gives the page fault error code and context.cr2 gives
* the fault address. See .source.i486 (9.9.14) and
* .source.linux.kernel (linux/arch/i386/mm/fault.c).
*
* .sigh.addr: We assume that the OS decodes the address to something
* sensible
*/
static void sigHandle(int sig, struct sigcontext context) /* .sigh.args */
{
AVER(sig == SIGSEGV);
if(context.trapno == TRAPNO_PAGE_FAULT) { /* .sigh.context */
AccessSet mode;
Addr base, limit;
MutatorFaultContextStruct mfContext;
mfContext.scp = &context;
mode = ((context.err & PAGE_FAULT_ERR_WRITE) != 0) /* .sigh.context */
? (AccessREAD | AccessWRITE)
: AccessREAD;
/* We assume that the access is for one word at the address. */
base = (Addr)context.cr2; /* .sigh.addr */
limit = AddrAdd(base, (Size)sizeof(Addr));
/* Offer each protection structure the opportunity to handle the */
/* exception. If it succeeds, then allow the mutator to continue. */
if(ArenaAccess(base, mode, &mfContext))
return;
}
/* The exception was not handled by any known protection structure, */
/* so throw it to the previously installed handler. */
/* @@@@ This is really weak. */
/* Need to implement rest of the contract of sigaction */
/* We might also want to set SA_RESETHAND in the flags and explicitly */
/* reinstall the handler from within itself so the SIG_DFL/SIG_IGN */
/* case can work properly by just returning. */
switch ((int)sigNext.sa_handler) {
case (int)SIG_DFL:
case (int)SIG_IGN:
abort();
NOTREACHED;
break;
default:
(*(__real_lii3_sighandler_t)sigNext.sa_handler)(sig, context);
break;
}
}
/* ProtSetup -- global protection setup
*
* Under Linux, the global setup involves installing a signal handler
* on SIGSEGV to catch and handle page faults (see sigHandle).
* The previous handler is recorded so that it can be reached from
* sigHandle if it fails to handle the fault.
*
* NOTE: There are problems with this approach:
* 1. we can't honor the sa_flags for the previous handler,
* 2. what if this thread is suspended just after calling signal(3)?
* The sigNext variable will never be initialized!
*/
void ProtSetup(void)
{
struct sigaction sa;
int result;
sa.sa_handler = (__sighandler_t)sigHandle; /* .sigh.args */
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
result = sigaction(SIGSEGV, &sa, &sigNext);
AVER(result == 0);
}
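
For comparison with .sigh.args: POSIX sigaction with SA_SIGINFO delivers the fault address in a documented way, through siginfo_t's si_addr, which plays the role of cr2 here. The sketch below illustrates that route; it is not the handler this file installs, and calling mprotect from a handler, while common practice for synchronous faults, is not formally async-signal-safe.

#define _DEFAULT_SOURCE
#include <assert.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static char *protectedPage;
static size_t pageSize;

static void handler(int sig, siginfo_t *info, void *uap)
{
  (void)uap;
  assert(sig == SIGSEGV);
  /* si_addr plays the role of sigcontext.cr2 above. */
  if ((char *)info->si_addr == protectedPage) {
    mprotect(protectedPage, pageSize, PROT_READ | PROT_WRITE);
    return;                /* fault handled; the store is retried */
  }
  abort();                 /* not ours: give up (cf. sigNext) */
}

int main(void)
{
  struct sigaction sa;
  int res;
  pageSize = (size_t)sysconf(_SC_PAGESIZE);
  protectedPage = mmap(NULL, pageSize, PROT_NONE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  assert(protectedPage != MAP_FAILED);
  memset(&sa, 0, sizeof sa);
  sa.sa_sigaction = handler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  res = sigaction(SIGSEGV, &sa, NULL);
  assert(res == 0);
  protectedPage[0] = 'x';  /* faults; handler unprotects the page */
  printf("fault handled: %c\n", protectedPage[0]);
  return 0;
}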

mps/src/ssfri3.c Normal file (+58)

@@ -0,0 +1,58 @@
/* impl.c.sslii3: LINUX/INTEL STACK SCANNING
*
* $HopeName: MMsrc!sslii3.c(trunk.1) $
* Copyright (C) 1999. Harlequin Group plc. All rights reserved.
*
* This scans the stack and fixes the registers which may contain
* roots. See design.mps.thread-manager
*
* The registers edi, esi, ebx are the registers defined to be preserved
* across function calls and therefore may contain roots.
* These are pushed on the stack for scanning.
*
* SOURCES
*
* .source.callees.saves: Set of callee-saved registers taken from
* CALL_USED_REGISTERS in <gcc-sources>/config/i386/i386.h.
*
* ASSUMPTIONS
*
* .assume.align: The stack pointer is assumed to be aligned on a word
* boundary.
*
* .assume.asm.stack: The compiler must not do wacky things with the
* stack pointer around a call since we need to ensure that the
* callee-save regs are visible during TraceScanArea.
*
* .assume.asm.order: The volatile modifier should prevent movement
* of code, which might break .assume.asm.stack.
*
*/
#include "mpm.h"
SRCID(sslii3, "$HopeName: MMsrc!sslii3.c(trunk.1) $");
/* .assume.asm.order */
#define ASMV(x) __asm__ volatile (x)
Res StackScan(ScanState ss, Addr *stackBot)
{
Addr *stackTop;
Res res;
/* .assume.asm.stack */
ASMV("push %ebx"); /* These registers are callee-saved */
ASMV("push %esi"); /* and so may contain roots. They are pushed */
ASMV("push %edi"); /* for scanning. See .source.callees.saves. */
ASMV("mov %%esp, %0" : "=r" (stackTop) :); /* stackTop = esp */
AVER(AddrIsAligned((Addr)stackTop, sizeof(Addr))); /* .assume.align */
res = TraceScanArea(ss, stackTop, stackBot);
ASMV("add $12, %esp"); /* pop 3 regs to restore the stack pointer */
return res;
}
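
The same spill-and-scan idea can be written portably by letting setjmp store the callee-saved registers instead of hand-written asm. A sketch follows, with scanWord standing in for TraceScanArea's fix function; reading raw stack words is deliberate, and the frame layout assumed here is only faithful without optimisation.

#include <setjmp.h>
#include <stdio.h>

static unsigned long scanned;

static void scanWord(void *w)
{
  (void)w;                 /* a real scanner would test w against the heap */
}

static void stackScan(void **stackBot)
{
  jmp_buf regs;                       /* registers spilled here */
  void **stackTop = (void **)&regs;   /* frame top, below the regs */
  void **p;
  setjmp(regs);            /* forces callee-saves into memory */
  for (p = stackTop; p < stackBot; ++p) {  /* ascend to stackBot */
    scanWord(*p);
    ++scanned;
  }
}

int main(void)
{
  void *bottom;            /* marks the registered stack bottom */
  stackScan(&bottom);
  printf("scanned %lu words\n", scanned);
  return 0;
}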

mps/src/thfri4.c Normal file (+303)

@@ -0,0 +1,303 @@
/* impl.c.thlii3: Threads Manager for Intel x86 systems with LinuxThreads
*
* $HopeName: MMsrc!thlii4.c(trunk.2) $
* Copyright (C) 2000 Harlequin Limited. All rights reserved.
*
* .purpose: This is a pthreads implementation of the threads manager.
* This implements impl.h.th.
*
* .design: See design.mps.thread-manager.
*
* .thread.id: The thread id is used to identify the current thread.
*
* ASSUMPTIONS
*
* .error.resume: PThreadextResume is assumed to succeed unless the thread
* has been destroyed.
* .error.suspend: PThreadextSuspend is assumed to succeed unless the thread
* has been destroyed. In this case, the suspend context is set to NULL.
*
* .stack.full-descend: assumes full descending stack.
* i.e. stack pointer points to the last allocated location;
* stack grows downwards.
*
* .stack.below-bottom: it's legal for the stack pointer to be at a
* higher address than the registered bottom of stack. This might
* happen if the stack of another thread doesn't contain any frames
* belonging to the client language. In this case, the stack should
* not be scanned.
*
* .stack.align: assume roots on the stack are always word-aligned,
* but don't assume that the stack pointer is necessarily
* word-aligned at the time of reading the context of another thread.
*
* .sp: The stack pointer in the context is ESP.
* .context.regroots: The root regs EDI, ESI, EBX, EDX, ECX and EAX are
* assumed to be recorded in the context at pointer-aligned boundaries.
*/
#include "prmcli.h"
#include "mpm.h"
#if !defined(MPS_OS_LI) || !defined(MPS_ARCH_I4)
#error "Compiling thlii4 when MPS_OS_LI or MPS_ARCH_I4 not defined."
#endif
#include <pthread.h>
#include "pthrdext.h"
SRCID(thlii4, "$HopeName: MMsrc!thlii4.c(trunk.2) $");
/* ThreadStruct -- thread descriptor */
typedef struct ThreadStruct { /* PThreads thread structure */
Sig sig; /* design.mps.sig */
Serial serial; /* from arena->threadSerial */
Arena arena; /* owning arena */
RingStruct arenaRing; /* threads attached to arena */
PThreadextStruct thrextStruct; /* PThreads extension */
pthread_t id; /* Pthread object of thread */
struct sigcontext *scpSusp; /* Context if thread is suspended */
} ThreadStruct;
/* ThreadCheck -- check a thread */
Bool ThreadCheck(Thread thread)
{
CHECKS(Thread, thread);
CHECKU(Arena, thread->arena);
CHECKL(thread->serial < thread->arena->threadSerial);
CHECKL(RingCheck(&thread->arenaRing));
CHECKD(PThreadext, &thread->thrextStruct);
return TRUE;
}
Bool ThreadCheckSimple(Thread thread)
{
CHECKS(Thread, thread);
return TRUE;
}
/* ThreadRegister -- register a thread with an arena */
Res ThreadRegister(Thread *threadReturn, Arena arena)
{
Res res;
Thread thread;
void *p;
AVER(threadReturn != NULL);
AVERT(Arena, arena);
res = ControlAlloc(&p, arena, sizeof(ThreadStruct),
/* withReservoirPermit */ FALSE);
if(res != ResOK)
return res;
thread = (Thread)p;
thread->id = pthread_self();
RingInit(&thread->arenaRing);
thread->sig = ThreadSig;
thread->serial = arena->threadSerial;
++arena->threadSerial;
thread->arena = arena;
thread->scpSusp = NULL;
PThreadextInit(&thread->thrextStruct, thread->id);
AVERT(Thread, thread);
RingAppend(ArenaThreadRing(arena), &thread->arenaRing);
*threadReturn = thread;
return ResOK;
}
/* ThreadDeregister -- deregister a thread from an arena */
void ThreadDeregister(Thread thread, Arena arena)
{
AVERT(Thread, thread);
AVERT(Arena, arena);
RingRemove(&thread->arenaRing);
thread->sig = SigInvalid;
RingFinish(&thread->arenaRing);
PThreadextFinish(&thread->thrextStruct);
ControlFree(arena, thread, sizeof(ThreadStruct));
}
/* mapThreadRing -- map over threads on ring calling a function on each one
* except the current thread
*/
static void mapThreadRing(Ring threadRing, void (*func)(Thread))
{
Ring node, next;
pthread_t self;
AVERT(Ring, threadRing);
self = pthread_self();
RING_FOR(node, threadRing, next) {
Thread thread = RING_ELT(Thread, arenaRing, node);
AVERT(Thread, thread);
if(! pthread_equal(self, thread->id)) /* .thread.id */
(*func)(thread);
}
}
/* ThreadRingSuspend -- suspend all threads on a ring, except the current one */
static void threadSuspend(Thread thread)
{
/* .error.suspend */
/* In the error case (PThreadextSuspend returning ResFAIL), we */
/* assume the thread has been destroyed. */
/* In which case we simply continue. */
Res res;
res = PThreadextSuspend(&thread->thrextStruct, &thread->scpSusp);
if(res != ResOK)
thread->scpSusp = NULL;
}
void ThreadRingSuspend(Ring threadRing)
{
mapThreadRing(threadRing, threadSuspend);
}
/* ThreadRingResume -- resume all threads on a ring (except the current one) */
static void threadResume(Thread thread)
{
/* .error.resume */
/* If the previous suspend failed (thread->scpSusp == NULL), */
/* or in the error case (PThreadextResume returning ResFAIL), */
/* assume the thread has been destroyed. */
/* In which case we simply continue. */
if(thread->scpSusp != NULL) {
(void)PThreadextResume(&thread->thrextStruct);
thread->scpSusp = NULL;
}
}
void ThreadRingResume(Ring threadRing)
{
mapThreadRing(threadRing, threadResume);
}
/* ThreadRingThread -- return the thread at the given ring element */
Thread ThreadRingThread(Ring threadRing)
{
Thread thread;
AVERT(Ring, threadRing);
thread = RING_ELT(Thread, arenaRing, threadRing);
AVERT(Thread, thread);
return thread;
}
/* ThreadArena -- get the arena of a thread
*
* Must be thread-safe. See design.mps.interface.c.thread-safety.
*/
Arena ThreadArena(Thread thread)
{
/* Can't check thread as that would not be thread-safe. */
return thread->arena;
}
/* ThreadScan -- scan the state of a thread (stack and regs) */
Res ThreadScan(ScanState ss, Thread thread, void *stackBot)
{
pthread_t self;
Res res;
AVERT(Thread, thread);
self = pthread_self();
if(pthread_equal(self, thread->id)) {
/* scan this thread's stack */
res = StackScan(ss, stackBot);
if(res != ResOK)
return res;
} else {
struct sigcontext *scp;
Addr *stackBase, *stackLimit, stackPtr;
scp = thread->scpSusp;
if(scp == NULL) {
/* .error.suspend */
/* We assume that the thread must have been destroyed. */
/* We ignore the situation by returning immediately. */
return ResOK;
}
stackPtr = (Addr)scp->esp; /* .i3.sp */
/* .stack.align */
stackBase = (Addr *)AddrAlignUp(stackPtr, sizeof(Addr));
stackLimit = (Addr *)stackBot;
if (stackBase >= stackLimit)
return ResOK; /* .stack.below-bottom */
/* scan stack inclusive of current sp and exclusive of
* stackBot (.stack.full-descend)
*/
res = TraceScanAreaTagged(ss, stackBase, stackLimit);
if(res != ResOK)
return res;
/* (.context.regroots)
* This scans the root registers (.context.regroots). It also
* unnecessarily scans the rest of the context. The optimisation
* to scan only relevant parts would be machine dependent.
*/
res = TraceScanAreaTagged(ss, (Addr *)scp,
(Addr *)((char *)scp + sizeof(*scp)));
if(res != ResOK)
return res;
}
return ResOK;
}
/* ThreadDescribe -- describe a thread */
Res ThreadDescribe(Thread thread, mps_lib_FILE *stream)
{
Res res;
res = WriteF(stream,
"Thread $P ($U) {\n", (WriteFP)thread, (WriteFU)thread->serial,
" arena $P ($U)\n",
(WriteFP)thread->arena, (WriteFU)thread->arena->serial,
" id $U\n", (WriteFU)thread->id,
"} Thread $P ($U)\n", (WriteFP)thread, (WriteFU)thread->serial,
NULL);
if(res != ResOK)
return res;
return ResOK;
}
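
mapThreadRing's skip-self test (.thread.id) is the crux of suspend and resume. Here is a standalone sketch of the same pattern over a plain linked list; ThreadRec is a stand-in for the MPS Ring, and the suspend action is only reported. Build with cc -pthread.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

typedef struct ThreadRec {
  pthread_t id;
  struct ThreadRec *next;
} ThreadRec;

static ThreadRec *threads = NULL;   /* stand-in for ArenaThreadRing */

static void mapThreads(void (*func)(ThreadRec *))
{
  pthread_t self = pthread_self();
  ThreadRec *t;
  for (t = threads; t != NULL; t = t->next)
    if (!pthread_equal(self, t->id))  /* .thread.id: skip the caller */
      (*func)(t);
}

static void report(ThreadRec *t)
{
  (void)t;
  printf("would suspend one registered thread\n");
}

static void *worker(void *arg)
{
  (void)arg;
  sleep(1);                /* stay registered while main iterates */
  return NULL;
}

int main(void)
{
  ThreadRec mainRec, workerRec;
  int rc;
  mainRec.id = pthread_self();
  mainRec.next = NULL;
  rc = pthread_create(&workerRec.id, NULL, worker, NULL);
  assert(rc == 0);
  workerRec.next = &mainRec;
  threads = &workerRec;
  mapThreads(report);      /* reports the worker, skips main itself */
  pthread_join(workerRec.id, NULL);
  return 0;
}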

mps/src/vmfr.c Normal file (+292)

@@ -0,0 +1,292 @@
/* impl.c.vmli: VIRTUAL MEMORY MAPPING FOR LINUX
*
* $HopeName: MMsrc!vmli.c(trunk.7) $
* Copyright (C) 2000 Harlequin Limited. All rights reserved.
*
* .purpose: This is the implementation of the virtual memory mapping
* interface (vm.h) for Linux. It was created by copying vmo1.c (the
* DIGITAL UNIX implementation) as that seemed to be closest.
*
* .design: See design.mps.vm. .design.linux: mmap(2) is used to
* reserve address space by creating a mapping with page access none.
* mmap(2) is used to map pages onto store by creating a copy-on-write
* (MAP_PRIVATE) mapping with the flag MAP_ANONYMOUS.
*
* .assume.not-last: The implementation of VMCreate assumes that
* mmap() will not choose a region which contains the last page
* in the address space, so that the limit of the mapped area
* is representable.
*
* .assume.mmap.err: ENOMEM is the only error we really expect to
* get from mmap. The others are either caused by invalid params
* or features we don't use. See mmap(2) for details.
*
* .assume.off_t: We assume that the Size type (defined by the MM) fits
* in the off_t type (defined by the system (POSIX?)). In fact we test
* the more stringent requirement that they are the same size. This
* assumption is made in VMUnmap.
*
* .remap: Possibly this should use mremap to reduce the number of
* distinct mappings. According to our current testing, it doesn't
* seem to be a problem.
*/
/* Use all extensions */
#define _GNU_SOURCE 1
/* for open(2) */
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
/* for mmap(2), munmap(2) */
#include <sys/mman.h>
/* for errno(2) */
#include <errno.h>
/* for sysconf(2), close(2) */
#include <unistd.h>
#include "mpm.h"
#ifndef MPS_OS_LI
#error "vmli.c is LINUX specific, but MPS_OS_LI is not set"
#endif
SRCID(vmli, "$HopeName: MMsrc!vmli.c(trunk.7) $");
/* VMStruct -- virtual memory structure */
#define VMSig ((Sig)0x519B3999) /* SIGnature VM */
typedef struct VMStruct {
Sig sig; /* design.mps.sig */
Align align; /* page size */
Addr base, limit; /* boundaries of reserved space */
Size reserved; /* total reserved address space */
Size mapped; /* total mapped memory */
} VMStruct;
/* VMAlign -- return page size */
Align VMAlign(VM vm)
{
return vm->align;
}
/* VMCheck -- check a VM */
Bool VMCheck(VM vm)
{
CHECKS(VM, vm);
CHECKL(vm->base != 0);
CHECKL(vm->limit != 0);
CHECKL(vm->base < vm->limit);
CHECKL(vm->mapped <= vm->reserved);
CHECKL(SizeIsP2(vm->align));
CHECKL(AddrIsAligned(vm->base, vm->align));
CHECKL(AddrIsAligned(vm->limit, vm->align));
return TRUE;
}
/* VMCreate -- reserve some virtual address space, and create a VM structure */
Res VMCreate(VM *vmReturn, Size size)
{
Align align;
VM vm;
long pagesize;
void *addr;
Res res;
AVER(vmReturn != NULL);
/* sysconf code copied wholesale from vmso.c */
/* Find out the page size from the OS */
pagesize = sysconf(_SC_PAGESIZE);
/* check the actual returned pagesize will fit in an object of */
/* type Align. */
AVER(pagesize > 0);
AVER((unsigned long)pagesize <= (unsigned long)(Align)-1);
/* Note implicit conversion from "long" to "Align". */
align = pagesize;
AVER(SizeIsP2(align));
size = SizeAlignUp(size, align);
if((size == 0) || (size > (Size)(size_t)-1))
return ResRESOURCE;
/* Map in a page to store the descriptor on. */
addr = mmap(0, (size_t)SizeAlignUp(sizeof(VMStruct), align),
PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE,
-1, 0);
if(addr == MAP_FAILED) {
int e = errno;
AVER(e == ENOMEM); /* .assume.mmap.err */
return ResMEMORY;
}
vm = (VM)addr;
vm->align = align;
/* See .assume.not-last. */
addr = mmap(0, (size_t)size,
PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE,
-1, 0);
if(addr == MAP_FAILED) {
int e = errno;
AVER(e == ENOMEM); /* .assume.mmap.err */
res = ResRESOURCE;
goto failReserve;
}
vm->base = (Addr)addr;
vm->limit = AddrAdd(vm->base, size);
vm->reserved = size;
vm->mapped = (Size)0;
vm->sig = VMSig;
AVERT(VM, vm);
EVENT_PAA(VMCreate, vm, vm->base, vm->limit);
*vmReturn = vm;
return ResOK;
failReserve:
(void)munmap((void *)vm, (size_t)SizeAlignUp(sizeof(VMStruct), align));
return res;
}
/* VMDestroy -- release all address space and destroy VM structure */
void VMDestroy(VM vm)
{
int r;
AVERT(VM, vm);
AVER(vm->mapped == (Size)0);
/* This appears to be pretty pointless, since the descriptor */
/* page is about to vanish completely. However, munmap might fail */
/* for some reason, and this would ensure that it was still */
/* discovered if sigs were being checked. */
vm->sig = SigInvalid;
r = munmap((void *)vm->base, (size_t)AddrOffset(vm->base, vm->limit));
AVER(r == 0);
r = munmap((void *)vm,
(size_t)SizeAlignUp(sizeof(VMStruct), vm->align));
AVER(r == 0);
EVENT_P(VMDestroy, vm);
}
/* VMBase -- return the base address of the memory reserved */
Addr VMBase(VM vm)
{
AVERT(VM, vm);
return vm->base;
}
/* VMLimit -- return the limit address of the memory reserved */
Addr VMLimit(VM vm)
{
AVERT(VM, vm);
return vm->limit;
}
/* VMReserved -- return the amount of memory reserved */
Size VMReserved(VM vm)
{
AVERT(VM, vm);
return vm->reserved;
}
/* VMMapped -- return the amount of memory actually mapped */
Size VMMapped(VM vm)
{
AVERT(VM, vm);
return vm->mapped;
}
/* VMMap -- map the given range of memory */
Res VMMap(VM vm, Addr base, Addr limit)
{
Size size;
AVERT(VM, vm);
AVER(sizeof(void *) == sizeof(Addr));
AVER(base < limit);
AVER(base >= vm->base);
AVER(limit <= vm->limit);
AVER(AddrIsAligned(base, vm->align));
AVER(AddrIsAligned(limit, vm->align));
size = AddrOffset(base, limit);
if(mmap((void *)base, (size_t)size,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED,
-1, 0)
== MAP_FAILED) {
AVER(errno == ENOMEM); /* .assume.mmap.err */
return ResMEMORY;
}
vm->mapped += size;
EVENT_PAA(VMMap, vm, base, limit);
return ResOK;
}
/* VMUnmap -- unmap the given range of memory */
void VMUnmap(VM vm, Addr base, Addr limit)
{
Size size;
void *addr;
AVERT(VM, vm);
AVER(base < limit);
AVER(base >= vm->base);
AVER(limit <= vm->limit);
AVER(AddrIsAligned(base, vm->align));
AVER(AddrIsAligned(limit, vm->align));
AVER(sizeof(off_t) == sizeof(Size)); /* .assume.off_t */
size = AddrOffset(base, limit);
/* see design.mps.vmo1.fun.unmap.offset */
addr = mmap((void *)base, (size_t)size,
PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED,
-1, 0);
AVER(addr == (void *)base);
vm->mapped -= size;
EVENT_PAA(VMUnmap, vm, base, limit);
}
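
The reserve/commit protocol this file implements (.design.linux) can be demonstrated in a few lines: reserve address space with PROT_NONE, commit a sub-range by remapping it MAP_FIXED with real permissions, then decommit by mapping it back to PROT_NONE. A minimal sketch assuming POSIX mmap; the sizes and offsets are arbitrary.

#define _DEFAULT_SOURCE
#include <assert.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
  size_t page = (size_t)sysconf(_SC_PAGESIZE);
  size_t reserved = 64 * page;
  char *base, *chunk;
  int res;

  /* Reserve: address space claimed but unusable (cf. VMCreate). */
  base = mmap(NULL, reserved, PROT_NONE,
              MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  assert(base != MAP_FAILED);

  /* Commit one page in the middle (cf. VMMap). */
  chunk = mmap(base + 8 * page, page, PROT_READ | PROT_WRITE,
               MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
  assert(chunk == base + 8 * page);
  chunk[0] = 'x';          /* committed memory is usable */

  /* Decommit: map back to PROT_NONE (cf. VMUnmap). */
  chunk = mmap(chunk, page, PROT_NONE,
               MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
  assert(chunk == base + 8 * page);

  res = munmap(base, reserved);
  assert(res == 0);
  puts("reserve/commit/decommit OK");
  return 0;
}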