1
Fork 0
mirror of git://git.sv.gnu.org/emacs.git synced 2025-12-25 15:00:45 -08:00
emacs/mps/code/vmfr.c
Richard Brooksby 3d5e2ca85f Adding hopenames back into the master sources, so that they can be included in the union sources along with the id keywords.
This was achieved by partially undoing changelist 24817, including an accidental corruption of eventgen.pl.

Copied from Perforce
 Change: 24877
 ServerID: perforce.ravenbrook.com
2001-12-06 18:14:02 +00:00

279 lines
6.3 KiB
C

/* impl.c.vmfr: VIRTUAL MEMORY MAPPING FOR FreeBSD
*
* $Id$
* $HopeName: MMsrc!vmli.c(trunk.7) $
* Copyright (c) 2001 Ravenbrook Limited.
*
* .purpose: This is the implementation of the virtual memory mapping
* interface (vm.h) for FreeBSD. It was created by copying vmli.c (the
* DIGITAL UNIX implementation) as that seemed to be closest.
*
* .design: See design.mps.vm. .design.freebsd: mmap(2) is used to
* reserve address space by creating a mapping with page access none.
* mmap(2) is used to map pages onto store by creating a copy-on-write
* (MAP_PRIVATE) mapping with the flag MAP_ANON.
*
* .assume.not-last: The implementation of VMCreate assumes that
* mmap() will not choose a region which contains the last page
* in the address space, so that the limit of the mapped area
* is representable.
*
* .assume.mmap.err: ENOMEM is the only error we really expect to
* get from mmap. The others are either caused by invalid params
* or features we don't use. See mmap(2) for details.
*
* .remap: Possibly this should use mremap to reduce the number of
* distinct mappings. According to our current testing, it doesn't
* seem to be a problem.
*/
/* for mmap(2), munmap(2) */
#include <sys/types.h>
#include <sys/mman.h>
/* for errno(2) */
#include <errno.h>
/* for getpagesize(3) */
#include <unistd.h>
#include "mpm.h"
#ifndef MPS_OS_FR
#error "vmfr.c is FreeBSD specific, but MPS_OS_FR is not set"
#endif
SRCID(vmfr, "$Id$");
/* VMStruct -- virtual memory structure */

/* Descriptor for one reserved region of address space.  The structure
 * itself lives on a separately mapped page (see VMCreate), so that it
 * survives independently of the reserved region it describes. */

#define VMSig ((Sig)0x519B3999) /* SIGnature VM */

typedef struct VMStruct {
Sig sig; /* design.mps.sig */
Align align; /* page size, from getpagesize(3) */
Addr base, limit; /* boundaries of reserved space; base < limit */
Size reserved; /* total reserved address space, in bytes */
Size mapped; /* total mapped memory, in bytes; <= reserved */
} VMStruct;
/* VMAlign -- return page size */

/* Returns the OS page size cached at VMCreate time.  Note that,
 * unlike the other accessors, this deliberately performs no AVERT,
 * matching the original implementation. */

Align VMAlign(VM vm)
{
  Align pageSize;

  pageSize = vm->align;
  return pageSize;
}
/* VMCheck -- check a VM */

/* Validates the VMStruct invariants: signature, non-NULL ordered
 * boundaries, mapped never exceeding reserved, a power-of-two page
 * size, and page alignment of both boundaries.  Returns TRUE if all
 * checks pass (CHECKS/CHECKL behavior on failure depends on the
 * build's check level -- see mpm.h). */

Bool VMCheck(VM vm)
{
CHECKS(VM, vm);
CHECKL(vm->base != 0);
CHECKL(vm->limit != 0);
CHECKL(vm->base < vm->limit);
CHECKL(vm->mapped <= vm->reserved);
CHECKL(SizeIsP2(vm->align));
CHECKL(AddrIsAligned(vm->base, vm->align));
CHECKL(AddrIsAligned(vm->limit, vm->align));
return TRUE;
}
/* VMCreate -- reserve some virtual address space, and create a VM structure */

/* Reserves "size" bytes (rounded up to the page size) of address
 * space with no access, and maps one extra page to hold the VMStruct
 * descriptor itself.  On success stores the new VM in *vmReturn and
 * returns ResOK; returns ResRESOURCE if the reservation fails or the
 * size is unrepresentable, ResMEMORY if the descriptor page cannot be
 * mapped (see .assume.mmap.err). */

Res VMCreate(VM *vmReturn, Size size)
{
Align align;
VM vm;
int pagesize;
void *addr;
Res res;
AVER(vmReturn != NULL);
/* Find out the page size from the OS */
pagesize = getpagesize();
/* check the actual returned pagesize will fit in an object of */
/* type Align. */
AVER(pagesize > 0);
AVER((unsigned long)pagesize <= (unsigned long)(Align)-1);
/* Note implicit conversion from "int" to "Align". */
align = pagesize;
AVER(SizeIsP2(align));
size = SizeAlignUp(size, align);
/* Reject zero-sized requests and sizes that cannot be passed to */
/* mmap as a size_t. */
if((size == 0) || (size > (Size)(size_t)-1))
return ResRESOURCE;
/* Map in a page to store the descriptor on. */
addr = mmap(0, (size_t)SizeAlignUp(sizeof(VMStruct), align),
PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE,
-1, 0);
if(addr == MAP_FAILED) {
int e = errno;
AVER(e == ENOMEM); /* .assume.mmap.err */
return ResMEMORY;
}
vm = (VM)addr;
vm->align = align;
/* Reserve the address range with PROT_NONE so nothing can touch it */
/* until VMMap maps pages onto it.  See .assume.not-last. */
addr = mmap(0, (size_t)size,
PROT_NONE, MAP_ANON | MAP_PRIVATE,
-1, 0);
if(addr == MAP_FAILED) {
int e = errno;
AVER(e == ENOMEM); /* .assume.mmap.err */
res = ResRESOURCE;
goto failReserve;
}
vm->base = (Addr)addr;
vm->limit = AddrAdd(vm->base, size);
vm->reserved = size;
vm->mapped = (Size)0;
/* Set the signature last, so the VM only checks once fully built. */
vm->sig = VMSig;
AVERT(VM, vm);
EVENT_PAA(VMCreate, vm, vm->base, vm->limit);
*vmReturn = vm;
return ResOK;
failReserve:
/* Undo the descriptor-page mapping made above. */
(void)munmap((void *)vm, (size_t)SizeAlignUp(sizeof(VMStruct), align));
return res;
}
/* VMDestroy -- release all address space and destroy VM structure */

/* Unmaps the reserved region and the descriptor page.  All memory
 * must already have been unmapped (vm->mapped == 0).  The vm pointer
 * is invalid on return.
 *
 * Fix: the event is now emitted BEFORE the descriptor page is
 * unmapped.  Previously EVENT_P(VMDestroy, vm) ran after
 * munmap((void *)vm, ...), using the vm pointer after its backing
 * page had been removed. */

void VMDestroy(VM vm)
{
int r;
AVERT(VM, vm);
AVER(vm->mapped == (Size)0);
/* Emit the event while the descriptor page is still mapped; after */
/* the final munmap below, vm must not be used at all. */
EVENT_P(VMDestroy, vm);
/* This appears to be pretty pointless, since the descriptor */
/* page is about to vanish completely. However, munmap might fail */
/* for some reason, and this would ensure that it was still */
/* discovered if sigs were being checked. */
vm->sig = SigInvalid;
r = munmap((void *)vm->base, (size_t)AddrOffset(vm->base, vm->limit));
AVER(r == 0);
r = munmap((void *)vm,
(size_t)SizeAlignUp(sizeof(VMStruct), vm->align));
AVER(r == 0);
}
/* VMBase -- return the base address of the memory reserved */

/* Accessor: lowest address of the reserved region. */

Addr VMBase(VM vm)
{
  Addr baseAddr;

  AVERT(VM, vm);
  baseAddr = vm->base;
  return baseAddr;
}
/* VMLimit -- return the limit address of the memory reserved */

/* Accessor: one-past-the-end address of the reserved region
 * (representable per .assume.not-last). */

Addr VMLimit(VM vm)
{
  Addr limitAddr;

  AVERT(VM, vm);
  limitAddr = vm->limit;
  return limitAddr;
}
/* VMReserved -- return the amount of memory reserved */

/* Accessor: total bytes of address space reserved at VMCreate. */

Size VMReserved(VM vm)
{
  Size reservedBytes;

  AVERT(VM, vm);
  reservedBytes = vm->reserved;
  return reservedBytes;
}
/* VMMapped -- return the amount of memory actually mapped */

/* Accessor: total bytes currently mapped via VMMap (and not yet
 * unmapped via VMUnmap). */

Size VMMapped(VM vm)
{
  Size mappedBytes;

  AVERT(VM, vm);
  mappedBytes = vm->mapped;
  return mappedBytes;
}
/* VMMap -- map the given range of memory */

/* Maps [base, limit) -- a page-aligned sub-range of the reserved
 * region -- onto zero-filled anonymous store with full access.
 * Returns ResOK on success, ResMEMORY if the OS is out of store
 * (see .assume.mmap.err). */

Res VMMap(VM vm, Addr base, Addr limit)
{
  void *mapResult;
  Size size;

  AVERT(VM, vm);
  AVER(sizeof(void *) == sizeof(Addr));
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  size = AddrOffset(base, limit);

  /* MAP_FIXED replaces the PROT_NONE reservation at exactly "base". */
  mapResult = mmap((void *)base, (size_t)size,
                   PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_ANON | MAP_PRIVATE | MAP_FIXED,
                   -1, 0);
  if(mapResult == MAP_FAILED) {
    AVER(errno == ENOMEM); /* .assume.mmap.err */
    return ResMEMORY;
  }

  vm->mapped += size;

  EVENT_PAA(VMMap, vm, base, limit);
  return ResOK;
}
/* VMUnmap -- unmap the given range of memory */

/* Returns [base, limit) to the reserved-but-unmapped state by
 * overlaying it with a fresh PROT_NONE anonymous mapping (rather
 * than calling munmap, which would surrender the reservation). */

void VMUnmap(VM vm, Addr base, Addr limit)
{
  void *remapResult;
  Size size;

  AVERT(VM, vm);
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  size = AddrOffset(base, limit);

  /* see design.mps.vmo1.fun.unmap.offset */
  remapResult = mmap((void *)base, (size_t)size,
                     PROT_NONE, MAP_ANON | MAP_PRIVATE | MAP_FIXED,
                     -1, 0);
  AVER(remapResult == (void *)base);

  vm->mapped -= size;

  EVENT_PAA(VMUnmap, vm, base, limit);
}