Mirror of git://git.sv.gnu.org/emacs.git (synced 2026-01-24 21:41:48 -08:00)
[HAVE_GTK_AND_PTHREAD] Check this after including config.h.

(_aligned_blocks_mutex) [USE_PTHREAD]: New variable.
(LOCK_ALIGNED_BLOCKS, UNLOCK_ALIGNED_BLOCKS): New macros.
(_free_internal, memalign): Use them.
(_malloc_mutex, _aligned_blocks_mutex) [USE_PTHREAD]:
Initialize to PTHREAD_MUTEX_INITIALIZER.
(malloc_initialize_1) [USE_PTHREAD]: Don't use recursive mutex.
(morecore_nolock): Rename from morecore.  All uses changed.
Use only nolock versions of internal allocation functions.
(_malloc_internal_nolock, _realloc_internal_nolock)
(_free_internal_nolock): New functions created from
_malloc_internal, _realloc_internal, and _free_internal.
(_malloc_internal, _realloc_internal, _free_internal): Use them.
Copy hook value to automatic variable before its use.
(memalign): Copy hook value to automatic variable before its use.
This commit is contained in:
parent  8b0b7849d3
commit  8d0d84d25d

2 changed files with 141 additions and 52 deletions
ChangeLog
@@ -1,3 +1,21 @@
+2007-06-26  YAMAMOTO Mitsuharu  <mituharu@math.s.chiba-u.ac.jp>
+
+        * gmalloc.c [HAVE_GTK_AND_PTHREAD]: Check this after including config.h.
+        (_aligned_blocks_mutex) [USE_PTHREAD]: New variable.
+        (LOCK_ALIGNED_BLOCKS, UNLOCK_ALIGNED_BLOCKS): New macros.
+        (_free_internal, memalign): Use them.
+        (_malloc_mutex, _aligned_blocks_mutex) [USE_PTHREAD]:
+        Initialize to PTHREAD_MUTEX_INITIALIZER.
+        (malloc_initialize_1) [USE_PTHREAD]: Don't use recursive mutex.
+        (morecore_nolock): Rename from morecore.  All uses changed.
+        Use only nolock versions of internal allocation functions.
+        (_malloc_internal_nolock, _realloc_internal_nolock)
+        (_free_internal_nolock): New functions created from
+        _malloc_internal, _realloc_internal, and _free_internal.
+        (_malloc_internal, _realloc_internal, _free_internal): Use them.
+        Copy hook value to automatic variable before its use.
+        (memalign): Copy hook value to automatic variable before its use.
+
 2007-06-26  Kenichi Handa  <handa@m17n.org>
 
         * coding.c (Ffind_operation_coding_system): Docstring improved.
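The core of the change is a lock-hoisting split: each internal allocation primitive gets a _nolock worker that assumes _malloc_mutex is already held, and the original name becomes a thin wrapper that takes the lock, delegates, and releases it. Because the mutex is a plain non-recursive one, an internal call chain such as _malloc_internal -> morecore -> _realloc_internal would otherwise try to re-acquire a lock the thread already holds and deadlock. A minimal standalone sketch of the shape (the demo_* names are illustrative, not from gmalloc.c; the worker body is a placeholder, not the real allocator):

    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
    #define LOCK()   pthread_mutex_lock (&demo_mutex)
    #define UNLOCK() pthread_mutex_unlock (&demo_mutex)

    /* Worker: assumes the mutex is already held.  Internal callers
       that hold the lock call this directly, so the non-recursive
       mutex is never taken twice by the same thread.  */
    static void *
    demo_alloc_nolock (size_t size)
    {
      (void) size;
      return NULL;              /* real heap work would happen here */
    }

    /* Public wrapper: take the lock, delegate, release.  */
    void *
    demo_alloc (size_t size)
    {
      void *result;

      LOCK ();
      result = demo_alloc_nolock (size);
      UNLOCK ();
      return result;
    }

The patch applies exactly this shape to _malloc_internal, _realloc_internal, and _free_internal, and renames morecore (which only ever runs under the lock) to morecore_nolock.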
src/gmalloc.c  (175 changed lines)
@@ -1,9 +1,6 @@
 /* This file is no longer automatically generated from libc.  */
 
 #define _MALLOC_INTERNAL
-#ifdef HAVE_GTK_AND_PTHREAD
-#define USE_PTHREAD
-#endif
 
 /* The malloc headers and source files from the C library follow here.  */
 
@@ -40,6 +37,10 @@ Fifth Floor, Boston, MA 02110-1301, USA.
 #include <config.h>
 #endif
 
+#ifdef HAVE_GTK_AND_PTHREAD
+#define USE_PTHREAD
+#endif
+
 #if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES) \
     && ! defined (BROKEN_PROTOTYPES))
@@ -235,14 +236,21 @@ extern __malloc_size_t _bytes_free;
 extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
 extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
 extern void _free_internal PP ((__ptr_t __ptr));
+extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
+extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
+extern void _free_internal_nolock PP ((__ptr_t __ptr));
 
 #ifdef USE_PTHREAD
-extern pthread_mutex_t _malloc_mutex;
+extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
 #define LOCK()   pthread_mutex_lock (&_malloc_mutex)
 #define UNLOCK() pthread_mutex_unlock (&_malloc_mutex)
+#define LOCK_ALIGNED_BLOCKS()   pthread_mutex_lock (&_aligned_blocks_mutex)
+#define UNLOCK_ALIGNED_BLOCKS() pthread_mutex_unlock (&_aligned_blocks_mutex)
 #else
 #define LOCK()
 #define UNLOCK()
+#define LOCK_ALIGNED_BLOCKS()
+#define UNLOCK_ALIGNED_BLOCKS()
 #endif
 
 #endif /* _MALLOC_INTERNAL.  */
 
@@ -554,7 +562,8 @@ register_heapinfo ()
 
 #ifdef USE_PTHREAD
 static pthread_once_t malloc_init_once_control = PTHREAD_ONCE_INIT;
-pthread_mutex_t _malloc_mutex;
+pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
 #endif
 
 static void
@@ -567,7 +576,9 @@ malloc_initialize_1 ()
   if (__malloc_initialize_hook)
     (*__malloc_initialize_hook) ();
 
-#ifdef USE_PTHREAD
+  /* We don't use recursive mutex because pthread_mutexattr_init may
+     call malloc internally.  */
+#if 0 /* defined (USE_PTHREAD) */
   {
     pthread_mutexattr_t attr;
 
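The hunk above explains the ChangeLog line "Don't use recursive mutex": building a recursive mutex at runtime requires a pthread_mutexattr_t, and pthread_mutexattr_init may itself call malloc, re-entering the allocator in the middle of its own initialization. A static initializer runs no code at all, so it cannot recurse. A hedged illustration of the two options (demo_* names are mine; the disabled variant mirrors what the patch puts under #if 0):

    #include <pthread.h>

    /* Static initialization: no code runs, so nothing can re-enter
       malloc before the allocator is ready.  This is what the patch
       uses for both mutexes.  */
    pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Runtime initialization of a recursive mutex, by contrast, needs
       an attribute object first -- and pthread_mutexattr_init may
       allocate memory internally.  Hence the "#if 0".  */
    #if 0
    static pthread_mutex_t demo_recursive_mutex;

    static void
    demo_init_recursive (void)
    {
      pthread_mutexattr_t attr;

      pthread_mutexattr_init (&attr);   /* may call malloc internally */
      pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
      pthread_mutex_init (&demo_recursive_mutex, &attr);
      pthread_mutexattr_destroy (&attr);
    }
    #endif

Giving up the recursive mutex is what forces the _nolock split: with a plain mutex, the code itself must guarantee the lock is taken exactly once per call chain.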
@@ -616,9 +627,9 @@ static int morecore_recursing;
 
 /* Get neatly aligned memory, initializing or
    growing the heap info table as necessary.  */
-static __ptr_t morecore PP ((__malloc_size_t));
+static __ptr_t morecore_nolock PP ((__malloc_size_t));
 static __ptr_t
-morecore (size)
+morecore_nolock (size)
      __malloc_size_t size;
 {
   __ptr_t result;
@@ -661,7 +672,7 @@ morecore (size)
 	 `morecore_recursing' flag and return null.  */
       int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
       morecore_recursing = 1;
-      newinfo = (malloc_info *) _realloc_internal
+      newinfo = (malloc_info *) _realloc_internal_nolock
 	(_heapinfo, newsize * sizeof (malloc_info));
       morecore_recursing = 0;
       if (newinfo == NULL)
@@ -717,7 +728,7 @@ morecore (size)
 	/* Reset _heaplimit so _free_internal never decides
 	   it can relocate or resize the info table.  */
 	_heaplimit = 0;
-	_free_internal (oldinfo);
+	_free_internal_nolock (oldinfo);
 	PROTECT_MALLOC_STATE (0);
 
 	/* The new heap limit includes the new table just allocated.  */
@@ -732,7 +743,7 @@ morecore (size)
 
 /* Allocate memory from the heap.  */
 __ptr_t
-_malloc_internal (size)
+_malloc_internal_nolock (size)
      __malloc_size_t size;
 {
   __ptr_t result;
@@ -752,7 +763,6 @@ _malloc_internal (size)
     return NULL;
 #endif
 
-  LOCK ();
   PROTECT_MALLOC_STATE (0);
 
   if (size < sizeof (struct list))
@@ -802,8 +812,10 @@ _malloc_internal (size)
 	  /* No free fragments of the desired size, so get a new block
 	     and break it into fragments, returning the first.  */
 #ifdef GC_MALLOC_CHECK
-	  result = _malloc_internal (BLOCKSIZE);
+	  result = _malloc_internal_nolock (BLOCKSIZE);
 	  PROTECT_MALLOC_STATE (0);
+#elif defined (USE_PTHREAD)
+	  result = _malloc_internal_nolock (BLOCKSIZE);
 #else
 	  result = malloc (BLOCKSIZE);
 #endif
@@ -874,7 +886,7 @@ _malloc_internal (size)
 	      _heaplimit += wantblocks - lastblocks;
 	      continue;
 	    }
-	  result = morecore (wantblocks * BLOCKSIZE);
+	  result = morecore_nolock (wantblocks * BLOCKSIZE);
 	  if (result == NULL)
 	    goto out;
 	  block = BLOCK (result);
@@ -932,7 +944,19 @@ _malloc_internal (size)
 
   PROTECT_MALLOC_STATE (1);
  out:
   return result;
 }
 
+__ptr_t
+_malloc_internal (size)
+     __malloc_size_t size;
+{
+  __ptr_t result;
+
+  LOCK ();
+  result = _malloc_internal_nolock (size);
+  UNLOCK ();
+
+  return result;
+}
+
@@ -940,10 +964,21 @@ __ptr_t
 malloc (size)
      __malloc_size_t size;
 {
+  __ptr_t (*hook) (__malloc_size_t);
+
   if (!__malloc_initialized && !__malloc_initialize ())
     return NULL;
 
-  return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size);
+  /* Copy the value of __malloc_hook to an automatic variable in case
+     __malloc_hook is modified in another thread between its
+     NULL-check and the use.
+
+     Note: Strictly speaking, this is not a right solution.  We should
+     use mutexes to access non-read-only variables that are shared
+     among multiple threads.  We just leave it for compatibility with
+     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
+  hook = __malloc_hook;
+  return (hook != NULL ? *hook : _malloc_internal) (size);
 }
 
 #ifndef _LIBC
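The hook change above is a classic check-then-use fix: reading the shared pointer twice leaves a window in which another thread can reset it between the NULL test and the call. Copying it once into an automatic variable closes that window (though, as the patch's own comment concedes, it narrows the race rather than properly synchronizing). A self-contained sketch of both versions (demo_* names are illustrative):

    #include <stddef.h>

    typedef void *(*demo_hook_t) (size_t);
    demo_hook_t demo_hook;      /* shared; another thread may store NULL */

    void *
    demo_call_hook_racy (size_t size)
    {
      /* Two reads of the shared variable: a concurrent store of NULL
         between the test and the call makes the call crash.  */
      if (demo_hook != NULL)
        return (*demo_hook) (size);
      return NULL;
    }

    void *
    demo_call_hook_fixed (size_t size)
    {
      /* One read into a local: the copy cannot change between the
         NULL check and the call through it.  */
      demo_hook_t hook = demo_hook;

      if (hook != NULL)
        return (*hook) (size);
      return NULL;
    }

The same pattern is applied below to __free_hook in free, __realloc_hook in realloc, and __memalign_hook in memalign.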
@@ -1024,9 +1059,9 @@ void (*__free_hook) PP ((__ptr_t __ptr));
 struct alignlist *_aligned_blocks = NULL;
 
 /* Return memory to the heap.
-   Like `free' but don't call a __free_hook if there is one.  */
+   Like `_free_internal' but don't lock mutex.  */
 void
-_free_internal (ptr)
+_free_internal_nolock (ptr)
      __ptr_t ptr;
 {
   int type;
@@ -1043,9 +1078,9 @@ _free_internal (ptr)
   if (ptr == NULL)
     return;
 
-  LOCK ();
   PROTECT_MALLOC_STATE (0);
 
+  LOCK_ALIGNED_BLOCKS ();
   for (l = _aligned_blocks; l != NULL; l = l->next)
     if (l->aligned == ptr)
       {
@@ -1053,6 +1088,7 @@ _free_internal (ptr)
 	ptr = l->exact;
 	break;
       }
+  UNLOCK_ALIGNED_BLOCKS ();
 
   block = BLOCK (ptr);
 
@@ -1158,7 +1194,7 @@ _free_internal (ptr)
 		 table's blocks to the system before we have copied them to
 		 the new location.  */
 	      _heaplimit = 0;
-	      _free_internal (_heapinfo);
+	      _free_internal_nolock (_heapinfo);
 	      _heaplimit = oldlimit;
 
 	      /* Tell malloc to search from the beginning of the heap for
@@ -1166,8 +1202,8 @@ _free_internal (ptr)
 	      _heapindex = 0;
 
 	      /* Allocate new space for the info table and move its data.  */
-	      newinfo = (malloc_info *) _malloc_internal (info_blocks
-							  * BLOCKSIZE);
+	      newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
+								 * BLOCKSIZE);
 	      PROTECT_MALLOC_STATE (0);
 	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
 	      _heapinfo = newinfo;
@@ -1230,8 +1266,8 @@ _free_internal (ptr)
 	  _chunks_free -= BLOCKSIZE >> type;
 	  _bytes_free -= BLOCKSIZE;
 
-#ifdef GC_MALLOC_CHECK
-	  _free_internal (ADDRESS (block));
+#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
+	  _free_internal_nolock (ADDRESS (block));
 #else
 	  free (ADDRESS (block));
 #endif
@@ -1269,6 +1305,16 @@ _free_internal (ptr)
     }
 
   PROTECT_MALLOC_STATE (1);
 }
 
+/* Return memory to the heap.
+   Like `free' but don't call a __free_hook if there is one.  */
+void
+_free_internal (ptr)
+     __ptr_t ptr;
+{
+  LOCK ();
+  _free_internal_nolock (ptr);
+  UNLOCK ();
+}
+
@@ -1278,8 +1324,10 @@ FREE_RETURN_TYPE
 free (ptr)
      __ptr_t ptr;
 {
-  if (__free_hook != NULL)
-    (*__free_hook) (ptr);
+  void (*hook) (__ptr_t) = __free_hook;
+
+  if (hook != NULL)
+    (*hook) (ptr);
   else
     _free_internal (ptr);
 }
@@ -1415,7 +1463,7 @@ __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
    new region.  This module has incestuous knowledge of the
    internals of both free and malloc.  */
 __ptr_t
-_realloc_internal (ptr, size)
+_realloc_internal_nolock (ptr, size)
      __ptr_t ptr;
      __malloc_size_t size;
 {
@@ -1425,15 +1473,14 @@ _realloc_internal (ptr, size)
 
   if (size == 0)
     {
-      _free_internal (ptr);
-      return _malloc_internal (0);
+      _free_internal_nolock (ptr);
+      return _malloc_internal_nolock (0);
     }
   else if (ptr == NULL)
-    return _malloc_internal (size);
+    return _malloc_internal_nolock (size);
 
   block = BLOCK (ptr);
 
-  LOCK ();
   PROTECT_MALLOC_STATE (0);
 
   type = _heapinfo[block].busy.type;
@@ -1443,11 +1490,11 @@ _realloc_internal (ptr, size)
       /* Maybe reallocate a large block to a small fragment.  */
       if (size <= BLOCKSIZE / 2)
 	{
-	  result = _malloc_internal (size);
+	  result = _malloc_internal_nolock (size);
 	  if (result != NULL)
 	    {
 	      memcpy (result, ptr, size);
-	      _free_internal (ptr);
+	      _free_internal_nolock (ptr);
 	      goto out;
 	    }
 	}
@@ -1467,7 +1514,7 @@ _realloc_internal (ptr, size)
 	     Now we will free this chunk; increment the statistics counter
 	     so it doesn't become wrong when _free_internal decrements it.  */
 	  ++_chunks_used;
-	  _free_internal (ADDRESS (block + blocks));
+	  _free_internal_nolock (ADDRESS (block + blocks));
 	  result = ptr;
 	}
       else if (blocks == _heapinfo[block].busy.info.size)
@@ -1482,8 +1529,8 @@ _realloc_internal (ptr, size)
 	  /* Prevent free from actually returning memory to the system.  */
 	  oldlimit = _heaplimit;
 	  _heaplimit = 0;
-	  _free_internal (ptr);
-	  result = _malloc_internal (size);
+	  _free_internal_nolock (ptr);
+	  result = _malloc_internal_nolock (size);
 	  PROTECT_MALLOC_STATE (0);
 	  if (_heaplimit == 0)
 	    _heaplimit = oldlimit;
@@ -1493,13 +1540,13 @@ _realloc_internal (ptr, size)
 		 the thing we just freed.  Unfortunately it might
 		 have been coalesced with its neighbors.  */
 	      if (_heapindex == block)
-		(void) _malloc_internal (blocks * BLOCKSIZE);
+		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
 	      else
 		{
 		  __ptr_t previous
-		    = _malloc_internal ((block - _heapindex) * BLOCKSIZE);
-		  (void) _malloc_internal (blocks * BLOCKSIZE);
-		  _free_internal (previous);
+		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
+		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
+		  _free_internal_nolock (previous);
 		}
 	      goto out;
 	    }
@@ -1519,18 +1566,31 @@ _realloc_internal (ptr, size)
 	{
 	  /* The new size is different; allocate a new space,
 	     and copy the lesser of the new size and the old.  */
-	  result = _malloc_internal (size);
+	  result = _malloc_internal_nolock (size);
 	  if (result == NULL)
 	    goto out;
 	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
-	  _free_internal (ptr);
+	  _free_internal_nolock (ptr);
 	}
       break;
     }
 
   PROTECT_MALLOC_STATE (1);
  out:
   return result;
 }
 
+__ptr_t
+_realloc_internal (ptr, size)
+     __ptr_t ptr;
+     __malloc_size_t size;
+{
+  __ptr_t result;
+
+  LOCK();
+  result = _realloc_internal_nolock (ptr, size);
+  UNLOCK ();
+
+  return result;
+}
+
@@ -1539,11 +1599,13 @@ realloc (ptr, size)
      __ptr_t ptr;
      __malloc_size_t size;
 {
+  __ptr_t (*hook) (__ptr_t, __malloc_size_t);
+
   if (!__malloc_initialized && !__malloc_initialize ())
     return NULL;
 
-  return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal)
-    (ptr, size);
+  hook = __realloc_hook;
+  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
 }
 
 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
@@ -1681,9 +1743,10 @@ memalign (alignment, size)
 {
   __ptr_t result;
   unsigned long int adj, lastadj;
+  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;
 
-  if (__memalign_hook)
-    return (*__memalign_hook) (alignment, size);
+  if (hook)
+    return (*hook) (alignment, size);
 
   /* Allocate a block with enough extra space to pad the block with up to
      (ALIGNMENT - 1) bytes if necessary.  */
@@ -1718,6 +1781,7 @@ memalign (alignment, size)
 	 of an allocated block.  */
 
       struct alignlist *l;
+      LOCK_ALIGNED_BLOCKS ();
       for (l = _aligned_blocks; l != NULL; l = l->next)
 	if (l->aligned == NULL)
 	  /* This slot is free.  Use it.  */
@@ -1725,16 +1789,23 @@ memalign (alignment, size)
       if (l == NULL)
 	{
 	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
-	  if (l == NULL)
+	  if (l != NULL)
 	    {
-	      free (result);
-	      return NULL;
+	      l->next = _aligned_blocks;
+	      _aligned_blocks = l;
 	    }
-	  l->next = _aligned_blocks;
-	  _aligned_blocks = l;
 	}
-      l->exact = result;
-      result = l->aligned = (char *) result + alignment - adj;
+      if (l != NULL)
+	{
+	  l->exact = result;
+	  result = l->aligned = (char *) result + alignment - adj;
+	}
+      UNLOCK_ALIGNED_BLOCKS ();
+      if (l == NULL)
+	{
+	  free (result);
+	  result = NULL;
+	}
     }
 
   return result;
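Note the ordering in the rewritten memalign failure path: free (result) now runs only after UNLOCK_ALIGNED_BLOCKS. Freeing while still holding _aligned_blocks_mutex would self-deadlock, because free reaches _free_internal_nolock, which takes LOCK_ALIGNED_BLOCKS itself to scan the aligned-blocks list, and the mutex is not recursive. A compilable sketch of the control flow (demo_* and record_alignment are stand-ins for the _aligned_blocks bookkeeping, not gmalloc.c names):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t demo_aligned_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for recording the exact/aligned pointer pair; returns
       nonzero on success.  Here it always fails, to exercise the
       failure path.  */
    static int
    record_alignment (void *exact, void *aligned)
    {
      (void) exact; (void) aligned;
      return 0;
    }

    void *
    demo_memalign_tail (void *result, void *aligned)
    {
      int recorded;

      pthread_mutex_lock (&demo_aligned_mutex);
      recorded = record_alignment (result, aligned);
      pthread_mutex_unlock (&demo_aligned_mutex);

      /* Free only after unlocking: in gmalloc.c the free path itself
         takes the aligned-blocks lock, so freeing under it would
         deadlock on the non-recursive mutex.  */
      if (!recorded)
        {
          free (result);
          return NULL;
        }
      return aligned;
    }

This is also why the old early "return NULL" inside the loop had to become the deferred "result = NULL" after the unlock.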