diff --git a/elkscmd/ash/Makefile b/elkscmd/ash/Makefile
index 02fefad3c..47102757c 100644
--- a/elkscmd/ash/Makefile
+++ b/elkscmd/ash/Makefile
@@ -25,8 +25,8 @@ OBJS= builtins.o cd.o dirent.o error.o eval.o exec.o expand.o input.o \
     output.o var.o init.o \
     linenoise_elks.o autocomplete.o
 
-# heap debugging
-#OBJS += ../../libc/malloc/v7malloc.o
+# heap debugging using v7 debug malloc
+#OBJS += v7stub.o
 
 # builtins must be manually added
 OBJS += bltin/echo.o
diff --git a/elkscmd/ash/v7stub.c b/elkscmd/ash/v7stub.c
new file mode 100644
index 000000000..ef580f950
--- /dev/null
+++ b/elkscmd/ash/v7stub.c
@@ -0,0 +1,17 @@
+/* stub to force inclusion of debug malloc (v7) */
+#include <malloc.h>
+
+void *malloc(size_t size)
+{
+    return __dmalloc(size);
+}
+
+void free(void *ptr)
+{
+    __dfree(ptr);
+}
+
+void *realloc(void *ptr, size_t size)
+{
+    return __drealloc(ptr, size);
+}
diff --git a/libc/include/malloc.h b/libc/include/malloc.h
index a87763e78..b1640e4f6 100644
--- a/libc/include/malloc.h
+++ b/libc/include/malloc.h
@@ -3,30 +3,23 @@
 
 #include
 
+/* default malloc (dev86) */
 void *malloc(size_t);
-void free(void *);
 void *realloc(void *, size_t);
-void *calloc(size_t elm, size_t sz);
-
-#ifdef __LIBC__
-/*
- * Mini malloc allows you to use a less efficient but smaller malloc the
- * cost is about 100 bytes of code in free but malloc (700bytes) doesn't
- * have to be linked. Unfortunatly memory can only be reused if everything
- * above it has been freed
- *
- */
+void free(void *);
 
-/* remove __MINI_MALLOC__ and always use real malloc for libc routines */
-//#define __MINI_MALLOC__
+/* debug malloc (v7 malloc) */
+void *__dmalloc(size_t);
+void *__drealloc(void *, size_t);
+void __dfree(void *);
 
-void __wcnear *__mini_malloc(size_t size);
-#endif
+/* arena malloc (64k near/unlimited far heap) */
+void *__amalloc(size_t);
+int __amalloc_add_heap(char __far *start, size_t size);
+void *__arealloc(void *, size_t);   /* NYI */
+void __afree(void *);
 
-#ifdef __MINI_MALLOC__
-extern void __wcnear *(*__alloca_alloc)(size_t);
-#define malloc(x) ((*__alloca_alloc)(x))
-#endif
+void *calloc(size_t elm, size_t sz);
 
 /* alloc/free from main memory */
 void __far *fmemalloc(unsigned long size);
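As a usage sketch for the new arena-allocator entry points declared above (not part of the patch): a far block, here obtained from fmemalloc(), must be registered with __amalloc_add_heap() before the first __amalloc() call. The function name, the 16 KB arena size, and the error handling are illustrative only, __arealloc() is still NYI so only allocate/free are shown, and per the libc Makefile below the arena malloc is currently built for the OpenWatcom compiler only.

/* illustrative only -- exercises the __amalloc API declared in malloc.h */
#include <malloc.h>

#define ARENA_SIZE 16384                /* example size; one arena can be up to 64K */

int arena_demo(void)
{
    char __far *arena = fmemalloc(ARENA_SIZE);  /* far block from main memory */
    void *p;

    if (!arena || !__amalloc_add_heap(arena, ARENA_SIZE))
        return -1;                      /* heap must be registered before first __amalloc() */

    p = __amalloc(100);                 /* allocated from the arena, not the program heap */
    if (p)
        __afree(p);                     /* __arealloc() is not yet implemented (NYI) */
    return 0;
}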
diff --git a/libc/malloc/Makefile b/libc/malloc/Makefile
index c6cec8679..0414d5a66 100644
--- a/libc/malloc/Makefile
+++ b/libc/malloc/Makefile
@@ -4,32 +4,49 @@ COMPILER ?= ia16
 LIB ?= out.a
 
 include $(TOPDIR)/libc/$(COMPILER).inc
+
+# options for default malloc (dev86) only:
 # allocations smaller than MCHUNK words (not bytes) are rounded up,
 # larger requests are allocated from heap as is.
 CFLAGS += -DMCHUNK=16
+#CFLAGS += -DVERBOSE=1
 #CFLAGS += -DMINALLOC
 #CFLAGS += -DLAZY_FREE
-CFLAGS += -DVERBOSE=1
 #CFLAGS += -DL_alloca
 
-# use V7 malloc for heap integrity checking
-#OBJS = v7malloc.o calloc.o sbrk.o brk.o
-
-OBJS = \
+# default malloc (dev86)
+DEFAULT_MALLOC_OBJS = \
+    malloc.o \
+    free.o \
+    realloc.o \
+    __mini_malloc.o \
     __alloca_alloc.o \
     __freed_list.o \
-    __mini_malloc.o \
+    noise.o \
    alloca.o \
-    brk.o \
+
+# debug malloc (v7)
+DEBUG_MALLOC_OBJS = v7malloc.o
+
+# arena malloc
+ARENA_MALLOC_OBJS = amalloc.o
+
+# these objects work with any malloc
+OBJS = \
     calloc.o \
-    free.o \
-    malloc.o \
-    noise.o \
-    dprintf.o \
-    realloc.o \
     sbrk.o \
     fmemalloc.o \
     fmemfree.o \
+    brk.o \
+    dprintf.o \
+
+# default and debug mallocs available for ia16 and OWC
+OBJS += $(DEFAULT_MALLOC_OBJS) $(DEBUG_MALLOC_OBJS)
+
+# arena malloc for OWC only for now
+ifeq "$(COMPILER)" "watcom"
+OBJS += $(ARENA_MALLOC_OBJS)
+endif
 
 IA16OBJS = \
     stackcheck.o \
diff --git a/libc/malloc/_malloc.h b/libc/malloc/_malloc.h
index ffdfd85c4..23ff57f52 100644
--- a/libc/malloc/_malloc.h
+++ b/libc/malloc/_malloc.h
@@ -1,18 +1,23 @@
 #ifndef _MALLOC_H
-#define _MALLOC_H
+#define _MALLOC_H
 
+/*
+ * Internal types for default malloc (dev86)
+ */
 #include
 
 typedef union mem_cell {
-    union mem_cell __wcnear *next;  /* A pointer to the next mem */
-    unsigned int size;              /* An int >= sizeof pointer */
-    char __wcnear *depth;           /* For the alloca hack */
+    union mem_cell __wcnear *next;      /* A pointer to the next mem */
+    unsigned int size;                  /* An int >= sizeof pointer */
+    char __wcnear *depth;               /* For the alloca hack */
 } mem;
 
+void __wcnear *__mini_malloc(size_t size);
 void __noise(char *y, mem __wcnear *x);
 int __dprintf(const char *fmt, ...);
 extern int __debug_level;
+extern mem __wcnear *__freed_list;
 
 #if !VERBOSE
 #define dprintf(...)
@@ -25,10 +30,21 @@ extern int __debug_level;
 #define debug(str,ptr) __noise(str,ptr)
 #endif
 
-#define m_deep(p)  ((p) [0].depth)  /* For alloca */
-#define m_next(p)  ((p) [1].next)   /* For malloc and alloca */
-#define m_size(p)  ((p) [0].size)   /* For malloc */
+#define m_deep(p)   ((p) [0].depth)     /* For alloca */
+#define m_next(p)   ((p) [1].next)      /* For malloc and alloca */
+#define m_size(p)   ((p) [0].size)      /* For malloc */
 
-extern mem __wcnear *__freed_list;
+/*
+ * Mini malloc allows you to use a less efficient but smaller malloc the
+ * cost is about 100 bytes of code in free but malloc (700bytes) doesn't
+ * have to be linked. Unfortunately memory can only be reused if everything
+ * above it has been freed
+ */
+/* remove __MINI_MALLOC__ and always use real malloc for libc routines */
+/*#define __MINI_MALLOC__*/
+#ifdef __MINI_MALLOC__
+extern void __wcnear *(*__alloca_alloc)(size_t);
+#define malloc(x) ((*__alloca_alloc)(x))    /* NOTE won't work anymore */
+#endif
 
 #endif
diff --git a/libc/malloc/amalloc.c b/libc/malloc/amalloc.c
new file mode 100644
index 000000000..407387391
--- /dev/null
+++ b/libc/malloc/amalloc.c
@@ -0,0 +1,351 @@
+/*
+ * __amalloc - Arena-based heap allocator - provides up to 64k local (far) heap
+ * Based on __dmalloc (v7 debug malloc).
+ * 16 Dec 2024 Greg Haerr
+ *
+ * Small malloc/realloc/free with heap checking
+ * Ported to ELKS from V7 malloc by Greg Haerr 20 Apr 2020
+ *
+ * Enhancements:
+ * Minimum BLOCK allocated from kernel sbrk, > BLOCK allocates requested size
+ * Much improved size and heap overflow handling with errno returns
+ * Full heap integrity checking and reporting with DEBUG options
+ * Use near heap pointers to work with OpenWatcom large model
+ * Combine free areas at heap start before allocating from free area at end of heap
+ */
+#include
+#include
+#include
+#include
+#define DEBUG 2     /* =1 heap checking asserts, =2 sysctl, =3 show heap */
+
+/* C storage allocator
+ * circular first-fit strategy
+ * works with noncontiguous, but monotonically linked, arena
+ * each block is preceded by a ptr to the (pointer of)
+ * the next following block
+ * blocks are exact number of words long
+ * aligned to the data type requirements of ALIGN
+ * pointers to blocks must have BUSY bit 0
+ * bit in ptr is 1 for busy, 0 for idle
+ * gaps in arena are merely noted as busy blocks
+ * last block of arena (pointed to by alloct) is empty and
+ * has a pointer to first
+ * idle blocks are coalesced during space search
+ *
+ * a different implementation may need to redefine
+ * ALIGN, NALIGN, BLOCK, BUSY, INT
+ * where INT is integer type to which a pointer can be cast
+ */
+#define INT         int
+#define ALIGN       int
+#define NALIGN      1
+#define BUSY        1
+#define BLOCK       34      /* min+WORD amount to sbrk */
+#define MINALLOC    14      /* minimum actual malloc size */
+#define GRANULE     0       /* sbrk granularity */
+
+union store {
+    union store __wcnear *ptr;
+    ALIGN dummy[NALIGN];
+};
+typedef union store __wcnear *NPTR;
+typedef union store __far *FPTR;
+#define WORD        sizeof(union store)
+
+#define FP_SEG(fp)  ((unsigned)((unsigned long)(void __far *)(fp) >> 16))
+#define FP_OFF(fp)  ((unsigned)(unsigned long)(void __far *)(fp))
+#define MK_FPTR(seg,off) ((FPTR)((((unsigned long)(seg)) << 16) | ((unsigned int)(off))))
+
+#define testbusy(p)     ((INT)(p)&BUSY)
+#define setbusy(p)      (NPTR)((INT)(p)|BUSY)
+#define clearbusy(p)    (NPTR)((INT)(p)&~BUSY)
+#define next(p)         ((MK_FPTR(allocseg,p))->ptr)
+
+static FPTR allocs;             /* arena base address */
+static unsigned int allocsize;  /* total arena size in words */
+static unsigned int allocseg;   /* arena segment */
+
+static NPTR allocp;     /*search ptr*/
+static NPTR alloct;     /*arena top*/
+static NPTR allocx;     /*for benefit of realloc*/
+
+#if DEBUG
+#define ASSERT(p)   if(!(p))malloc_assert_fail(#p);else {}
+static void malloc_assert_fail(char *s);
+static int malloc_check_heap(void);
+#else
+#define ASSERT(p)
+#endif
+
+#if DEBUG > 1
+#define debug(...)  do { if (debug_level > 1) __dprintf(__VA_ARGS__); } while (0)
+#define debug2(...) do { if (debug_level > 2) __dprintf(__VA_ARGS__); } while (0)
+int __dprintf(const char *fmt, ...);
+static void malloc_show_heap(void);
+static int debug_level = DEBUG;
+#else
+#define debug(...)
+#define malloc_show_heap()
+#endif
+
+/* add size bytes to arena malloc heap, must be done before first malloc */
+int __amalloc_add_heap(char __far *start, size_t size)
+{
+    if (size < 16)
+        return 0;
+
+    allocs = (FPTR)start;
+    allocseg = FP_SEG(start);
+    allocsize = size / sizeof(union store);
+    debug("Adding %04x %04x size %d DS %04x %04x\n", start, size, &size);
+
+    allocs[0].ptr = setbusy(&allocs[1]);
+    allocs[1].ptr = (NPTR)&allocs[allocsize-2];
+    allocs[allocsize-2].ptr = setbusy(&allocs[allocsize-1]);
+    allocs[allocsize-1].ptr = setbusy(&allocs[0]);
+    alloct = (NPTR)&allocs[allocsize-1];
+    allocp = (NPTR)&allocs[0];
+    return 1;
+}
+
+void *
+__amalloc(size_t nbytes)
+{
+    NPTR p, q;
+    unsigned int nw, temp;
+
+#if DEBUG == 2
+    sysctl(CTL_GET, "malloc.debug", &debug_level);
+#endif
+
+    debug("(%d)malloc(%u) ", getpid(), nbytes);
+    if (!allocs)
+        return NULL;
+    errno = 0;
+    if (nbytes == 0) {
+        debug(" (malloc 0) = NULL\n");
+        return NULL;        /* ANSI std, no error */
+    }
+    if (nbytes < MINALLOC)
+        nbytes = MINALLOC;
+
+    /* check INT overflow beyond 32762 (nbytes/WORD+2*WORD+(WORD-1) > 0xFFFF/WORD/WORD) */
+    if (nbytes > ((unsigned)-1)/WORD-2*WORD-(WORD-1)) {
+        debug(" (req too big) = NULL\n");
+        errno = ENOMEM;
+        return(NULL);
+    }
+    nw = (nbytes+WORD+WORD-1)/WORD;     /* extra word for link ptr/size*/
+
+    ASSERT(allocp>=allocs && allocp<=alloct);
+    ASSERT(malloc_check_heap());
+    /* combine free areas at heap start before allocating from free area past allocp */
+    allocp = (NPTR)allocs;
+    for(p=allocp; ; ) {
+        for(temp=0; ; ) {
+            if(!testbusy(next(p))) {
+                while(!testbusy(next(q = next(p)))) {
+                    if (debug_level > 2) malloc_show_heap();
+                    ASSERT(q>p);
+                    ASSERT(q<alloct);
+                    debug2("(combine %u and %u) ",
+                        (next(p) - p) * sizeof(union store),
+                        (next(q) - q) * sizeof(union store));
+                    next(p) = next(q);
+                }
+                debug2("q %04x p %04x nw %d p+nw %04x ",
+                    (unsigned)q, (unsigned)p, nw, (unsigned)(p+nw));
+                if(q>=p+nw && p+nw>=p)
+                    goto found;
+            }
+            q = p;
+            p = clearbusy(next(p));
+            if(p>q) {
+                ASSERT(p<=alloct);
+            } else if(q!=alloct || p!=(NPTR)allocs) {
+                ASSERT(q==alloct&&p==(NPTR)allocs);
+                debug(" (corrupt) = NULL\n");
+                errno = ENOMEM;
+                return(NULL);
+            } else if(++temp>1)
+                break;
+        }
+
+#if 1 // SIZE > 2
+        debug("Out of fixed heap\n");
+        return NULL;
+#else
+
+        /* extend break at least BLOCK bytes at a time, plus a word for top link */
+        if (nw < BLOCK/WORD)
+            temp = BLOCK/WORD + 1;
+        else
+            temp = nw + 1;  /* NOTE always allocates full req w/o looking at free at top */
+
+        if (debug_level > 2) malloc_show_heap();
+        debug("sbrk(%d) ", temp*WORD);
+#if 0 /* not required and slow, initial break always even */
+        q = (NPTR)sbrk(0);
+        if((INT)q & (sizeof(union store) - 1))
+            sbrk(4 - ((INT)q & (sizeof(union store) - 1)));
+
+        /* check possible address wrap - performed in kernel */
+        if(q+temp+GRANULE < q) {
+            debug(" (no more address space) = NULL\n");
+            errno = ENOMEM;
+            return(NULL);
+        }
+#endif
+        q = (NPTR)sbrk(temp*WORD);
+        if((INT)q == -1) {
+            debug(" (no more mem) = NULL\n");
+            malloc_show_heap();
+            errno = ENOMEM;
+            return(NULL);
+        }
+        ASSERT(!((INT)q & 1));
+        ASSERT(q>alloct);
+        next(alloct) = q;
+        if(q!=alloct+1)     /* mark any gap as permanently allocated*/
+            next(alloct) = setbusy(next(alloct));
+        alloct = next(q) = q+temp-1;
+        debug("(TOTAL %u) ",
+            sizeof(union store) +
+            (clearbusy(alloct) - clearbusy(allocs[allocsize-1].ptr)) * sizeof(union store));
+        next(alloct) = setbusy(allocs);
+#endif
+    }
+found:
+    allocp = p + nw;
+    ASSERT(allocp<=alloct);
+    if(q>allocp) {
+        allocx = next(allocp);  /* save contents in case of realloc data overwrite*/
+        next(allocp) = next(p);
+    }
+    next(p) = setbusy(allocp);
+    debug("= %04x\n", (unsigned)p);
+    malloc_show_heap();
+    return MK_FPTR(allocseg, p+1);
+}
+
+/* freeing strategy tuned for LIFO allocation
+ */
+void
+__afree(void *ptr)
+{
+    NPTR p = (NPTR)ptr;
+
+    if (p == NULL)
+        return;
+    debug("(%d) free(%d) = %04x\n", getpid(), (unsigned)(p[-1].ptr - p) << 1, p-1);
+    ASSERT(FP_SEG(ptr)==allocseg);
+    ASSERT(p>clearbusy(allocs[allocsize-1].ptr)&&p<=alloct);
+    ASSERT(malloc_check_heap());
+    allocp = --p;
+    ASSERT(testbusy(next(p)));
+    next(p) = clearbusy(next(p));
+    ASSERT(next(p) > allocp && next(p) <= alloct);
+    malloc_show_heap();
+}
+
+#if LATER
+/* realloc(p, nbytes) reallocates a block obtained from malloc()
+ * and freed since last call of malloc()
+ * to have new size nbytes, and old content
+ * returns new location, or 0 on failure
+ */
+void *
+__arealloc(void *ptr, size_t nbytes)
+{
+    NPTR p = (NPTR)ptr;
+    NPTR q;
+    NPTR s, t;
+    unsigned int nw, onw;
+
+    if (p == 0)
+        return __amalloc(nbytes);
+    debug("(%d)realloc(%04x,%u) ", getpid(), (unsigned)(p-1), nbytes);
+
+    ASSERT(testbusy(p[-1].ptr));
+    if(testbusy(p[-1].ptr))
+        free(p);
+    onw = p[-1].ptr - p;
+    q = (NPTR)__amalloc(nbytes);    // FIXME and also use memcpy
+    if(q==NULL || q==p)
+        return((void *)q);
+
+    /* copy old data into new allocation*/
+    s = p;
+    t = q;
+    nw = (nbytes+WORD-1)/WORD;
+    if(nw<onw)
+        onw = nw;
+    while(onw--!=0)
+        *t++ = *s++;
+
+    /* restore old data for special case of malloc link overwrite*/
+    if(q<p && q+nw>=p) {
+        debug("allocx patch %04x,%04x,%d ", (unsigned)q, (unsigned)p, nw);
+        next(q+(q+nw-p)) = allocx;
+    }
+    debug("= %04x\n", (unsigned)q);
+    return((void *)q);
+}
+#endif
+
+#if DEBUG
+static void malloc_assert_fail(char *s)
+{
+    __dprintf("malloc assert fail: %s\n", s);
+    abort();
+}
+
+static int
+malloc_check_heap(void)
+{
+    NPTR p;
+    int x = 0;
+
+    //debug("next(0) = %04x\n", clearbusy(next(&allocs[0])));
+    for(p=(NPTR)&allocs[0]; clearbusy(next(p)) > p; p=clearbusy(next(p))) {
+        //debug("%04x %04x %04x\n", (unsigned)p, (unsigned)alloct, (unsigned)next(p));
+        if(p==allocp)
+            x++;
+    }
+    if (p != alloct) debug("%04x %04x %04x\n",
+        (unsigned)p, (unsigned)alloct, (unsigned)next(p));
+    ASSERT(p==alloct);
+    return((x==1)|(p==allocp));
+}
+#endif
+
+#if DEBUG > 1
+static void
+malloc_show_heap(void)
+{
+    NPTR p;
+    int n = 1;
+    unsigned int size, alloc = 0, free = 0;
+
+    debug2("--- heap size ---\n");
+    malloc_check_heap();
+    for(p = (NPTR)&allocs[0]; clearbusy(next(p)) > p; p=clearbusy(next(p))) {
+        size = (clearbusy(next(p)) - clearbusy(p)) * sizeof(union store);
+        debug2("%2d: %04x %4u", n, (unsigned)p, size);
+        if (!testbusy(next(p))) {
+            debug2(" (free)");
+            free += size;
+        } else {
+            if (n < 2)
+                debug2(" (skipped)");
+            alloc += size;
+        }
+        n++;
+        debug2("\n");
+    }
+    alloc += sizeof(union store);
+    debug2("%2d: %04x %4u (top) ", n, (unsigned)alloct, 2);
+    debug("alloc %u, free %u, total %u\n", alloc, free, alloc+free);
+}
+#endif
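By analogy with v7stub.c earlier in this patch, an application could route malloc()/free() to the arena allocator with a similar link-time stub. The sketch below is not part of the patch: the file name astub.c is hypothetical, realloc() is omitted because __arealloc() is still NYI, and the arena must have been registered with __amalloc_add_heap() before the first allocation.

/* hypothetical astub.c, modeled on v7stub.c: force inclusion of arena malloc */
#include <malloc.h>

void *malloc(size_t size)
{
    return __amalloc(size);     /* returns NULL until __amalloc_add_heap() is called */
}

void free(void *ptr)
{
    __afree(ptr);
}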
allocation + */ +void +__afree(void *ptr) +{ + NPTR p = (NPTR)ptr; + + if (p == NULL) + return; + debug("(%d) free(%d) = %04x\n", getpid(), (unsigned)(p[-1].ptr - p) << 1, p-1); + ASSERT(FP_SEG(ptr)==allocseg); + ASSERT(p>clearbusy(allocs[allocsize-1].ptr)&&p<=alloct); + ASSERT(malloc_check_heap()); + allocp = --p; + ASSERT(testbusy(next(p))); + next(p) = clearbusy(next(p)); + ASSERT(next(p) > allocp && next(p) <= alloct); + malloc_show_heap(); +} + +#if LATER +/* realloc(p, nbytes) reallocates a block obtained from malloc() + * and freed since last call of malloc() + * to have new size nbytes, and old content + * returns new location, or 0 on failure + */ +void * +__arealloc(void *ptr, size_t nbytes) +{ + NPTR p = (NPTR)ptr; + NPTR q; + NPTR s, t; + unsigned int nw, onw; + + if (p == 0) + return __amalloc(nbytes); + debug("(%d)realloc(%04x,%u) ", getpid(), (unsigned)(p-1), nbytes); + + ASSERT(testbusy(p[-1].ptr)); + if(testbusy(p[-1].ptr)) + free(p); + onw = p[-1].ptr - p; + q = (NPTR)__amalloc(nbytes); // FIXME and also use memcpy + if(q==NULL || q==p) + return((void *)q); + + /* copy old data into new allocation*/ + s = p; + t = q; + nw = (nbytes+WORD-1)/WORD; + if(nw=p) { + debug("allocx patch %04x,%04x,%d ", (unsigned)q, (unsigned)p, nw); + next(q+(q+nw-p)) = allocx; + } + debug("= %04x\n", (unsigned)q); + return((void *)q); +} +#endif + +#if DEBUG +static void malloc_assert_fail(char *s) +{ + __dprintf("malloc assert fail: %s\n", s); + abort(); +} + +static int +malloc_check_heap(void) +{ + NPTR p; + int x = 0; + + //debug("next(0) = %04x\n", clearbusy(next(&allocs[0]))); + for(p=(NPTR)&allocs[0]; clearbusy(next(p)) > p; p=clearbusy(next(p))) { + //debug("%04x %04x %04x\n", (unsigned)p, (unsigned)alloct, (unsigned)next(p)); + if(p==allocp) + x++; + } + if (p != alloct) debug("%04x %04x %04x\n", + (unsigned)p, (unsigned)alloct, (unsigned)next(p)); + ASSERT(p==alloct); + return((x==1)|(p==allocp)); +} +#endif + +#if DEBUG > 1 +static void +malloc_show_heap(void) +{ + NPTR p; + int n = 1; + unsigned int size, alloc = 0, free = 0; + + debug2("--- heap size ---\n"); + malloc_check_heap(); + for(p = (NPTR)&allocs[0]; clearbusy(next(p)) > p; p=clearbusy(next(p))) { + size = (clearbusy(next(p)) - clearbusy(p)) * sizeof(union store); + debug2("%2d: %04x %4u", n, (unsigned)p, size); + if (!testbusy(next(p))) { + debug2(" (free)"); + free += size; + } else { + if (n < 2) + debug2(" (skipped)"); + alloc += size; + } + n++; + debug2("\n"); + } + alloc += sizeof(union store); + debug2("%2d: %04x %4u (top) ", n, (unsigned)alloct, 2); + debug("alloc %u, free %u, total %u\n", alloc, free, alloc+free); +} +#endif diff --git a/libc/malloc/malloc.c b/libc/malloc/malloc.c index 6d79938ae..6a07acfe1 100644 --- a/libc/malloc/malloc.c +++ b/libc/malloc/malloc.c @@ -196,7 +196,7 @@ malloc(size_t size) #if VERBOSE == 1 if (chunk_list == 0) - sysctl(CTL_GET, "kern.debug", &__debug_level); + sysctl(CTL_GET, "malloc.debug", &__debug_level); #endif errno = 0; diff --git a/libc/malloc/v7malloc.c b/libc/malloc/v7malloc.c index 2726e7ee5..5d3b81f5e 100644 --- a/libc/malloc/v7malloc.c +++ b/libc/malloc/v7malloc.c @@ -3,12 +3,15 @@ * Ported to ELKS from V7 malloc by Greg Haerr 20 Apr 2020 * * Enhancements: - * Minimum 1024 bytes (BLOCK) allocated from kernel sbrk, > 1024 allocates requested size - * Set DEBUG=1 for heap integrity checking on each call + * Minimum BLOCK allocate from kernel sbrk, > BLOCK allocates requested size + * Much improved size and heap overflow handling with errno returns + * Full heap 
diff --git a/libc/malloc/v7malloc.c b/libc/malloc/v7malloc.c
index 2726e7ee5..5d3b81f5e 100644
--- a/libc/malloc/v7malloc.c
+++ b/libc/malloc/v7malloc.c
@@ -3,12 +3,15 @@
  * Ported to ELKS from V7 malloc by Greg Haerr 20 Apr 2020
  *
  * Enhancements:
- * Minimum 1024 bytes (BLOCK) allocated from kernel sbrk, > 1024 allocates requested size
- * Set DEBUG=1 for heap integrity checking on each call
+ * Minimum BLOCK allocated from kernel sbrk, > BLOCK allocates requested size
+ * Much improved size and heap overflow handling with errno returns
+ * Full heap integrity checking and reporting with DEBUG options
+ * Use near heap pointers to work with OpenWatcom large model
+ * Combine free areas at heap start before allocating from free area at end of heap
  */
 #include
 #include
-#include <malloc.h>     /* __MINI_MALLOC must not be defined in malloc.h include*/
+#include <malloc.h>
 #define DEBUG 2     /* =1 heap checking asserts, =2 sysctl, =3 show heap */
 
 /* C storage allocator
@@ -33,28 +36,28 @@
 #define ALIGN   int
 #define NALIGN  1
 #define BUSY    1
-#define WORD    sizeof(union store)
-//#define BLOCK 514     /* min+2, amount to sbrk */
-#define BLOCK   34      /* min+2, amount to sbrk */
+//#define BLOCK 514     /* min+WORD amount to sbrk */
+#define BLOCK   34      /* min+WORD amount to sbrk */
 #define MINALLOC    14  /* minimum actual malloc size */
 #define GRANULE 0       /* sbrk granularity */
-#ifndef NULL
-#define NULL    0
-#endif
-#define testbusy(p)     ((INT)(p)&BUSY)
-#define setbusy(p)      (union store __wcnear *)((INT)(p)|BUSY)
-#define clearbusy(p)    (union store __wcnear *)((INT)(p)&~BUSY)
 
 union store {
     union store __wcnear *ptr;
     ALIGN dummy[NALIGN];
-    int calloc;     /*calloc clears an array of integers*/
 };
+typedef union store __wcnear *NPTR;
+#define WORD    sizeof(union store)
+
+#define testbusy(p)     ((INT)(p)&BUSY)
+#define setbusy(p)      (NPTR)((INT)(p)|BUSY)
+#define clearbusy(p)    (NPTR)((INT)(p)&~BUSY)
+#define next(p)         ((p)->ptr)
 
-static union store allocs[2];           /*initial arena*/
-static union store __wcnear *allocp;    /*search ptr*/
-static union store __wcnear *alloct;    /*arena top*/
-static union store __wcnear *allocx;    /*for benefit of realloc*/
+#define SIZE    2
+static union store allocs[SIZE];
+static NPTR allocp;     /*search ptr*/
+static NPTR alloct;     /*arena top*/
+static NPTR allocx;     /*for benefit of realloc*/
 
 #if DEBUG
 #include
@@ -78,19 +81,23 @@ static int debug_level = DEBUG;
 #endif
 
 void *
-malloc(size_t nbytes)
+__dmalloc(size_t nbytes)
 {
-    union store __wcnear *p, __wcnear *q;
+    NPTR p, q;
    unsigned int nw, temp;
 
 #if DEBUG == 2
    sysctl(CTL_GET, "malloc.debug", &debug_level);
 #endif
 
-    if(allocs[0].ptr==0) {  /*first time*/
-        allocs[0].ptr = setbusy((union store __wcnear *)&allocs[1]);
-        allocs[1].ptr = setbusy((union store __wcnear *)&allocs[0]);
-        alloct = (union store __wcnear *)&allocs[1];
-        allocp = (union store __wcnear *)&allocs[0];
+    if (allocs[0].ptr == 0) {   /*first time*/
+        allocs[0].ptr = setbusy(&allocs[1]);
+#if SIZE > 2
+        allocs[1].ptr = (NPTR)&allocs[SIZE-2];
+        allocs[SIZE-2].ptr = setbusy(&allocs[SIZE-1]);
+#endif
+        allocs[SIZE-1].ptr = setbusy(&allocs[0]);
+        alloct = (NPTR)&allocs[SIZE-1];
+        allocp = (NPTR)&allocs[0];
    }
    debug("(%d)malloc(%u) ", getpid(), nbytes);
@@ -112,19 +119,19 @@ malloc(size_t nbytes)
 
    ASSERT(allocp>=allocs && allocp<=alloct);
    ASSERT(malloc_check_heap());
-allocp = (union store __wcnear *)allocs;    /* experimental */
-    //debug("search start %04x ", (unsigned)allocp);
+    /* combine free areas at heap start before allocating from free area past allocp */
+    allocp = (NPTR)allocs;
    for(p=allocp; ; ) {
        for(temp=0; ; ) {
-            if(!testbusy(p->ptr)) {
-                while(!testbusy((q=p->ptr)->ptr)) {
+            if(!testbusy(next(p))) {
+                while(!testbusy(next(q = next(p)))) {
                    if (debug_level > 2) malloc_show_heap();
                    ASSERT(q>p);
                    ASSERT(q<alloct);
                    debug2("(combine %u and %u) ",
-                        (p->ptr - p) * sizeof(union store),
-                        (q->ptr - q) * sizeof(union store));
-                    p->ptr = q->ptr;
+                        (next(p) - p) * sizeof(union store),
+                        (next(q) - q) * sizeof(union store));
+                    next(p) = next(q);
                }
                debug2("q %04x p %04x nw %d p+nw %04x ",
                    (unsigned)q, (unsigned)p, nw, (unsigned)(p+nw));
@@ -132,11 +139,11 @@ allocp = (union store __wcnear *)allocs;    /* experimental */
                goto found;
            }
            q = p;
-            p = clearbusy(p->ptr);
+            p = clearbusy(next(p));
            if(p>q) {
                ASSERT(p<=alloct);
-            } else if(q!=alloct || p!=allocs) {
-                ASSERT(q==alloct&&p==allocs);
+            } else if(q!=alloct || p!=(NPTR)allocs) {
+                ASSERT(q==alloct&&p==(NPTR)allocs);
                debug(" (corrupt) = NULL\n");
                errno = ENOMEM;
                return(NULL);
@@ -144,6 +151,11 @@ allocp = (union store __wcnear *)allocs;    /* experimental */
                break;
        }
 
+#if SIZE > 2
+        debug("Out of fixed heap\n");
+        return NULL;
+#else
+
        /* extend break at least BLOCK bytes at a time, plus a word for top link */
        if (nw < BLOCK/WORD)
            temp = BLOCK/WORD + 1;
@@ -153,7 +165,7 @@ allocp = (union store __wcnear *)allocs;    /* experimental */
        if (debug_level > 2) malloc_show_heap();
        debug("sbrk(%d) ", temp*WORD);
 #if 0 /* not required and slow, initial break always even */
-        q = (union store __wcnear *)sbrk(0);
+        q = (NPTR)sbrk(0);
        if((INT)q & (sizeof(union store) - 1))
            sbrk(4 - ((INT)q & (sizeof(union store) - 1)));
 
@@ -164,7 +176,7 @@ allocp = (union store __wcnear *)allocs;    /* experimental */
            return(NULL);
        }
 #endif
-        q = (union store __wcnear *)sbrk(temp*WORD);
+        q = (NPTR)sbrk(temp*WORD);
        if((INT)q == -1) {
            debug(" (no more mem) = NULL\n");
            malloc_show_heap();
@@ -173,22 +185,24 @@ allocp = (union store __wcnear *)allocs;    /* experimental */
        }
        ASSERT(!((INT)q & 1));
        ASSERT(q>alloct);
-        alloct->ptr = q;
+        next(alloct) = q;
        if(q!=alloct+1)     /* mark any gap as permanently allocated*/
-            alloct->ptr = setbusy(alloct->ptr);
-        alloct = q->ptr = q+temp-1;
+            next(alloct) = setbusy(next(alloct));
+        alloct = next(q) = q+temp-1;
        debug("(TOTAL %u) ",
-            2 + (clearbusy(alloct) - clearbusy(allocs[1].ptr)) * sizeof(union store));
-        alloct->ptr = setbusy(allocs);
+            sizeof(union store) +
+            (clearbusy(alloct) - clearbusy(allocs[SIZE-1].ptr)) * sizeof(union store));
+        next(alloct) = setbusy(allocs);
+#endif
    }
 found:
    allocp = p + nw;
    ASSERT(allocp<=alloct);
    if(q>allocp) {
-        allocx = allocp->ptr;   /* save contents in case of realloc data overwrite*/
-        allocp->ptr = p->ptr;
+        allocx = next(allocp);  /* save contents in case of realloc data overwrite*/
+        next(allocp) = next(p);
    }
-    p->ptr = setbusy(allocp);
+    next(p) = setbusy(allocp);
    debug("= %04x\n", (unsigned)p);
    malloc_show_heap();
    return((void *)(p+1));
@@ -197,19 +211,19 @@ allocp = (union store __wcnear *)allocs;    /* experimental */
 
 /* freeing strategy tuned for LIFO allocation
  */
 void
-free(void *ptr)
+__dfree(void *ptr)
 {
-    union store __wcnear *p = (union store __wcnear *)ptr;
+    NPTR p = (NPTR)ptr;
 
    if (p == NULL)
        return;
    debug("(%d) free(%d) = %04x\n", getpid(), (unsigned)(p[-1].ptr - p) << 1, p-1);
-    ASSERT(p>clearbusy(allocs[1].ptr)&&p<=alloct);
+    ASSERT(p>clearbusy(allocs[SIZE-1].ptr)&&p<=alloct);
    ASSERT(malloc_check_heap());
    allocp = --p;
-    ASSERT(testbusy(p->ptr));
-    p->ptr = clearbusy(p->ptr);
-    ASSERT(p->ptr > allocp && p->ptr <= alloct);
+    ASSERT(testbusy(next(p)));
+    next(p) = clearbusy(next(p));
+    ASSERT(next(p) > allocp && next(p) <= alloct);
    malloc_show_heap();
 }
 
@@ -219,22 +233,22 @@ free(void *ptr)
  * returns new location, or 0 on failure
  */
 void *
-realloc(void *ptr, size_t nbytes)
+__drealloc(void *ptr, size_t nbytes)
 {
-    union store __wcnear *p = (union store __wcnear *)ptr;
-    union store __wcnear *q;
-    union store __wcnear *s, __wcnear *t;
+    NPTR p = (NPTR)ptr;
+    NPTR q;
+    NPTR s, t;
    unsigned int nw, onw;
 
    if (p == 0)
-        return malloc(nbytes);
+        return __dmalloc(nbytes);
    debug("(%d)realloc(%04x,%u) ", getpid(), (unsigned)(p-1), nbytes);
 
    ASSERT(testbusy(p[-1].ptr));
    if(testbusy(p[-1].ptr))
        free(p);
    onw = p[-1].ptr - p;
-    q = (union store __wcnear *)malloc(nbytes);
+    q = (NPTR)__dmalloc(nbytes);    // FIXME and also use memcpy
    if(q==NULL || q==p)
        return((void *)q);
 
@@ -250,7 +264,7 @@ realloc(void *ptr, size_t nbytes)
    /* restore old data for special case of malloc link overwrite*/
    if(q<p && q+nw>=p) {
        debug("allocx patch %04x,%04x,%d ", (unsigned)q, (unsigned)p, nw);
-        (q+(q+nw-p))->ptr = allocx;
+        next(q+(q+nw-p)) = allocx;
    }
    debug("= %04x\n", (unsigned)q);
    return((void *)q);
@@ -266,15 +280,15 @@ static void malloc_assert_fail(char *s)
 static int
 malloc_check_heap(void)
 {
-    union store __wcnear *p;
+    NPTR p;
    int x = 0;
 
-    for(p=(union store __wcnear *)&allocs[0]; clearbusy(p->ptr) > p; p=clearbusy(p->ptr)) {
+    for(p=(NPTR)&allocs[0]; clearbusy(next(p)) > p; p=clearbusy(next(p))) {
        if(p==allocp)
            x++;
    }
    if (p != alloct) debug("%04x %04x %04x\n",
-        (unsigned)p, (unsigned)alloct, (unsigned)p->ptr);
+        (unsigned)p, (unsigned)alloct, (unsigned)next(p));
    ASSERT(p==alloct);
    return((x==1)|(p==allocp));
 }
@@ -284,27 +298,30 @@ malloc_check_heap(void)
 static void
 malloc_show_heap(void)
 {
-    union store __wcnear *p;
+    NPTR p;
    int n = 1;
    unsigned int size, alloc = 0, free = 0;
 
    debug2("--- heap size ---\n");
    malloc_check_heap();
-    for(p = (union store __wcnear *)&allocs[0]; clearbusy(p->ptr) > p; p=clearbusy(p->ptr)) {
-        size = (clearbusy(p->ptr) - clearbusy(p)) * sizeof(union store);
+    for(p = (NPTR)&allocs[0]; clearbusy(next(p)) > p; p=clearbusy(next(p))) {
+        size = (clearbusy(next(p)) - clearbusy(p)) * sizeof(union store);
        debug2("%2d: %04x %4u", n, (unsigned)p, size);
-        if (!testbusy(p->ptr)) {
+        if (!testbusy(next(p))) {
            debug2(" (free)");
            free += size;
        } else {
+#if SIZE == 2
            if (n < 3)      /* don't count ptr to first sbrk()*/
                debug2(" (skipped)");
-            else alloc += size;
+            else
+#endif
+            alloc += size;
        }
        n++;
        debug2("\n");
    }
-    alloc += 2;
+    alloc += sizeof(union store);
    debug2("%2d: %04x %4u (top) ", n, (unsigned)alloct, 2);
    debug("alloc %u, free %u, total %u\n", alloc, free, alloc+free);
 }
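To close, a small smoke test of the renamed debug-allocator entry points, in the spirit of the v7stub.c route above. This is a sketch, not part of the patch, and the sizes and function name are arbitrary; with DEBUG enabled in v7malloc.c, every call re-verifies the arena links and, at higher malloc.debug levels, prints the per-call trace produced by the debug()/debug2() macros.

/* illustrative smoke test for __dmalloc/__drealloc/__dfree declared in malloc.h */
#include <malloc.h>
#include <string.h>

int dmalloc_smoke(void)
{
    char *p = __dmalloc(32);

    if (!p)
        return -1;
    memset(p, 0x5a, 32);        /* touch the block before growing it */
    p = __drealloc(p, 100);     /* old contents are copied into the new block */
    __dfree(p);
    __dfree(NULL);              /* freeing NULL is a no-op in __dfree() */
    return 0;
}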