/* Two Level Segregated Fit memory allocator (TLSF)
 * Version 2.4.4
 *
 * Modified for Tuplr by David Robillard.
 * Original TLSF code available at http://rtportal.upv.es/rtmalloc/
 *
 * Written by Miguel Masmano Tello
 *
 * Thanks to Ismael Ripoll for his suggestions and reviews
 *
 * Copyright (C) 2008, 2007, 2006, 2005, 2004
 *
 * This code is released using a dual license strategy: GPL/LGPL
 * You can choose the license that best fits your requirements.
 *
 * Released under the terms of the GNU General Public License Version 2.0
 * Released under the terms of the GNU Lesser General Public License Version 2.1
 */

//#define USE_SBRK 1
#define USE_MMAP 1
//#define TLSF_STATISTICS 1

#if defined(USE_MMAP) || defined(USE_SBRK)
#define _GNU_SOURCE 1
#include <unistd.h>
#endif

#ifdef USE_MMAP
#include <sys/mman.h>
#endif

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#ifdef TLSF_STATISTICS
#define TLSF_ADD_SIZE(tlsf, b) do { \
        tlsf->used_size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD; \
        if (tlsf->used_size > tlsf->max_size) \
            tlsf->max_size = tlsf->used_size; \
    } while (0)

#define TLSF_REMOVE_SIZE(tlsf, b) do { \
        tlsf->used_size -= (b->size & BLOCK_SIZE) + BHDR_OVERHEAD; \
    } while (0)
#else
#define TLSF_ADD_SIZE(tlsf, b) do {} while (0)
#define TLSF_REMOVE_SIZE(tlsf, b) do {} while (0)
#endif

#include "tlsf.h"

/******************************************************************/
/************************ Definitions *****************************/
/******************************************************************/

#define BLOCK_ALIGN (sizeof(void *) * 2)

#define MAX_FLI      (30)
#define MAX_LOG2_SLI (5)
#define MAX_SLI      (1 << MAX_LOG2_SLI) /* MAX_SLI = 2^MAX_LOG2_SLI */

#define FLI_OFFSET   (6) /* tlsf structure will manage blocks > 128 bytes */
#define SMALL_BLOCK  (128)
#define REAL_FLI     (MAX_FLI - FLI_OFFSET)
#define MIN_BLOCK_SIZE (sizeof(free_ptr_t))
#define BHDR_OVERHEAD  (sizeof(bhdr_t) - MIN_BLOCK_SIZE)

#define PTR_MASK   (sizeof(void *) - 1)
#define BLOCK_SIZE (0xFFFFFFFF - PTR_MASK) /* Mask of the size without the tag bits */

#define GET_NEXT_BLOCK(_addr, _r) ((bhdr_t *) ((char *) (_addr) + (_r)))

#define MEM_ALIGN          ((BLOCK_ALIGN) - 1)
#define ROUNDUP_SIZE(_r)   (((_r) + MEM_ALIGN) & ~MEM_ALIGN)
#define ROUNDDOWN_SIZE(_r) ((_r) & ~MEM_ALIGN)
#define ROUNDUP(_x, _v)    ((((~(_x)) + 1) & ((_v) - 1)) + (_x))

#define BLOCK_STATE (0x1)
#define PREV_STATE  (0x2)

/* bit 0 of the block size */
#define FREE_BLOCK (0x1)
#define USED_BLOCK (0x0)

/* bit 1 of the block size */
#define PREV_FREE (0x2)
#define PREV_USED (0x0)

#define DEFAULT_AREA_SIZE (1024 * 10)

#ifdef USE_MMAP
#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
#endif

#define PRINT_MSG(fmt, args...) printf(fmt, ## args)
#define ERROR_MSG(fmt, args...) printf(fmt, ## args)

typedef struct free_ptr_s {
    struct bhdr_s *prev;
    struct bhdr_s *next;
} free_ptr_t;

typedef struct bhdr_s {
    /** Valid only if the previous block is free (size & PREV_FREE) */
    struct bhdr_s *prev_hdr;
    /** The size in bytes.
     * Bit 0 is set when the block is free.
     * Bit 1 is set when the previous block is free.
     */
    size_t size;
    union {
        struct free_ptr_s free_ptr;
        uint8_t buffer[1]; /**< Payload (at least sizeof(struct free_ptr_s) bytes) */
    } ptr;
} bhdr_t;

/** Embedded at the beginning of each area, giving us
 * enough information to cope with a set of areas */
typedef struct area_info_s {
    bhdr_t *end;
    struct area_info_s *next;
} area_info_t;

struct tlsf_s {
#ifdef TLSF_STATISTICS
    size_t used_size;
    size_t max_size;
#endif

    /** A linked list holding all the existing areas */
    area_info_t *area_head;

    /** The first-level bitmap; needs at least REAL_FLI bits. */
    uint32_t fl_bitmap;

    /** The second-level bitmaps. */
    uint32_t sl_bitmap[REAL_FLI];

    bhdr_t *matrix[REAL_FLI][MAX_SLI];
};
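/*
 * Illustrative sketch (not used below): given a block header b, the tag
 * bits kept in the low bits of b->size unpack as
 *
 *     size_t bytes     = b->size & BLOCK_SIZE;  // payload size, tags masked off
 *     int    is_free   = b->size & BLOCK_STATE; // nonzero iff the block is free
 *     int    prev_free = b->size & PREV_STATE;  // nonzero iff the previous block is free
 *
 * Free blocks are then reachable through the two-level index: fl_bitmap
 * says which first-level size class has free blocks, sl_bitmap[fl] which
 * of its MAX_SLI subranges is populated, and matrix[fl][sl] heads the
 * corresponding free list.
 */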
/******************************************************************/
/********************** Helper Functions **************************/
/******************************************************************/

static inline void set_bit(int nr, uint32_t * addr);
static inline void clear_bit(int nr, uint32_t * addr);
static inline int ls_bit(int x);
static inline int ms_bit(int x);
static inline void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl);
static inline void MAPPING_INSERT(size_t _r, int *_fl, int *_sl);
static inline bhdr_t* FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl);
static inline bhdr_t* process_area(void *area, size_t size);
#if defined(USE_SBRK) || defined(USE_MMAP)
static inline void* get_new_area(size_t * size);
#endif

/** Log2 lookup table for the byte-wide bit scans below */
static const int table[] = {
    -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
};

static inline int ls_bit(int i)
{
    unsigned int a;
    unsigned int x = i & -i;

    a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
    return table[x >> a] + a;
}

static inline int ms_bit(int i)
{
    unsigned int a;
    unsigned int x = (unsigned int) i;

    a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
    return table[x >> a] + a;
}

static inline void set_bit(int nr, uint32_t * addr)
{
    addr[nr >> 5] |= 1 << (nr & 0x1f);
}

static inline void clear_bit(int nr, uint32_t * addr)
{
    addr[nr >> 5] &= ~(1 << (nr & 0x1f));
}

static inline void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl)
{
    int _t;

    if (*_r < SMALL_BLOCK) {
        *_fl = 0;
        *_sl = *_r / (SMALL_BLOCK / MAX_SLI);
    } else {
        _t = (1 << (ms_bit(*_r) - MAX_LOG2_SLI)) - 1;
        *_r = *_r + _t;
        *_fl = ms_bit(*_r);
        *_sl = (*_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
        *_fl -= FLI_OFFSET;
        /*if ((*_fl -= FLI_OFFSET) < 0) // FL will always be > 0!
            *_fl = *_sl = 0; */
        *_r &= ~_t;
    }
}

static inline void MAPPING_INSERT(size_t _r, int *_fl, int *_sl)
{
    if (_r < SMALL_BLOCK) {
        *_fl = 0;
        *_sl = _r / (SMALL_BLOCK / MAX_SLI);
    } else {
        *_fl = ms_bit(_r);
        *_sl = (_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
        *_fl -= FLI_OFFSET;
    }
}
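/*
 * Worked example of the mapping above (values follow from the constants
 * defined earlier): inserting a free block of 460 bytes, MAPPING_INSERT
 * computes
 *
 *     fl = ms_bit(460) = 8              (460 lies in [2^8, 2^9))
 *     sl = (460 >> (8 - 5)) - 32 = 25
 *     fl -= FLI_OFFSET                  -> fl = 2
 *
 * filing the block under matrix[2][25], the 8-byte-wide size range
 * [456, 464).  MAPPING_SEARCH for a 460-byte request instead rounds the
 * size up to the next range boundary (464) and starts at matrix[2][26],
 * so any block found there is guaranteed to be large enough.
 */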
static inline bhdr_t* FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl)
{
    uint32_t _tmp = _tlsf->sl_bitmap[*_fl] & (~0 << *_sl);
    bhdr_t *_b = NULL;

    if (_tmp) {
        *_sl = ls_bit(_tmp);
        _b = _tlsf->matrix[*_fl][*_sl];
    } else {
        *_fl = ls_bit(_tlsf->fl_bitmap & (~0 << (*_fl + 1)));
        if (*_fl > 0) { /* likely */
            *_sl = ls_bit(_tlsf->sl_bitmap[*_fl]);
            _b = _tlsf->matrix[*_fl][*_sl];
        }
    }
    return _b;
}

#define EXTRACT_BLOCK_HDR(_b, _tlsf, _fl, _sl) do { \
        _tlsf->matrix[_fl][_sl] = _b->ptr.free_ptr.next; \
        if (_tlsf->matrix[_fl][_sl]) \
            _tlsf->matrix[_fl][_sl]->ptr.free_ptr.prev = NULL; \
        else { \
            clear_bit(_sl, &_tlsf->sl_bitmap[_fl]); \
            if (!_tlsf->sl_bitmap[_fl]) \
                clear_bit(_fl, &_tlsf->fl_bitmap); \
        } \
        _b->ptr.free_ptr.prev = NULL; \
        _b->ptr.free_ptr.next = NULL; \
    } while (0)

#define EXTRACT_BLOCK(_b, _tlsf, _fl, _sl) do { \
        if (_b->ptr.free_ptr.next) \
            _b->ptr.free_ptr.next->ptr.free_ptr.prev = _b->ptr.free_ptr.prev; \
        if (_b->ptr.free_ptr.prev) \
            _b->ptr.free_ptr.prev->ptr.free_ptr.next = _b->ptr.free_ptr.next; \
        if (_tlsf->matrix[_fl][_sl] == _b) { \
            _tlsf->matrix[_fl][_sl] = _b->ptr.free_ptr.next; \
            if (!_tlsf->matrix[_fl][_sl]) { \
                clear_bit(_sl, &_tlsf->sl_bitmap[_fl]); \
                if (!_tlsf->sl_bitmap[_fl]) \
                    clear_bit(_fl, &_tlsf->fl_bitmap); \
            } \
        } \
        _b->ptr.free_ptr.prev = NULL; \
        _b->ptr.free_ptr.next = NULL; \
    } while (0)

#define INSERT_BLOCK(_b, _tlsf, _fl, _sl) do { \
        _b->ptr.free_ptr.prev = NULL; \
        _b->ptr.free_ptr.next = _tlsf->matrix[_fl][_sl]; \
        if (_tlsf->matrix[_fl][_sl]) \
            _tlsf->matrix[_fl][_sl]->ptr.free_ptr.prev = _b; \
        _tlsf->matrix[_fl][_sl] = _b; \
        set_bit(_sl, &_tlsf->sl_bitmap[_fl]); \
        set_bit(_fl, &_tlsf->fl_bitmap); \
    } while (0)

#if defined(USE_SBRK) || defined(USE_MMAP)
static inline void* get_new_area(size_t * size)
{
    void *area;

#ifdef USE_SBRK
    area = (void *)sbrk(0);
    if (((void *)sbrk(*size)) != ((void *) -1))
        return area;
#endif

#ifdef USE_MMAP
    *size = ROUNDUP(*size, PAGE_SIZE);
    if ((area = mmap(0, *size,
                     PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)) != MAP_FAILED)
        return area;
#endif

    return ((void *) ~0);
}
#endif

static inline bhdr_t* process_area(void *area, size_t size)
{
    bhdr_t *b, *lb, *ib;
    area_info_t *ai;

    ib = (bhdr_t *) area;
    ib->size = ((sizeof(area_info_t) < MIN_BLOCK_SIZE)
                ? MIN_BLOCK_SIZE
                : ROUNDUP_SIZE(sizeof(area_info_t))) | USED_BLOCK | PREV_USED;
    b = (bhdr_t *) GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
    b->size = ROUNDDOWN_SIZE(size - 3 * BHDR_OVERHEAD - (ib->size & BLOCK_SIZE))
        | USED_BLOCK | PREV_USED;
    b->ptr.free_ptr.prev = b->ptr.free_ptr.next = 0;
    lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
    lb->prev_hdr = b;
    lb->size = 0 | USED_BLOCK | PREV_FREE;
    ai = (area_info_t *) ib->ptr.buffer;
    ai->next = 0;
    ai->end = lb;
    return ib;
}
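/*
 * Resulting layout of an area after process_area() (a sketch; exact sizes
 * depend on alignment):
 *
 *   area ->  +----------------------+
 *            | ib: small used block |  payload holds the area_info_t
 *            +----------------------+
 *            | b: one large block   |  covers the remainder; the caller
 *            |                      |  frees it to seed the pool
 *            +----------------------+
 *            | lb: zero-size block  |  permanently "used" sentinel that
 *            +----------------------+  marks the end of the area
 */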
/******************************************************************/
/************************* Public Functions ***********************/
/******************************************************************/

tlsf_t* tlsf_init(void* area, size_t area_size)
{
    tlsf_t *tlsf;
    bhdr_t *b, *ib;

    if (!area || !area_size
        || area_size < sizeof(tlsf_t) + BHDR_OVERHEAD * 8) {
        ERROR_MSG("tlsf_init (): invalid memory pool\n");
        return NULL;
    }

    if (((unsigned long) area & PTR_MASK)) {
        ERROR_MSG("tlsf_init (): area must be aligned to a word\n");
        return NULL;
    }

    tlsf = (tlsf_t *) area;

    /* Zero the control structure at the start of the pool */
    memset(area, 0, sizeof(tlsf_t));

    ib = process_area(GET_NEXT_BLOCK(area, ROUNDUP_SIZE(sizeof(tlsf_t))),
                      ROUNDDOWN_SIZE(area_size - sizeof(tlsf_t)));
    b = GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
    tlsf_free(tlsf, b->ptr.buffer);
    tlsf->area_head = (area_info_t *) ib->ptr.buffer;

#ifdef TLSF_STATISTICS
    tlsf->used_size = area_size - (b->size & BLOCK_SIZE);
    tlsf->max_size = tlsf->used_size;
#endif

    return tlsf;
}

size_t tlsf_add_area(tlsf_t* tlsf, void *area, size_t area_size)
{
    area_info_t *ptr, *ptr_prev, *ai;
    bhdr_t *ib0, *b0, *lb0, *ib1, *b1, *lb1, *next_b;

    memset(area, 0, area_size);
    ptr = tlsf->area_head;
    ptr_prev = 0;

    ib0 = process_area(area, area_size);
    b0 = GET_NEXT_BLOCK(ib0->ptr.buffer, ib0->size & BLOCK_SIZE);
    lb0 = GET_NEXT_BLOCK(b0->ptr.buffer, b0->size & BLOCK_SIZE);

    /* Before inserting the new area, we have to merge this area with the
       already existing ones */
    while (ptr) {
        ib1 = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
        b1 = GET_NEXT_BLOCK(ib1->ptr.buffer, ib1->size & BLOCK_SIZE);
        lb1 = ptr->end;

        /* Merge the new area with the next physically contiguous one */
        if ((unsigned long) ib1 == (unsigned long) lb0 + BHDR_OVERHEAD) {
            if (tlsf->area_head == ptr) {
                tlsf->area_head = ptr->next;
                ptr = ptr->next;
            } else {
                ptr_prev->next = ptr->next;
                ptr = ptr->next;
            }

            b0->size = ROUNDDOWN_SIZE((b0->size & BLOCK_SIZE)
                                      + (ib1->size & BLOCK_SIZE)
                                      + 2 * BHDR_OVERHEAD)
                | USED_BLOCK | PREV_USED;

            b1->prev_hdr = b0;
            lb0 = lb1;

            continue;
        }

        /* Merge the new area with the previous physically contiguous one */
        if ((unsigned long) lb1->ptr.buffer == (unsigned long) ib0) {
            if (tlsf->area_head == ptr) {
                tlsf->area_head = ptr->next;
                ptr = ptr->next;
            } else {
                ptr_prev->next = ptr->next;
                ptr = ptr->next;
            }

            lb1->size = ROUNDDOWN_SIZE((b0->size & BLOCK_SIZE)
                                       + (ib0->size & BLOCK_SIZE)
                                       + 2 * BHDR_OVERHEAD)
                | USED_BLOCK | (lb1->size & PREV_STATE);

            next_b = GET_NEXT_BLOCK(lb1->ptr.buffer, lb1->size & BLOCK_SIZE);
            next_b->prev_hdr = lb1;
            b0 = lb1;
            ib0 = ib1;

            continue;
        }

        ptr_prev = ptr;
        ptr = ptr->next;
    }

    /* Insert the area in the list of linked areas */
    ai = (area_info_t *) ib0->ptr.buffer;
    ai->next = tlsf->area_head;
    ai->end = lb0;
    tlsf->area_head = ai;
    tlsf_free(tlsf, b0->ptr.buffer);
    return (b0->size & BLOCK_SIZE);
}
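/*
 * Usage sketch (illustrative; `extra` is a hypothetical caller-provided
 * region): a second, possibly discontiguous region can be donated to an
 * existing pool, and the return value is the usable size it contributed:
 *
 *     static size_t extra[8192 / sizeof(size_t)];
 *     size_t usable = tlsf_add_area(t, extra, sizeof(extra));
 *
 * Regions physically adjacent to existing areas are merged with them
 * before being inserted, so contiguous growth does not fragment the pool.
 */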
size_t tlsf_get_used_size(tlsf_t* tlsf)
{
#ifdef TLSF_STATISTICS
    return tlsf->used_size;
#else
    (void)tlsf;
    return 0;
#endif
}

size_t tlsf_get_max_size(tlsf_t* tlsf)
{
#ifdef TLSF_STATISTICS
    return tlsf->max_size;
#else
    (void)tlsf;
    return 0;
#endif
}

void tlsf_destroy(tlsf_t* tlsf)
{
    /* Nothing to do: the caller owns the memory areas */
    (void)tlsf;
}

void* tlsf_malloc(tlsf_t* tlsf, size_t size)
{
    bhdr_t *b, *b2, *next_b;
    int fl, sl;
    size_t tmp_size;

    size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);

    /* Round up the requested size and calculate fl and sl */
    MAPPING_SEARCH(&size, &fl, &sl);

    /* Search for a free block.  Note that this call changes fl and sl,
       so they are no longer valid when it fails */
    b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
#if defined(USE_MMAP) || defined(USE_SBRK)
    if (!b) {
        size_t area_size;
        void *area;

        /* Grow the pool when needed */
        area_size = size + BHDR_OVERHEAD * 8; /* size plus enough room for the requested headers */
        area_size = (area_size > DEFAULT_AREA_SIZE) ? area_size : DEFAULT_AREA_SIZE;
        area = get_new_area(&area_size); /* Call sbrk or mmap */
        if (area == ((void *) ~0))
            return NULL; /* Not enough system memory */
        tlsf_add_area(tlsf, area, area_size);
        /* Round up the requested size and calculate fl and sl */
        MAPPING_SEARCH(&size, &fl, &sl);
        /* Find a free block */
        b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
    }
#endif
    if (!b)
        return NULL; /* Not found */

    EXTRACT_BLOCK_HDR(b, tlsf, fl, sl);

    /*-- found: */
    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);

    /* Should the block be split? */
    tmp_size = (b->size & BLOCK_SIZE) - size;
    if (tmp_size >= sizeof(bhdr_t)) {
        tmp_size -= BHDR_OVERHEAD;
        b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);
        b2->size = tmp_size | FREE_BLOCK | PREV_USED;
        next_b->prev_hdr = b2;
        MAPPING_INSERT(tmp_size, &fl, &sl);
        INSERT_BLOCK(b2, tlsf, fl, sl);
        b->size = size | (b->size & PREV_STATE);
    } else {
        next_b->size &= (~PREV_FREE);
        b->size &= (~FREE_BLOCK); /* Now it's used */
    }

    TLSF_ADD_SIZE(tlsf, b);

    return (void *) b->ptr.buffer;
}

void tlsf_free(tlsf_t* tlsf, void* ptr)
{
    bhdr_t *b, *tmp_b;
    int fl = 0, sl = 0;

    if (!ptr) {
        return;
    }

    b = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
    b->size |= FREE_BLOCK;

    TLSF_REMOVE_SIZE(tlsf, b);

    b->ptr.free_ptr.prev = NULL;
    b->ptr.free_ptr.next = NULL;
    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
    if (tmp_b->size & FREE_BLOCK) {
        /* Coalesce with the following free block */
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
        b->size += (tmp_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
    }
    if (b->size & PREV_FREE) {
        /* Coalesce with the preceding free block */
        tmp_b = b->prev_hdr;
        MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
        EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
        tmp_b->size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
        b = tmp_b;
    }
    MAPPING_INSERT(b->size & BLOCK_SIZE, &fl, &sl);
    INSERT_BLOCK(b, tlsf, fl, sl);

    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
    tmp_b->size |= PREV_FREE;
    tmp_b->prev_hdr = b;
}
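/*
 * Coalescing example (derived from tlsf_free() above): freeing a block
 * whose physical neighbours are both free yields a single free block of
 * size
 *
 *     prev + this + next + 2 * BHDR_OVERHEAD
 *
 * because the headers of `this` and `next` are absorbed into the merged
 * payload.  Only constant work is done: at most two extractions and one
 * insertion into the segregated lists.
 */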
void* tlsf_realloc(tlsf_t* tlsf, void* ptr, size_t new_size)
{
    void *ptr_aux;
    unsigned int cpsize;
    bhdr_t *b, *tmp_b, *next_b;
    int fl, sl;
    size_t tmp_size;

    if (!ptr)
        return new_size ? tlsf_malloc(tlsf, new_size) : NULL;

    if (!new_size) {
        tlsf_free(tlsf, ptr);
        return NULL;
    }

    b = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
    new_size = (new_size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(new_size);
    tmp_size = (b->size & BLOCK_SIZE);
    if (new_size <= tmp_size) {
        /* Shrink in place */
        TLSF_REMOVE_SIZE(tlsf, b);
        if (next_b->size & FREE_BLOCK) {
            MAPPING_INSERT(next_b->size & BLOCK_SIZE, &fl, &sl);
            EXTRACT_BLOCK(next_b, tlsf, fl, sl);
            tmp_size += (next_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
            next_b = GET_NEXT_BLOCK(next_b->ptr.buffer, next_b->size & BLOCK_SIZE);
            /* We always reenter this free block because tmp_size will
               be greater than sizeof(bhdr_t) */
        }
        tmp_size -= new_size;
        if (tmp_size >= sizeof(bhdr_t)) {
            tmp_size -= BHDR_OVERHEAD;
            tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, new_size);
            tmp_b->size = tmp_size | FREE_BLOCK | PREV_USED;
            next_b->prev_hdr = tmp_b;
            next_b->size |= PREV_FREE;
            MAPPING_INSERT(tmp_size, &fl, &sl);
            INSERT_BLOCK(tmp_b, tlsf, fl, sl);
            b->size = new_size | (b->size & PREV_STATE);
        }
        TLSF_ADD_SIZE(tlsf, b);
        return (void *) b->ptr.buffer;
    }

    if ((next_b->size & FREE_BLOCK)) {
        if (new_size <= (tmp_size + (next_b->size & BLOCK_SIZE))) {
            /* Grow in place by absorbing the following free block */
            TLSF_REMOVE_SIZE(tlsf, b);
            MAPPING_INSERT(next_b->size & BLOCK_SIZE, &fl, &sl);
            EXTRACT_BLOCK(next_b, tlsf, fl, sl);
            b->size += (next_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
            next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
            next_b->prev_hdr = b;
            next_b->size &= ~PREV_FREE;
            tmp_size = (b->size & BLOCK_SIZE) - new_size;
            if (tmp_size >= sizeof(bhdr_t)) {
                tmp_size -= BHDR_OVERHEAD;
                tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, new_size);
                tmp_b->size = tmp_size | FREE_BLOCK | PREV_USED;
                next_b->prev_hdr = tmp_b;
                next_b->size |= PREV_FREE;
                MAPPING_INSERT(tmp_size, &fl, &sl);
                INSERT_BLOCK(tmp_b, tlsf, fl, sl);
                b->size = new_size | (b->size & PREV_STATE);
            }
            TLSF_ADD_SIZE(tlsf, b);
            return (void *) b->ptr.buffer;
        }
    }

    /* Fall back to allocate + copy + free */
    if (!(ptr_aux = tlsf_malloc(tlsf, new_size)))
        return NULL;

    cpsize = ((b->size & BLOCK_SIZE) > new_size) ? new_size : (b->size & BLOCK_SIZE);
    memcpy(ptr_aux, ptr, cpsize);
    tlsf_free(tlsf, ptr);
    return ptr_aux;
}

void* tlsf_calloc(tlsf_t* tlsf, size_t nelem, size_t elem_size)
{
    void *ptr;

    if (nelem == 0 || elem_size == 0)
        return NULL;

    /* Guard against overflow in nelem * elem_size */
    if (nelem > ((size_t) -1) / elem_size)
        return NULL;

    if (!(ptr = tlsf_malloc(tlsf, nelem * elem_size)))
        return NULL;

    memset(ptr, 0, nelem * elem_size);
    return ptr;
}
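/*
 * Minimal usage sketch (illustrative; TLSF_EXAMPLE_MAIN is a hypothetical
 * guard, not part of the original build).  Compile with -DTLSF_EXAMPLE_MAIN
 * to get a tiny self-test that carves a pool out of a static buffer.  A
 * size_t array is used so the pool satisfies tlsf_init()'s word-alignment
 * check on typical platforms.
 */
#ifdef TLSF_EXAMPLE_MAIN
static size_t example_pool[(64 * 1024) / sizeof(size_t)];

int main(void)
{
    void *p, *q;
    tlsf_t *t = tlsf_init(example_pool, sizeof(example_pool));
    if (!t)
        return 1;

    p = tlsf_malloc(t, 100);    /* rounded up internally */
    q = tlsf_calloc(t, 10, 32); /* zeroed 320-byte block */
    if (!p || !q)
        return 1;

    p = tlsf_realloc(t, p, 200); /* may grow in place */

    tlsf_free(t, q);
    tlsf_free(t, p);
    tlsf_destroy(t);
    return 0;
}
#endif /* TLSF_EXAMPLE_MAIN */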