diff --git a/common/bl_math/arm_dsp_wrapper.h b/common/bl_math/arm_dsp_wrapper.h
index 29373b35..445b22a2 100644
--- a/common/bl_math/arm_dsp_wrapper.h
+++ b/common/bl_math/arm_dsp_wrapper.h
@@ -24,7 +24,7 @@
 #ifndef __MY_MATH_F_H__
 #define __MY_MATH_F_H__
 
-#include "bflb_platform.h"
+#include "misc.h"
 #include "math.h"
 
 typedef float float32_t;
diff --git a/common/device/drv_device.c b/common/device/drv_device.c
index 75975d57..676b5111 100644
--- a/common/device/drv_device.c
+++ b/common/device/drv_device.c
@@ -20,7 +20,6 @@
  *
  */
 #include "drv_device.h"
-#include "string.h"
 
 #define dev_open  (dev->open)
 #define dev_close (dev->close)
diff --git a/common/device/drv_device.h b/common/device/drv_device.h
index 101d1cfa..a7a820f0 100644
--- a/common/device/drv_device.h
+++ b/common/device/drv_device.h
@@ -24,7 +24,7 @@
 #define __DRV_DEVICE_H__
 
 #include "drv_list.h"
-#include "bflb_platform.h"
+#include "stdio.h"
 
 #define DEVICE_NAME_MAX 20 /* max device name*/
@@ -63,7 +63,7 @@
 #define DEVICE_EINVAL   22 /* Invalid argument */
 #define DEVICE_ENOSPACE 23 /* No more Device for Allocate */
 
-#define __ASSERT_PRINT(fmt, ...) bflb_platform_printf(fmt, ##__VA_ARGS__)
+#define __ASSERT_PRINT(fmt, ...) printf(fmt, ##__VA_ARGS__)
 
 #define __ASSERT_LOC(test)                          \
     __ASSERT_PRINT("ASSERTION FAIL [%s] @ %s:%d\n", \
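With __ASSERT_PRINT now routed through the C library's printf() instead of bflb_platform_printf(), assertion output only reaches the console if stdio is retargeted to the board's UART. A minimal sketch of such retargeting, assuming a newlib-style libc; uart_putc() is a hypothetical BSP routine, not part of this diff:

    /* Sketch only: retarget newlib's _write() so printf()-based asserts
     * reach the UART. uart_putc() is a hypothetical BSP transmit routine. */
    extern void uart_putc(char c);

    int _write(int fd, const char *buf, int len)
    {
        (void)fd;
        for (int i = 0; i < len; i++) {
            uart_putc(buf[i]);
        }
        return len;
    }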
diff --git a/common/memheap/drv_mmheap.c b/common/memheap/drv_mmheap.c
index f4a08bae..842525a3 100644
--- a/common/memheap/drv_mmheap.c
+++ b/common/memheap/drv_mmheap.c
@@ -1,794 +1,402 @@
-/**
- * @file drv_mmheap.c
- * @brief
- *
- * Copyright (c) 2021 Bouffalolab team
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership. The
- * ASF licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the
- * License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- *
- */
-#include "drv_mmheap.h"
-
-mmheap_ctl_t mmheap_ctl;
-
-static int generic_fls(uint32_t x)
-{
-    int r = 32;
-
-    if (!x) {
-        return 0;
-    }
-
-    if (!(x & 0xffff0000u)) {
-        x <<= 16;
-        r -= 16;
-    }
-
-    if (!(x & 0xff000000u)) {
-        x <<= 8;
-        r -= 8;
-    }
-
-    if (!(x & 0xf0000000u)) {
-        x <<= 4;
-        r -= 4;
-    }
-
-    if (!(x & 0xc0000000u)) {
-        x <<= 2;
-        r -= 2;
-    }
-
-    if (!(x & 0x80000000u)) {
-        x <<= 1;
-        r -= 1;
-    }
-
-    return r;
-}
-
-static int __ffs(uint32_t word)
-{
-    return generic_fls(word & (~word + 1)) - 1;
-}
-
-static int __fls(uint32_t word)
-{
-    return generic_fls(word) - 1;
-}
-
-static inline size_t blk_size(const mmheap_blk_t *blk)
-{
-    return blk->size & MMHEAP_BLOCK_SIZE_MASK;
-}
-
-static inline void blk_set_size(mmheap_blk_t *blk, size_t size)
-{
-    blk->size = size | (blk->size & MMHEAP_BLOCK_STATE_MASK);
-}
-
-static inline int blk_is_last(const mmheap_blk_t *blk)
-{
-    return blk_size(blk) == 0;
-}
-
-static inline int blk_is_free(const mmheap_blk_t *blk)
-{
-    return blk->size & MMHEAP_BLOCK_CURR_FREE;
-}
-
-static inline void blk_set_free(mmheap_blk_t *blk)
-{
-    blk->size |= MMHEAP_BLOCK_CURR_FREE;
-}
-
-static inline void blk_set_used(mmheap_blk_t *blk)
-{
-    blk->size &= ~MMHEAP_BLOCK_CURR_FREE;
-}
-
-static inline int blk_is_prev_free(const mmheap_blk_t *blk)
-{
-    return blk->size & MMHEAP_BLOCK_PREV_FREE;
-}
-
-static inline void blk_set_prev_free(mmheap_blk_t *blk)
-{
-    blk->size |= MMHEAP_BLOCK_PREV_FREE;
-}
-
-static void blk_set_prev_used(mmheap_blk_t *blk)
-{
-    blk->size &= ~MMHEAP_BLOCK_PREV_FREE;
-}
-
-static inline mmheap_blk_t *blk_from_ptr(const void *ptr)
-{
-    return (mmheap_blk_t *)((uint32_t)(uintptr_t)ptr - MMHEAP_BLK_START_OFFSET);
-}
-
-static inline void *blk_to_ptr(const mmheap_blk_t *blk)
-{
-    return (void *)((uint32_t)(uintptr_t)blk + MMHEAP_BLK_START_OFFSET);
-}
-
-/* Return location of next block after block of given size. */
-static inline mmheap_blk_t *offset_to_blk(const void *ptr, int diff)
-{
-    return (mmheap_blk_t *)(uintptr_t)((uint32_t)(uintptr_t)ptr + diff);
-}
-
-/* Return location of previous block. */
-static inline mmheap_blk_t *blk_prev(const mmheap_blk_t *blk)
-{
-    return blk->prev_phys_blk;
-}
-
-/* Return location of next existing block. */
-static mmheap_blk_t *blk_next(const mmheap_blk_t *blk)
-{
-    mmheap_blk_t *next_blk;
-
-    next_blk = offset_to_blk(blk_to_ptr(blk), blk_size(blk) - MMHEAP_BLK_HEADER_OVERHEAD);
-    return next_blk;
-}
-
-/* Link a new block with its physical neighbor, return the neighbor. */
-static mmheap_blk_t *blk_link_next(mmheap_blk_t *blk)
-{
-    mmheap_blk_t *next_blk;
-
-    next_blk = blk_next(blk);
-    next_blk->prev_phys_blk = blk;
-    return next_blk;
-}
-
-static void blk_mark_as_free(mmheap_blk_t *blk)
-{
-    mmheap_blk_t *next_blk;
-
-    /* Link the block to the next block, first. */
-    next_blk = blk_link_next(blk);
-    blk_set_prev_free(next_blk);
-    blk_set_free(blk);
-}
-
-static void blk_mark_as_used(mmheap_blk_t *blk)
-{
-    mmheap_blk_t *next_blk;
-
-    next_blk = blk_next(blk);
-    blk_set_prev_used(next_blk);
-    blk_set_used(blk);
-}
-
-static inline size_t align_up(size_t x, size_t align)
-{
-    return (x + (align - 1)) & ~(align - 1);
-}
-
-static inline size_t align_down(size_t x, size_t align)
-{
-    return x - (x & (align - 1));
-}
-
-static inline void *align_ptr(const void *ptr, size_t align)
-{
-    return (void *)(((uint32_t)(uintptr_t)ptr + (align - 1)) & ~(align - 1));
-}
-
-/*
-** Adjust an allocation size to be aligned to word size, and no smaller
-** than internal minimum.
-*/
-static size_t adjust_request_size(size_t size, size_t align)
-{
-    size_t adjust_size = 0;
-
-    if (!size) {
-        return 0;
-    }
-
-    adjust_size = align_up(size, align);
-
-    if (adjust_size > MMHEAP_BLK_SIZE_MAX) {
-        return 0;
-    }
-
-    /* aligned sized must not exceed block_size_max or we'll go out of bounds on sl_bitmap */
-    return adjust_size > MMHEAP_BLK_SIZE_MIN ? adjust_size : MMHEAP_BLK_SIZE_MIN;
-}
-
-/*
-** TLSF utility functions. In most cases, these are direct translations of
-** the documentation found in the white paper.
-*/
-static void mapping_insert(size_t size, int *fli, int *sli)
-{
-    int fl, sl;
-
-    if (size < MMHEAP_SMALL_BLOCK_SIZE) {
-        /* Store small blocks in first list. */
-        fl = 0;
-        sl = (int)size / (MMHEAP_SMALL_BLOCK_SIZE / MMHEAP_SL_INDEX_COUNT);
-    } else {
-        fl = __fls(size);
-        sl = ((int)size >> (fl - MMHEAP_SL_INDEX_COUNT_LOG2)) ^ (1 << MMHEAP_SL_INDEX_COUNT_LOG2);
-        fl -= (MMHEAP_FL_INDEX_SHIFT - 1);
-    }
-
-    *fli = fl;
-    *sli = sl;
-}
-
-/* This version rounds up to the next block size (for allocations) */
-static void mapping_search(size_t size, int *fli, int *sli)
-{
-    size_t round;
-
-    if (size >= MMHEAP_SMALL_BLOCK_SIZE) {
-        round = (1 << (__fls(size) - MMHEAP_SL_INDEX_COUNT_LOG2)) - 1;
-        size += round;
-    }
-
-    mapping_insert(size, fli, sli);
-}
-
-static mmheap_blk_t *blk_search_suitable(int *fli, int *sli)
-{
-    int fl, sl;
-    uint32_t sl_map, fl_map;
-
-    fl = *fli;
-    sl = *sli;
-
-    /*
-    ** First, search for a block in the list associated with the given
-    ** fl/sl index.
-    */
-    sl_map = mmheap_ctl.sl_bitmap[fl] & (~0U << sl);
-
-    if (!sl_map) {
-        /* No block exists. Search in the next largest first-level list. */
-        fl_map = mmheap_ctl.fl_bitmap & (~0U << (fl + 1));
-
-        if (!fl_map) {
-            /* No free blocks available, memory has been exhausted. */
-            return 0;
-        }
-
-        fl = __ffs(fl_map);
-        *fli = fl;
-        sl_map = mmheap_ctl.sl_bitmap[fl];
-    }
-
-    sl = __ffs(sl_map);
-    *sli = sl;
-
-    /* Return the first block in the free list. */
-    return mmheap_ctl.blocks[fl][sl];
-}
-
-/* Insert a free block into the free block list. */
-static void insert_free_block(mmheap_blk_t *blk, int fl, int sl)
-{
-    mmheap_blk_t *curr;
-
-    curr = mmheap_ctl.blocks[fl][sl];
-    blk->next_free = curr;
-    blk->prev_free = &mmheap_ctl.block_null;
-    curr->prev_free = blk;
-
-    /*
-    ** Insert the new block at the head of the list, and mark the first-
-    ** and second-level bitmaps appropriately.
-    */
-    mmheap_ctl.blocks[fl][sl] = blk;
-    mmheap_ctl.fl_bitmap |= (1 << fl);
-    mmheap_ctl.sl_bitmap[fl] |= (1 << sl);
-}
-
-/* Remove a free block from the free list.*/
-static void remove_free_block(mmheap_blk_t *blk, int fl, int sl)
-{
-    mmheap_blk_t *prev_blk;
-    mmheap_blk_t *next_blk;
-
-    prev_blk = blk->prev_free;
-    next_blk = blk->next_free;
-    next_blk->prev_free = prev_blk;
-    prev_blk->next_free = next_blk;
-
-    /* If this block is the head of the free list, set new head. */
-    if (mmheap_ctl.blocks[fl][sl] == blk) {
-        mmheap_ctl.blocks[fl][sl] = next_blk;
-
-        /* If the new head is null, clear the bitmap. */
-        if (next_blk == &mmheap_ctl.block_null) {
-            mmheap_ctl.sl_bitmap[fl] &= ~(1 << sl);
-
-            /* If the second bitmap is now empty, clear the fl bitmap. */
-            if (!mmheap_ctl.sl_bitmap[fl]) {
-                mmheap_ctl.fl_bitmap &= ~(1 << fl);
-            }
-        }
-    }
-}
-
-/* Remove a given block from the free list. */
-static void blk_remove(mmheap_blk_t *blk)
-{
-    int fl, sl;
-
-    mapping_insert(blk_size(blk), &fl, &sl);
-    remove_free_block(blk, fl, sl);
-}
-
-/* Insert a given block into the free list. */
-static void blk_insert(mmheap_blk_t *blk)
-{
-    int fl, sl;
-
-    mapping_insert(blk_size(blk), &fl, &sl);
-    insert_free_block(blk, fl, sl);
-}
-
-static int blk_can_split(mmheap_blk_t *blk, size_t size)
-{
-    return blk_size(blk) >= sizeof(mmheap_blk_t) + size;
-}
-
-/* Split a block into two, the second of which is free. */
-static mmheap_blk_t *blk_split(mmheap_blk_t *blk, size_t size)
-{
-    mmheap_blk_t *remaining;
-    size_t remain_size;
-
-    /* Calculate the amount of space left in the remaining block. */
-    remaining = offset_to_blk(blk_to_ptr(blk), size - MMHEAP_BLK_HEADER_OVERHEAD);
-    remain_size = blk_size(blk) - (size + MMHEAP_BLK_HEADER_OVERHEAD);
-
-    blk_set_size(remaining, remain_size);
-
-    blk_set_size(blk, size);
-    blk_mark_as_free(remaining);
-
-    return remaining;
-}
-
-/* Absorb a free block's storage into an adjacent previous free block. */
-static mmheap_blk_t *blk_absorb(mmheap_blk_t *prev_blk, mmheap_blk_t *blk)
-{
-    prev_blk->size += blk_size(blk) + MMHEAP_BLK_HEADER_OVERHEAD;
-    blk_link_next(prev_blk);
-    return prev_blk;
-}
-
-/* Merge a just-freed block with an adjacent previous free block. */
-static mmheap_blk_t *blk_merge_prev(mmheap_blk_t *blk)
-{
-    mmheap_blk_t *prev_blk;
-
-    if (blk_is_prev_free(blk)) {
-        prev_blk = blk_prev(blk);
-        blk_remove(prev_blk);
-        blk = blk_absorb(prev_blk, blk);
-    }
-
-    return blk;
-}
-
-/* Merge a just-freed block with an adjacent free block. */
-static mmheap_blk_t *blk_merge_next(mmheap_blk_t *blk)
-{
-    mmheap_blk_t *next_blk;
-
-    next_blk = blk_next(blk);
-
-    if (blk_is_free(next_blk)) {
-        blk_remove(next_blk);
-        blk = blk_absorb(blk, next_blk);
-    }
-
-    return blk;
-}
-
-/* Trim any trailing block space off the end of a block, return to pool. */
-static void blk_trim_free(mmheap_blk_t *blk, size_t size)
-{
-    mmheap_blk_t *remaining_blk;
-
-    if (blk_can_split(blk, size)) {
-        remaining_blk = blk_split(blk, size);
-        blk_link_next(blk);
-        blk_set_prev_free(remaining_blk);
-        blk_insert(remaining_blk);
-    }
-}
-
-/* Trim any trailing block space off the end of a used block, return to pool. */
-static void blk_trim_used(mmheap_blk_t *blk, size_t size)
-{
-    mmheap_blk_t *remaining_blk;
-
-    if (blk_can_split(blk, size)) {
-        /* If the next block is free, we must coalesce. */
-        remaining_blk = blk_split(blk, size);
-        blk_set_prev_used(remaining_blk);
-
-        remaining_blk = blk_merge_next(remaining_blk);
-        blk_insert(remaining_blk);
-    }
-}
-
-static mmheap_blk_t *blk_trim_free_leading(mmheap_blk_t *blk, size_t size)
-{
-    mmheap_blk_t *remaining_blk;
-
-    remaining_blk = blk;
-
-    if (blk_can_split(blk, size)) {
-        /* We want the 2nd block. */
-        remaining_blk = blk_split(blk, size - MMHEAP_BLK_HEADER_OVERHEAD);
-        blk_set_prev_free(remaining_blk);
-
-        blk_link_next(blk);
-        blk_insert(blk);
-    }
-
-    return remaining_blk;
-}
-
-static mmheap_blk_t *blk_locate_free(size_t size)
-{
-    int fl = 0, sl = 0;
-    mmheap_blk_t *blk = NULL;
-
-    if (!size) {
-        return NULL;
-    }
-
-    mapping_search(size, &fl, &sl);
-
-    /*
-    ** mapping_search can futz with the size, so for excessively large sizes it can sometimes wind up
-    ** with indices that are off the end of the block array.
-    ** So, we protect against that here, since this is the only callsite of mapping_search.
-    ** Note that we don't need to check sl, since it comes from a modulo operation that guarantees it's always in range.
-    */
-    if (fl < MMHEAP_FL_INDEX_COUNT) {
-        blk = blk_search_suitable(&fl, &sl);
-    }
-
-    if (blk) {
-        remove_free_block(blk, fl, sl);
-    }
-
-    return blk;
-}
-
-static void *blk_prepare_used(mmheap_blk_t *blk, size_t size)
-{
-    if (!blk) {
-        return NULL;
-    }
-
-    blk_trim_free(blk, size);
-    blk_mark_as_used(blk);
-    return blk_to_ptr(blk);
-}
-
-static void control_construct(void)
-{
-    int i, j;
-
-    mmheap_ctl.pool_cnt = 0u;
-
-    for (i = 0; i < MMHEAP_POOL_MAX; ++i) {
-        mmheap_ctl.pool_start[i] = (void *)NULL;
-    }
-
-    mmheap_ctl.block_null.next_free = &mmheap_ctl.block_null;
-    mmheap_ctl.block_null.prev_free = &mmheap_ctl.block_null;
-
-    mmheap_ctl.fl_bitmap = 0;
-
-    for (i = 0; i < MMHEAP_FL_INDEX_COUNT; ++i) {
-        mmheap_ctl.sl_bitmap[i] = 0;
-
-        for (j = 0; j < MMHEAP_SL_INDEX_COUNT; ++j) {
-            mmheap_ctl.blocks[i][j] = &mmheap_ctl.block_null;
-        }
-    }
-}
-
-static inline int mmheap_pool_is_full(void)
-{
-    return mmheap_ctl.pool_cnt == MMHEAP_POOL_MAX;
-}
-
-static int mmheap_pool_is_exist(void *pool_start)
-{
-    int i = 0;
-
-    for (i = 0; i < mmheap_ctl.pool_cnt; ++i) {
-        if (mmheap_ctl.pool_start[i] == pool_start) {
-            return 1;
-        }
-    }
-
-    return 0;
-}
-
-static inline void mmheap_pool_record(void *pool_start)
-{
-    mmheap_ctl.pool_start[mmheap_ctl.pool_cnt++] = pool_start;
-}
-
-static void mmheap_pool_unrecord(void *pool_start)
-{
-    int i = 0;
-
-    for (i = 0; i < mmheap_ctl.pool_cnt; ++i) {
-        if (mmheap_ctl.pool_start[i] == pool_start) {
-            break;
-        }
-    }
-
-    if (i != mmheap_ctl.pool_cnt - 1) {
-        mmheap_ctl.pool_start[i] = mmheap_ctl.pool_start[mmheap_ctl.pool_cnt - 1];
-    }
-
-    --mmheap_ctl.pool_cnt;
-}
-
-int mmheap_init_with_pool(void *pool_start, size_t pool_size)
-{
-    control_construct();
-
-    return mmheap_pool_add(pool_start, pool_size);
-}
-
-void *mmheap_alloc(size_t size)
-{
-    size_t adjust_size;
-    mmheap_blk_t *blk;
-
-    adjust_size = adjust_request_size(size, MMHEAP_ALIGN_SIZE);
-    blk = blk_locate_free(adjust_size);
-
-    if (!blk) {
-        return NULL;
-    }
-
-    return blk_prepare_used(blk, adjust_size);
-}
-
-void *mmheap_calloc(size_t num, size_t size)
-{
-    void *ptr;
-
-    ptr = mmheap_alloc(num * size);
-
-    if (ptr) {
-        memset(ptr, 0, num * size);
-    }
-
-    return ptr;
-}
-
-void *mmheap_aligned_alloc(size_t size, size_t align)
-{
-    mmheap_blk_t *blk;
-    void *ptr, *aligned, *next_aligned;
-    size_t adjust_size, aligned_size;
-    size_t gap_minimum, size_with_gap, gap, gap_remain, offset;
-
-    adjust_size = adjust_request_size(size, MMHEAP_ALIGN_SIZE);
-    gap_minimum = sizeof(mmheap_blk_t);
-    size_with_gap = adjust_request_size(adjust_size + align + gap_minimum, align);
-    aligned_size = (adjust_size && align > MMHEAP_ALIGN_SIZE) ? size_with_gap : adjust_size;
-
-    blk = blk_locate_free(aligned_size);
-
-    if (!blk) {
-        return NULL;
-    }
-
-    ptr = blk_to_ptr(blk);
-    aligned = align_ptr(ptr, align);
-    gap = (size_t)((uint32_t)(uintptr_t)aligned - (uint32_t)(uintptr_t)ptr);
-
-    if (gap && gap < gap_minimum) {
-        gap_remain = gap_minimum - gap;
-        offset = gap_remain > align ? gap_remain : align;
-        next_aligned = (void *)((uint32_t)(uintptr_t)aligned + offset);
-
-        aligned = align_ptr(next_aligned, align);
-        gap = (size_t)((uint32_t)(uintptr_t)aligned - (uint32_t)(uintptr_t)ptr);
-    }
-
-    if (gap) {
-        blk = blk_trim_free_leading(blk, gap);
-    }
-
-    return blk_prepare_used(blk, adjust_size);
-}
-
-void mmheap_free(void *ptr)
-{
-    mmheap_blk_t *blk;
-
-    if (!ptr) {
-        return;
-    }
-
-    blk = blk_from_ptr(ptr);
-    blk_mark_as_free(blk);
-    blk = blk_merge_prev(blk);
-    blk = blk_merge_next(blk);
-    blk_insert(blk);
-}
-
-void *mmheap_realloc(void *ptr, size_t size)
-{
-    void *p = 0;
-    mmheap_blk_t *curr_blk, *next_blk;
-    size_t curr_size, combined_size, adjust_size, min_size;
-
-    if (ptr && size == 0) {
-        mmheap_free(ptr);
-        return NULL;
-    }
-
-    if (!ptr) {
-        return mmheap_alloc(size);
-    }
-
-    curr_blk = blk_from_ptr(ptr);
-    next_blk = blk_next(curr_blk);
-
-    curr_size = blk_size(curr_blk);
-    combined_size = curr_size + blk_size(next_blk) + MMHEAP_BLK_HEADER_OVERHEAD;
-    adjust_size = adjust_request_size(size, MMHEAP_ALIGN_SIZE);
-
-    if (adjust_size > curr_size && (!blk_is_free(next_blk) || adjust_size > combined_size)) {
-        p = mmheap_alloc(size);
-
-        if (p) {
-            min_size = curr_size < size ? curr_size : size;
-            memcpy(p, ptr, min_size);
-            mmheap_free(ptr);
-        }
-    } else {
-        if (adjust_size > curr_size) {
-            blk_merge_next(curr_blk);
-            blk_mark_as_used(curr_blk);
-        }
-
-        blk_trim_used(curr_blk, adjust_size);
-        p = ptr;
-    }
-
-    return p;
-}
-
-int mmheap_pool_add(void *pool_start, size_t pool_size)
-{
-    mmheap_blk_t *curr_blk;
-    mmheap_blk_t *next_blk;
-    size_t size_aligned;
-
-    if (mmheap_pool_is_full()) {
-        return MEMHEAP_STATUS_OVERFLOW;
-    }
-
-    if (mmheap_pool_is_exist(pool_start)) {
-        return MEMHEAP_STATUS_ALREADY_EXIST;
-    }
-
-    size_aligned = align_down(pool_size - 2 * MMHEAP_BLK_HEADER_OVERHEAD, MMHEAP_ALIGN_SIZE);
-
-    if (((uint32_t)(uintptr_t)pool_start % MMHEAP_ALIGN_SIZE) != 0u) {
-        return MEMHEAP_STATUS_INVALID_ADDR;
-    }
-
-    if (size_aligned < MMHEAP_BLK_SIZE_MIN ||
-        size_aligned > MMHEAP_BLK_SIZE_MAX) {
-        return MEMHEAP_STATUS_INVALID_SIZE;
-    }
-
-    /*
-    ** Create the main free block. Offset the start of the block slightly
-    ** so that the prev_phys_block field falls outside of the pool -
-    ** it will never be used.
-    */
-    curr_blk = offset_to_blk(pool_start, -(int)MMHEAP_BLK_HEADER_OVERHEAD);
-    blk_set_size(curr_blk, size_aligned);
-    blk_set_free(curr_blk);
-    blk_set_prev_used(curr_blk);
-    blk_insert(curr_blk);
-
-    /* Split the block to create a zero-size sentinel block. */
-    next_blk = blk_link_next(curr_blk);
-    blk_set_size(next_blk, 0);
-    blk_set_used(next_blk);
-    blk_set_prev_free(next_blk);
-
-    mmheap_pool_record(pool_start);
-
-    return MEMHEAP_STATUS_OK;
-}
-
-int mmheap_pool_rmv(void *pool_start)
-{
-    int fl = 0, sl = 0;
-    mmheap_blk_t *blk;
-
-    if (!mmheap_pool_is_exist(pool_start)) {
-        return MEMHEAP_STATUS_ALREADY_NOT_EXIST;
-    }
-
-    blk = offset_to_blk(pool_start, -(int)MMHEAP_BLK_HEADER_OVERHEAD);
-    mapping_insert(blk_size(blk), &fl, &sl);
-    remove_free_block(blk, fl, sl);
-
-    mmheap_pool_unrecord(pool_start);
-    return MEMHEAP_STATUS_OK;
-}
-
-int mmheap_pool_check(void *pool_start, mmheap_info_t *info)
-{
-    mmheap_blk_t *blk;
-
-    memset(info, 0, sizeof(mmheap_info_t));
-
-    blk = offset_to_blk(pool_start, -(int)MMHEAP_BLK_HEADER_OVERHEAD);
-
-    while (blk && !blk_is_last(blk)) {
-        if (blk_is_free(blk)) {
-            info->free += blk_size(blk);
-        } else {
-            info->used += blk_size(blk);
-        }
-
-        blk = blk_next(blk);
-    }
-
-    return MEMHEAP_STATUS_OK;
-}
-
-int mmheap_check(mmheap_info_t *info)
-{
-    int i;
-    int err;
-    mmheap_info_t pool_info;
-
-    memset(info, 0, sizeof(mmheap_info_t));
-
-    for (i = 0; i < mmheap_ctl.pool_cnt; ++i) {
-        err = mmheap_pool_check(mmheap_ctl.pool_start[i], &pool_info);
-
-        if (err != MEMHEAP_STATUS_OK) {
-            return err;
-        }
-
-        info->free += pool_info.free;
-        info->used += pool_info.used;
-    }
-
-    return 0;
-}
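The code deleted above is a TLSF (two-level segregated fit) allocator: mapping_insert() splits a request size into a first-level index (power-of-two range) and a second-level index (linear subdivision of that range), so a suitable free block is found in O(1) via two bitmap scans. The replacement below trades that for a single address-ordered free list with first-fit search and merge-on-free. A standalone sketch of the removed index computation, using the header's constants (MMHEAP_SL_INDEX_COUNT_LOG2 = 5, MMHEAP_ALIGN_SIZE_LOG2 = 2) and a simple fls() stand-in:

    /* Standalone check of the removed TLSF mapping; not part of the new allocator. */
    #include <stdio.h>

    #define SL_LOG2  5               /* MMHEAP_SL_INDEX_COUNT_LOG2 */
    #define FL_SHIFT (SL_LOG2 + 2)   /* MMHEAP_FL_INDEX_SHIFT */
    #define SMALL    (1 << FL_SHIFT) /* MMHEAP_SMALL_BLOCK_SIZE = 128 */

    static int fls32(unsigned int v)
    {
        int r = -1;

        while (v) { /* index of the highest set bit */
            v >>= 1;
            r++;
        }
        return r;
    }

    static void mapping(size_t size, int *fl, int *sl)
    {
        if (size < SMALL) { /* small blocks all live in the 0th first-level list */
            *fl = 0;
            *sl = (int)size / (SMALL / (1 << SL_LOG2));
        } else {
            *fl = fls32((unsigned int)size);
            *sl = ((int)size >> (*fl - SL_LOG2)) ^ (1 << SL_LOG2);
            *fl -= (FL_SHIFT - 1);
        }
    }

    int main(void)
    {
        int fl, sl;

        mapping(200, &fl, &sl); /* 200 bytes -> fl = 1, sl = 18 */
        printf("fl=%d sl=%d\n", fl, sl);
        return 0;
    }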
+/**
+ * @file drv_mmheap.c
+ * @brief
+ *
+ * Copyright (c) 2021 Bouffalolab team
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "drv_mmheap.h"
+
+#define MEM_MANAGE_ALIGNMENT_BYTE_DEFAULT 8
+#define MEM_MANAGE_BITS_PER_BYTE          8
+#define MEM_MANAGE_MEM_STRUCT_SIZE        mmheap_align_up(sizeof(struct heap_node), MEM_MANAGE_ALIGNMENT_BYTE_DEFAULT)
+#define MEM_MANAGE_MINUM_MEM_SIZE         (MEM_MANAGE_MEM_STRUCT_SIZE << 1)
+#define MEM_MANAGE_ALLOCA_LABAL           ((size_t)(1 << (sizeof(size_t) * MEM_MANAGE_BITS_PER_BYTE - 1)))
+
+static inline size_t mmheap_align_down(size_t data, size_t align_byte)
+{
+    return data & ~(align_byte - 1);
+}
+
+static inline size_t mmheap_align_up(size_t data, size_t align_byte)
+{
+    return (data + align_byte - 1) & ~(align_byte - 1);
+}
+
+static inline struct heap_node *mmheap_addr_sub(const void *addr)
+{
+    return (struct heap_node *)((const uint8_t *)addr - MEM_MANAGE_MEM_STRUCT_SIZE);
+}
+
+static inline void *mmheap_addr_add(const struct heap_node *mem_node)
+{
+    return (void *)((const uint8_t *)mem_node + MEM_MANAGE_MEM_STRUCT_SIZE);
+}
+
+/**
+ * @brief Insert a free node into the address-ordered free list, merging it
+ *        with physically adjacent neighbours where possible.
+ *
+ * @param pRoot
+ * @param pNode
+ */
+static inline void mmheap_insert_node_to_freelist(struct heap_info *pRoot, struct heap_node *pNode)
+{
+    struct heap_node *pPriv_Node;
+    struct heap_node *pNext_Node;
+
+    /* Find the free node whose address immediately precedes pNode. */
+    for (pPriv_Node = pRoot->pStart; pPriv_Node->next_node < pNode; pPriv_Node = pPriv_Node->next_node) {
+    }
+
+    pNext_Node = pPriv_Node->next_node;
+
+    /* Try to merge pNode with the preceding block. */
+    if ((uint8_t *)mmheap_addr_add(pPriv_Node) + pPriv_Node->mem_size == (uint8_t *)pNode) {
+        if (pPriv_Node != pRoot->pStart) { /* can merge only if the neighbour is not the start sentinel */
+            pPriv_Node->mem_size += MEM_MANAGE_MEM_STRUCT_SIZE + pNode->mem_size;
+            pNode = pPriv_Node;
+        } else {
+            /* Never merge into the start sentinel itself; just link pNode after it. */
+            pRoot->pStart->next_node = pNode;
+        }
+    } else {
+        /* Not physically adjacent: link pNode into the singly linked free list. */
+        pPriv_Node->next_node = pNode;
+    }
+
+    /* Try to merge pNode with the following block. */
+    if ((uint8_t *)mmheap_addr_add(pNode) + pNode->mem_size == (uint8_t *)pNext_Node) {
+        if (pNext_Node != pRoot->pEnd) {
+            pNode->mem_size += MEM_MANAGE_MEM_STRUCT_SIZE + pNext_Node->mem_size;
+            pNode->next_node = pNext_Node->next_node;
+        } else {
+            pNode->next_node = pRoot->pEnd;
+        }
+    } else {
+        /* Not physically adjacent: just point at the next free node. */
+        pNode->next_node = pNext_Node;
+    }
+}
+
+/**
+ * @brief Walk the free list and gather usage statistics.
+ *
+ * @param pRoot
+ * @param pState
+ */
+void mmheap_get_state(struct heap_info *pRoot, struct heap_state *pState)
+{
+    MMHEAP_ASSERT(pRoot->pStart != NULL);
+    MMHEAP_ASSERT(pRoot->pEnd != NULL);
+
+    pState->max_node_size = pRoot->pStart->next_node->mem_size;
+    pState->min_node_size = pRoot->pStart->next_node->mem_size;
+    pState->remain_size = 0;
+    pState->free_node_num = 0;
+
+    MMHEAP_LOCK();
+    for (struct heap_node *pNode = pRoot->pStart->next_node; pNode->next_node != NULL; pNode = pNode->next_node) {
+        pState->remain_size += pNode->mem_size;
+        pState->free_node_num++;
+
+        if (pNode->mem_size > pState->max_node_size)
+            pState->max_node_size = pNode->mem_size;
+
+        if (pNode->mem_size < pState->min_node_size)
+            pState->min_node_size = pNode->mem_size;
+    }
+    MMHEAP_UNLOCK();
+}
+
+/**
+ * @brief Allocate want_size bytes aligned to align_size (a power of two).
+ *
+ * @param pRoot
+ * @param align_size
+ * @param want_size
+ * @return void*
+ */
+void *mmheap_align_alloc(struct heap_info *pRoot, size_t align_size, size_t want_size)
+{
+    void *pReturn = NULL;
+    struct heap_node *pPriv_Node, *pNow_Node;
+
+    MMHEAP_ASSERT(pRoot->pStart != NULL);
+    MMHEAP_ASSERT(pRoot->pEnd != NULL);
+
+    if (want_size == 0) {
+        return NULL;
+    }
+
+    if ((want_size & MEM_MANAGE_ALLOCA_LABAL) != 0) {
+        MMHEAP_MALLOC_FAIL();
+        return NULL;
+    }
+
+    if (align_size & (align_size - 1)) {
+        MMHEAP_MALLOC_FAIL();
+        return NULL;
+    }
+
+    MMHEAP_LOCK();
+    if (want_size < MEM_MANAGE_MINUM_MEM_SIZE)
+        want_size = MEM_MANAGE_MINUM_MEM_SIZE;
+
+    if (align_size < MEM_MANAGE_ALIGNMENT_BYTE_DEFAULT)
+        align_size = MEM_MANAGE_ALIGNMENT_BYTE_DEFAULT;
+
+    want_size = mmheap_align_up(want_size, MEM_MANAGE_ALIGNMENT_BYTE_DEFAULT);
+
+    pPriv_Node = pRoot->pStart;
+    pNow_Node = pRoot->pStart->next_node;
+
+    while (pNow_Node->next_node != NULL) {
+        if (pNow_Node->mem_size >= want_size + MEM_MANAGE_MEM_STRUCT_SIZE) {
+            size_t use_align_size;
+            size_t new_size;
+
+            pReturn = (void *)mmheap_align_up((size_t)mmheap_addr_add(pNow_Node), align_size); /* candidate aligned address */
+            use_align_size = (uint8_t *)pReturn - (uint8_t *)mmheap_addr_add(pNow_Node);       /* bytes consumed by alignment padding */
+            if (use_align_size != 0) { /* the candidate address needed padding */
+                if (use_align_size < MEM_MANAGE_MINUM_MEM_SIZE + MEM_MANAGE_MEM_STRUCT_SIZE) { /* padding too small to split off a block; move to the next aligned address */
+                    pReturn = (void *)mmheap_align_up(
+                        (size_t)mmheap_addr_add(pNow_Node) + MEM_MANAGE_MINUM_MEM_SIZE + MEM_MANAGE_MEM_STRUCT_SIZE, align_size);
+                    use_align_size = (uint8_t *)pReturn - (uint8_t *)mmheap_addr_add(pNow_Node);
+                }
+
+                if (use_align_size <= pNow_Node->mem_size) {
+                    new_size = pNow_Node->mem_size - use_align_size; /* space left once the alignment padding is removed */
+                    if (new_size >= want_size) {                     /* large enough to satisfy the request */
+                        struct heap_node *pNew_Node = mmheap_addr_sub(pReturn);
+                        pNow_Node->mem_size -= new_size + MEM_MANAGE_MEM_STRUCT_SIZE; /* split the node */
+                        pNew_Node->mem_size = new_size; /* the new node is not on the free list, so nothing needs unlinking */
+                        pNew_Node->next_node = NULL;
+                        pNow_Node = pNew_Node;
+                        break;
+                    }
+                }
+            } else { /* already aligned: unlink the node from the free list */
+                pPriv_Node->next_node = pNow_Node->next_node;
+                pNow_Node->next_node = NULL;
+                break;
+            }
+        }
+        pPriv_Node = pNow_Node;
+        pNow_Node = pNow_Node->next_node;
+    }
+
+    if (pNow_Node == pRoot->pEnd) {
+        MMHEAP_UNLOCK();
+        MMHEAP_MALLOC_FAIL();
+        return NULL;
+    }
+
+    if (pNow_Node->mem_size >= MEM_MANAGE_MINUM_MEM_SIZE + MEM_MANAGE_MEM_STRUCT_SIZE + want_size) { /* enough space left to split off a free node */
+        struct heap_node *pNew_Node = (struct heap_node *)((uint8_t *)mmheap_addr_add(pNow_Node) + want_size); /* node that will be returned to the free list */
+        pNew_Node->mem_size = pNow_Node->mem_size - want_size - MEM_MANAGE_MEM_STRUCT_SIZE;
+        pNew_Node->next_node = NULL;
+        pNow_Node->mem_size = want_size;
+        mmheap_insert_node_to_freelist(pRoot, pNew_Node);
+    }
+
+    pNow_Node->mem_size |= MEM_MANAGE_ALLOCA_LABAL; /* tag the node as allocated */
+    MMHEAP_UNLOCK();
+    return pReturn;
+}
+
+/**
+ * @brief Allocate want_size bytes with the default alignment.
+ *
+ * @param pRoot
+ * @param want_size
+ * @return void*
+ */
+void *mmheap_alloc(struct heap_info *pRoot, size_t want_size)
+{
+    return mmheap_align_alloc(pRoot, MEM_MANAGE_ALIGNMENT_BYTE_DEFAULT, want_size);
+}
+
+/**
+ * @brief Resize an allocation, growing in place when the adjacent free
+ *        block allows it.
+ *
+ * @param pRoot
+ * @param src_addr
+ * @param want_size
+ * @return void*
+ */
+void *mmheap_realloc(struct heap_info *pRoot, void *src_addr, size_t want_size)
+{
+    void *pReturn = NULL;
+    struct heap_node *pNext_Node, *pPriv_Node;
+    struct heap_node *pSrc_Node;
+
+    MMHEAP_ASSERT(pRoot->pStart != NULL);
+    MMHEAP_ASSERT(pRoot->pEnd != NULL);
+
+    if (src_addr == NULL) {
+        return mmheap_align_alloc(pRoot, MEM_MANAGE_ALIGNMENT_BYTE_DEFAULT, want_size);
+    }
+
+    if (want_size == 0) {
+        mmheap_free(pRoot, src_addr);
+        return NULL;
+    }
+
+    MMHEAP_LOCK();
+    if ((want_size & MEM_MANAGE_ALLOCA_LABAL) != 0) {
+        MMHEAP_UNLOCK();
+        MMHEAP_MALLOC_FAIL();
+        return NULL;
+    }
+
+    pSrc_Node = mmheap_addr_sub(src_addr);
+
+    if ((pSrc_Node->mem_size & MEM_MANAGE_ALLOCA_LABAL) == 0) { /* not an allocated block */
+        MMHEAP_UNLOCK();
+        MMHEAP_ASSERT((pSrc_Node->mem_size & MEM_MANAGE_ALLOCA_LABAL) != 0);
+        MMHEAP_MALLOC_FAIL();
+        return NULL;
+    }
+
+    pSrc_Node->mem_size &= ~MEM_MANAGE_ALLOCA_LABAL;
+    if (pSrc_Node->mem_size >= want_size) { /* the current block is already big enough */
+        pSrc_Node->mem_size |= MEM_MANAGE_ALLOCA_LABAL;
+        pReturn = src_addr;
+        MMHEAP_UNLOCK();
+        return pReturn;
+    }
+
+    /* Find the free node immediately preceding this block. */
+    for (pPriv_Node = pRoot->pStart; pPriv_Node->next_node < pSrc_Node; pPriv_Node = pPriv_Node->next_node) {
+    }
+    pNext_Node = pPriv_Node->next_node;
+
+    if (pNext_Node != pRoot->pEnd &&
+        ((uint8_t *)src_addr + pSrc_Node->mem_size == (uint8_t *)pNext_Node) &&
+        (pSrc_Node->mem_size + pNext_Node->mem_size + MEM_MANAGE_MEM_STRUCT_SIZE >= want_size)) {
+        /* The next node is not the end sentinel, is physically contiguous, and leaves enough memory: grow in place. */
+        pReturn = src_addr;
+        pPriv_Node->next_node = pNext_Node->next_node;
+        pSrc_Node->mem_size += MEM_MANAGE_MEM_STRUCT_SIZE + pNext_Node->mem_size;
+        want_size = mmheap_align_up(want_size, MEM_MANAGE_ALIGNMENT_BYTE_DEFAULT);
+        if (pSrc_Node->mem_size >= MEM_MANAGE_MINUM_MEM_SIZE + MEM_MANAGE_MEM_STRUCT_SIZE + want_size) { /* the remainder is large enough to split off a new free block */
+            struct heap_node *pNew_Node = (struct heap_node *)((uint8_t *)mmheap_addr_add(pSrc_Node) + want_size);
+            pNew_Node->next_node = NULL;
+            pNew_Node->mem_size = pSrc_Node->mem_size - want_size - MEM_MANAGE_MEM_STRUCT_SIZE;
+            pSrc_Node->mem_size = want_size;
+            mmheap_insert_node_to_freelist(pRoot, pNew_Node);
+        }
+        pSrc_Node->mem_size |= MEM_MANAGE_ALLOCA_LABAL;
+        MMHEAP_UNLOCK();
+    } else {
+        /* Cannot grow in place: allocate a new block, copy, then free the old one. */
+        MMHEAP_UNLOCK();
+        pReturn = mmheap_align_alloc(pRoot, MEM_MANAGE_ALIGNMENT_BYTE_DEFAULT, want_size);
+        if (pReturn == NULL) {
+            pSrc_Node->mem_size |= MEM_MANAGE_ALLOCA_LABAL;
+            MMHEAP_MALLOC_FAIL();
+            return NULL;
+        }
+        MMHEAP_LOCK();
+        memcpy(pReturn, src_addr, pSrc_Node->mem_size);
+        pSrc_Node->mem_size |= MEM_MANAGE_ALLOCA_LABAL;
+        MMHEAP_UNLOCK();
+        mmheap_free(pRoot, src_addr);
+    }
+    return pReturn;
+}
+
+/**
+ * @brief Return a block to the heap.
+ *
+ * @param pRoot
+ * @param addr
+ */
+void mmheap_free(struct heap_info *pRoot, void *addr)
+{
+    struct heap_node *pFree_Node;
+
+    MMHEAP_ASSERT(pRoot->pStart != NULL);
+    MMHEAP_ASSERT(pRoot->pEnd != NULL);
+
+    MMHEAP_LOCK();
+    if (addr == NULL) {
+        MMHEAP_UNLOCK();
+        return;
+    }
+
+    pFree_Node = mmheap_addr_sub(addr);
+
+    if ((pFree_Node->mem_size & MEM_MANAGE_ALLOCA_LABAL) == 0) { /* not an allocated block */
+        MMHEAP_UNLOCK();
+        MMHEAP_ASSERT((pFree_Node->mem_size & MEM_MANAGE_ALLOCA_LABAL) != 0);
+        return;
+    }
+
+    if (pFree_Node->next_node != NULL) { /* already on the free list: double free */
+        MMHEAP_UNLOCK();
+        MMHEAP_ASSERT(pFree_Node->next_node == NULL);
+        return;
+    }
+
+    pFree_Node->mem_size &= ~MEM_MANAGE_ALLOCA_LABAL;
+    mmheap_insert_node_to_freelist(pRoot, pFree_Node);
+    MMHEAP_UNLOCK();
+}
+
+/**
+ * @brief Initialize a heap from a NULL-terminated array of memory regions.
+ *
+ * @param pRoot
+ * @param pRegion
+ */
+void mmheap_init(struct heap_info *pRoot, const struct heap_region *pRegion)
+{
+    struct heap_node *align_addr;
+    size_t align_size;
+    struct heap_node *pPriv_node = NULL;
+
+    pRoot->total_size = 0;
+    pRoot->pEnd = NULL;
+    pRoot->pStart = NULL;
+
+    for (; pRegion->addr != NULL; pRegion++) {
+        align_addr = (struct heap_node *)mmheap_align_up((size_t)pRegion->addr, MEM_MANAGE_ALIGNMENT_BYTE_DEFAULT); /* aligned start of the region */
+        if ((uint8_t *)align_addr > pRegion->mem_size + (uint8_t *)pRegion->addr) /* alignment consumed more than the whole region */
+            continue;
+
+        align_size = pRegion->mem_size - ((uint8_t *)align_addr - (uint8_t *)pRegion->addr); /* memory left after alignment */
+        if (align_size < MEM_MANAGE_MINUM_MEM_SIZE + MEM_MANAGE_MEM_STRUCT_SIZE) /* too little memory remains after alignment */
+            continue;
+
+        align_size -= MEM_MANAGE_MEM_STRUCT_SIZE; /* usable block size once the node header is removed */
+        align_addr->mem_size = align_size;
+        align_addr->next_node = NULL;
+
+        if (pRoot->pStart == NULL) {
+            pRoot->pStart = align_addr; /* first usable region: it carries the start sentinel */
+            if (align_size >= MEM_MANAGE_MINUM_MEM_SIZE + MEM_MANAGE_MEM_STRUCT_SIZE) { /* the region can hold a real block as well */
+                align_size -= MEM_MANAGE_MEM_STRUCT_SIZE; /* size of that block after its own header */
+                align_addr = (struct heap_node *)((uint8_t *)pRoot->pStart + MEM_MANAGE_MEM_STRUCT_SIZE); /* address of the first real block */
+                align_addr->mem_size = align_size;
+                align_addr->next_node = NULL;
+                pRoot->pStart->mem_size = 0;
+                pRoot->pStart->next_node = align_addr;
+                pRoot->total_size = align_addr->mem_size;
+            } else { /* the region is too small: it only holds the start sentinel */
+                pRoot->total_size = 0;
+                pRoot->pStart->mem_size = 0;
+            }
+        } else {
+            pPriv_node->next_node = align_addr;
+            pRoot->total_size += align_size;
+        }
+        pPriv_node = align_addr;
+    }
+
+    /*
+     * pPriv_node is now the last block, so the end sentinel is placed at its
+     * tail. The sentinel only terminates free-list walks, so it is kept as
+     * small as possible: MEM_MANAGE_MEM_STRUCT_SIZE bytes.
+     */
+    align_addr = (struct heap_node *)mmheap_align_down(
+        (size_t)mmheap_addr_add(pPriv_node) + pPriv_node->mem_size - MEM_MANAGE_MEM_STRUCT_SIZE, MEM_MANAGE_ALIGNMENT_BYTE_DEFAULT);
+    align_size = (uint8_t *)align_addr - (uint8_t *)mmheap_addr_add(pPriv_node); /* what remains of the last block once the sentinel is carved out */
+    if (align_size >= MEM_MANAGE_MINUM_MEM_SIZE) {
+        pRoot->total_size -= pPriv_node->mem_size - align_size; /* subtract the memory consumed by the sentinel */
+        pRoot->pEnd = align_addr;                               /* new end of the list */
+        pPriv_node->next_node = align_addr;
+        pPriv_node->mem_size = align_size;
+        align_addr->next_node = NULL;
+        align_addr->mem_size = 0; /* the sentinel never takes part in allocation */
+    } else { /* the last block is too small: use it directly as the end sentinel */
+        pRoot->pEnd = pPriv_node;
+        pRoot->total_size -= pPriv_node->mem_size;
+    }
+
+    MMHEAP_ASSERT(pRoot->pStart != NULL);
+    MMHEAP_ASSERT(pRoot->pEnd != NULL);
+}
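Taken together, the new allocator is initialized from a NULL-terminated region table, and every call now carries an explicit heap_info handle instead of the old global mmheap_ctl. A usage sketch (field names follow the code above; the pool size and alignment values are illustrative only):

    /* Sketch: one static pool managed by the new free-list allocator. */
    #include "drv_mmheap.h"

    static uint8_t heap_pool[16 * 1024] __attribute__((aligned(8)));

    static struct heap_info root;
    static const struct heap_region regions[] = {
        { .addr = heap_pool, .mem_size = sizeof(heap_pool) },
        { .addr = NULL, .mem_size = 0 }, /* NULL addr terminates the table */
    };

    void heap_demo(void)
    {
        mmheap_init(&root, regions);

        void *p = mmheap_alloc(&root, 256);          /* default 8-byte alignment */
        void *q = mmheap_align_alloc(&root, 32, 64); /* 64 bytes, 32-byte aligned */

        p = mmheap_realloc(&root, p, 512); /* grows in place when the next block is free */

        mmheap_free(&root, p);
        mmheap_free(&root, q);
    }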
diff --git a/common/memheap/drv_mmheap.h b/common/memheap/drv_mmheap.h
index df76969f..cf4595da 100644
--- a/common/memheap/drv_mmheap.h
+++ b/common/memheap/drv_mmheap.h
@@ -1,194 +1,143 @@
-/**
- * @file drv_mmheap.h
- * @brief
- *
- * Copyright (c) 2021 Bouffalolab team
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership. The
- * ASF licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the
- * License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- *
- */
-#ifndef _DRV_MMHEAP_H_
-#define _DRV_MMHEAP_H_
-
-#include "stdint.h"
-#include "string.h"
-
-#define MEMHEAP_STATUS_OK                0
-#define MEMHEAP_STATUS_INVALID_ADDR      -1
-#define MEMHEAP_STATUS_INVALID_SIZE      -2
-#define MEMHEAP_STATUS_OVERFLOW          -3
-#define MEMHEAP_STATUS_ALREADY_NOT_EXIST -4
-#define MEMHEAP_STATUS_ALREADY_EXIST     -5
-
-/**
- * log2 of number of linear subdivisions of block sizes. Larger
- * values require more memory in the control structure. Values of
- * 4 or 5 are typical.
- */
-#define MMHEAP_SL_INDEX_COUNT_LOG2 5
-
-/* All allocation sizes and addresses are aligned to 4 bytes. */
-#define MMHEAP_ALIGN_SIZE_LOG2 2
-#define MMHEAP_ALIGN_SIZE      (1 << MMHEAP_ALIGN_SIZE_LOG2)
-
-/*
- * We support allocations of sizes up to (1 << MMHEAP_FL_INDEX_MAX) bits.
- * However, because we linearly subdivide the second-level lists, and
- * our minimum size granularity is 4 bytes, it doesn't make sense to
- * create first-level lists for sizes smaller than MMHEAP_SL_INDEX_COUNT * 4,
- * or (1 << (K_MMHEAP_SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
- * trying to split size ranges into more slots than we have available.
- * Instead, we calculate the minimum threshold size, and place all
- * blocks below that size into the 0th first-level list.
- */
-#define MMHEAP_FL_INDEX_MAX   30
-#define MMHEAP_SL_INDEX_COUNT (1 << MMHEAP_SL_INDEX_COUNT_LOG2)
-#define MMHEAP_FL_INDEX_SHIFT (MMHEAP_SL_INDEX_COUNT_LOG2 + MMHEAP_ALIGN_SIZE_LOG2)
-#define MMHEAP_FL_INDEX_COUNT (MMHEAP_FL_INDEX_MAX - MMHEAP_FL_INDEX_SHIFT + 1)
-
-#define MMHEAP_SMALL_BLOCK_SIZE (1 << MMHEAP_FL_INDEX_SHIFT)
-
-#define MMHEAP_BLOCK_CURR_FREE  (1 << 0)
-#define MMHEAP_BLOCK_PREV_FREE  (1 << 1)
-#define MMHEAP_BLOCK_SIZE_MASK  ~(MMHEAP_BLOCK_CURR_FREE | MMHEAP_BLOCK_PREV_FREE)
-#define MMHEAP_BLOCK_STATE_MASK (MMHEAP_BLOCK_CURR_FREE | MMHEAP_BLOCK_PREV_FREE)
-
-typedef struct
-{
-    uint32_t used; /* space is used */
-    uint32_t free; /* space is free */
-} mmheap_info_t;
-
-/**
- * Block structure.
- *
- * There are several implementation subtleties involved:
- * - The prev_phys_block field is only valid if the previous block is free.
- * - The prev_phys_block field is actually stored at the end of the
- *   previous block. It appears at the beginning of this structure only to
- *   simplify the implementation.
- * - The next_free / prev_free fields are only valid if the block is free.
- */
-typedef struct mmheap_blk_st {
-    struct mmheap_blk_st *prev_phys_blk;
-
-    size_t size;
-
-    struct mmheap_blk_st *next_free;
-    struct mmheap_blk_st *prev_free;
-} mmheap_blk_t;
-
-/**
- * A free block must be large enough to store its header minus the size of
- * the prev_phys_block field, and no larger than the number of addressable
- * bits for FL_INDEX.
- */
-#define MMHEAP_BLK_SIZE_MIN (sizeof(mmheap_blk_t) - sizeof(mmheap_blk_t *))
-#define MMHEAP_BLK_SIZE_MAX (1 << MMHEAP_FL_INDEX_MAX)
-
-#define MMHEAP_BLK_HEADER_OVERHEAD (sizeof(size_t))
-#define MMHEAP_BLK_START_OFFSET    (((uint32_t)(uintptr_t) & (((mmheap_blk_t *)0)->size)) + sizeof(size_t))
-
-#define MMHEAP_POOL_MAX 3
-
-/**
- * memory heap control
- */
-typedef struct
-{
-    int pool_cnt;
-    void *pool_start[MMHEAP_POOL_MAX];
-
-    mmheap_blk_t block_null; /**< Empty lists point at this block to indicate they are free. */
-
-    uint32_t fl_bitmap; /**< Bitmaps for free lists. */
-    uint32_t sl_bitmap[MMHEAP_FL_INDEX_COUNT];
-
-    mmheap_blk_t *blocks[MMHEAP_FL_INDEX_COUNT][MMHEAP_SL_INDEX_COUNT]; /**< Head of free lists. */
-} mmheap_ctl_t;
-
-/**
- * @brief Add a pool.
- *        Add addtional pool to the heap.
- *
- * @attention None
- *
- * @param[in] pool_start start address of the pool.
- * @param[in] pool_size size of the pool.
- *
- * @return errcode
- */
-int mmheap_pool_add(void *pool_start, size_t pool_size);
-
-/**
- * @brief Alloc memory.
- *        Allocate size bytes and returns a pointer to the allocated memory.
- *
- * @attention size should no bigger than MMHEAP_BLK_SIZE_MAX.
- *
- * @param[in] size size of the memory.
- *
- * @return the pointer to the allocated memory.
- */
-void *mmheap_alloc(size_t size);
-
-void *mmheap_calloc(size_t num, size_t size);
-
-/**
- * @brief Alloc start address aligned memory from the heap.
- *        Alloc aligned address and specified size memory from the heap.
- *
- * @attention
- *
- * @param[in] size size of the memory.
- * @param[in] align address align mask of the memory.
- *
- * @return the pointer to the allocated memory.
- */
-void *mmheap_aligned_alloc(size_t size, size_t align);
-
-/**
- * @brief Realloc memory from the heap.
- *        Change the size of the memory block pointed to by ptr to size bytes.
- *
- * @attention
- *
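The header diff is truncated above. For completeness, a sketch of querying the new allocator's free list through mmheap_get_state(), whose heap_state fields (remain_size, free_node_num, max_node_size, min_node_size) appear in the drv_mmheap.c diff:

    /* Sketch: report heap occupancy and fragmentation. */
    #include <stdio.h>
    #include "drv_mmheap.h"

    void heap_report(struct heap_info *root)
    {
        struct heap_state st;

        mmheap_get_state(root, &st);
        printf("free %u bytes in %u nodes (largest %u, smallest %u)\n",
               (unsigned)st.remain_size, (unsigned)st.free_node_num,
               (unsigned)st.max_node_size, (unsigned)st.min_node_size);
    }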