#define KB(x) ((x)*1024ull)
#define MB(x) ((KB(x))*1024ull)
#define GB(x) ((MB(x))*1024ull)

#ifndef INIT_MEMORY_SIZE
	#define INIT_MEMORY_SIZE KB(50)
#endif

// We may need to allocate stuff in initialization time before the heap is ready.
// That's what this is for.
u8  init_memory_arena[INIT_MEMORY_SIZE];
u8 *init_memory_head = init_memory_arena;

void* initialization_allocator_proc(u64 size, void *p, Allocator_Message message, void *data) {
	switch (message) {
		case ALLOCATOR_ALLOCATE: {
			p = init_memory_head;
			init_memory_head += size;
			
			if (init_memory_head >= ((u8*)init_memory_arena + INIT_MEMORY_SIZE)) {
				os_write_string_to_stdout(STR("Out of initialization memory! Please provide more by increasing INIT_MEMORY_SIZE"));
				crash();
			}
			
			return p;
			break;
		}
		case ALLOCATOR_DEALLOCATE: {
			return 0;
		}
		case ALLOCATOR_REALLOCATE: {
			return 0;
		}
	}
	return 0;
}

Allocator get_initialization_allocator() {
	Allocator a;
	a.proc = initialization_allocator_proc;
	a.data = 0;
	return a;
}
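
/*
	Usage sketch (illustrative, not part of the original source): code that runs before
	heap_init() can still grab a few bytes by calling this allocator's proc directly.
	ALLOCATOR_DEALLOCATE is a no-op here, so anything allocated this way lives for the
	rest of the program.
	
		Allocator a = get_initialization_allocator();
		u8 *scratch = (u8*)a.proc(64, 0, ALLOCATOR_ALLOCATE, a.data); // 64 bytes from init_memory_arena
*/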

///
///
// Basic general heap allocator, free list
///
// Technically thread safe but synchronization is horrible.
// Fragmentation is catastrophic.
// We could fix it by merging free nodes every now and then
// BUT: We aren't really supposed to allocate/deallocate directly on the heap too much anyways...

#define MAX_HEAP_BLOCK_SIZE     align_next(MB(500), os.page_size)
#define DEFAULT_HEAP_BLOCK_SIZE (min(MAX_HEAP_BLOCK_SIZE, program_memory_capacity))
#define HEAP_ALIGNMENT          (sizeof(Heap_Free_Node))

typedef struct Heap_Free_Node Heap_Free_Node;
typedef struct Heap_Block Heap_Block;

typedef struct Heap_Free_Node {
	u64 size;
	Heap_Free_Node *next;
} Heap_Free_Node;

typedef struct Heap_Block {
	u64 size;
	Heap_Free_Node *free_head;
	void *start;
	Heap_Block *next;
	// 32 bytes !!
#if CONFIGURATION == DEBUG
	u64 total_allocated;
	u64 padding;
#endif
} Heap_Block;

#define HEAP_META_SIGNATURE 6969694206942069ull

typedef alignat(16) struct Heap_Allocation_Metadata {
	u64 size;
	Heap_Block *block;
#if CONFIGURATION == DEBUG
	u64 signature;
	u64 padding;
#endif
} Heap_Allocation_Metadata;
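
/*
	Layout sketch (illustrative): every heap allocation is preceded by a
	Heap_Allocation_Metadata header, so the pointer handed to the user and its metadata
	are always sizeof(Heap_Allocation_Metadata) apart.
	
		[ Heap_Allocation_Metadata | user memory ......... ]
		^meta                      ^p = (u8*)meta + sizeof(Heap_Allocation_Metadata)
	
	heap_dealloc() recovers the header by stepping the user pointer back by that same
	amount, which is why passing a pointer that did not come from heap_alloc() trips the
	signature check in check_meta() in DEBUG builds.
*/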

// #Global
ogb_instance Heap_Block *heap_head;
ogb_instance bool heap_initted;
ogb_instance Spinlock heap_lock;

#if !OOGABOOGA_LINK_EXTERNAL_INSTANCE
Heap_Block *heap_head;
bool heap_initted = false;
Spinlock heap_lock;
#endif // NOT OOGABOOGA_LINK_EXTERNAL_INSTANCE

u64 get_heap_block_size_excluding_metadata(Heap_Block *block) {
	return block->size - sizeof(Heap_Block);
}
u64 get_heap_block_size_including_metadata(Heap_Block *block) {
	return block->size;
}

bool is_pointer_in_program_memory(void *p) {
	return (u8*)p >= (u8*)program_memory && (u8*)p < ((u8*)program_memory + program_memory_capacity);
}
bool is_pointer_in_stack(void *p) {
	void *stack_base  = os_get_stack_base();
	void *stack_limit = os_get_stack_limit();
	return (uintptr_t)p >= (uintptr_t)stack_limit && (uintptr_t)p < (uintptr_t)stack_base;
}
bool is_pointer_in_static_memory(void *p) {
	return (uintptr_t)p >= (uintptr_t)os.static_memory_start && (uintptr_t)p < (uintptr_t)os.static_memory_end;
}
bool is_pointer_valid(void *p) {
	return is_pointer_in_program_memory(p) || is_pointer_in_stack(p) || is_pointer_in_static_memory(p);
}

// Meant for debug
void sanity_check_block(Heap_Block *block) {
#if CONFIGURATION == DEBUG
	assert(is_pointer_in_program_memory(block), "Heap_Block pointer is corrupt");
	assert(is_pointer_in_program_memory(block->start), "Heap_Block start pointer is corrupt");
	if (block->next) { assert(is_pointer_in_program_memory(block->next), "Heap_Block next pointer is corrupt"); }
	assert(block->size < GB(256), "A heap block is corrupt.");
	assert(block->size >= INITIAL_PROGRAM_MEMORY_SIZE, "A heap block is corrupt.");
	assert((u64)block->start == (u64)block + sizeof(Heap_Block), "A heap block is corrupt.");
	
	Heap_Free_Node *node = block->free_head;
	u64 total_free = 0;
	while (node != 0) {
		Heap_Free_Node *other_node = node->next;
		assert(node->size < GB(256), "Heap is corrupt");
		assert(is_pointer_in_program_memory(node), "Heap is corrupt");
		while (other_node != 0) {
			assert(is_pointer_in_program_memory(other_node), "Heap is corrupt");
			assert(other_node != node, "Circular reference in heap free node list. This is probably an internal error, or an extremely unlucky result from heap corruption.");
			other_node = other_node->next;
		}
		total_free += node->size;
		assert(total_free <= block->size, "Free nodes are fucky wucky. This might be heap corruption, or possibly an internal error.");
		node = node->next;
	}
	
	u64 expected_size = get_heap_block_size_excluding_metadata(block);
	assert(block->total_allocated + total_free == expected_size, "Heap is corrupt.");
#endif
}

inline void check_meta(Heap_Allocation_Metadata *meta) {
#if CONFIGURATION == DEBUG
	assert(meta->signature == HEAP_META_SIGNATURE, "Heap error. Either 1) You passed a bad pointer to dealloc or 2) You corrupted the heap.");
#endif
	// If > 256GB then prolly not legit lol
	assert(meta->size < 1024ULL*1024ULL*1024ULL*256ULL, "Heap error. Either 1) You passed a bad pointer to dealloc or 2) You corrupted the heap.");
	assert(is_pointer_in_program_memory(meta->block), "Heap error. Either 1) You passed a bad pointer to dealloc or 2) You corrupted the heap.");
	assert((u64)meta >= (u64)meta->block->start && (u64)meta < (u64)meta->block->start + meta->block->size, "Heap error: Pointer is not in its metadata block. This could be heap corruption, but it's more likely an internal error. That's not good.");
}

typedef struct {
	Heap_Free_Node *best_fit;
	Heap_Free_Node *previous;
	u64 delta;
} Heap_Search_Result;

Heap_Search_Result search_heap_block(Heap_Block *block, u64 size) {
	if (block->free_head == 0) return (Heap_Search_Result){0, 0, 0};
	
	Heap_Free_Node *node = block->free_head;
	Heap_Free_Node *previous = 0;
	Heap_Free_Node *best_fit = 0;
	Heap_Free_Node *before_best_fit = 0;
	u64 best_fit_delta = 0;
	
	while (node != 0) {
		if (node->size == size) {
			Heap_Search_Result result;
			result.best_fit = node;
			result.previous = previous;
			result.delta = 0;
			assert(result.previous != result.best_fit, "Internal goof");
			return result;
		}
		
		if (node->size >= size) {
			u64 delta = node->size - size;
			if (delta < best_fit_delta || !best_fit) {
				before_best_fit = previous;
				best_fit = node;
				best_fit_delta = delta;
			}
		}
		
		if (node->next) previous = node;
		node = node->next;
	}
	
	if (!best_fit) return (Heap_Search_Result){0, 0, 0};
	
	Heap_Search_Result result;
	result.best_fit = best_fit;
	result.previous = before_best_fit;
	result.delta = best_fit_delta;
	assert(result.previous != result.best_fit, "Internal goof");
	return result;
}
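
/*
	Best-fit example (illustrative): with free nodes of sizes 64, 512 and 96, a request
	for 80 bytes walks the whole list and returns the 96-byte node (delta 16) rather
	than the first node that merely fits (512). An exact match returns immediately,
	since its delta of 0 cannot be beaten.
*/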

Heap_Block* make_heap_block(Heap_Block *parent, u64 size) {
	size += sizeof(Heap_Block);
	size = align_next(size, os.page_size);
	
	Heap_Block *block = (Heap_Block*)os_reserve_next_memory_pages(size);
	assert((u64)block % os.page_size == 0, "Heap block not aligned to page size");
	if (parent) parent->next = block;
	os_unlock_program_memory_pages(block, size);
	
#if CONFIGURATION == DEBUG
	block->total_allocated = 0;
#endif
	
	block->start = ((u8*)block) + sizeof(Heap_Block);
	block->size = size;
	block->next = 0;
	block->free_head = (Heap_Free_Node*)block->start;
	block->free_head->size = get_heap_block_size_excluding_metadata(block);
	block->free_head->next = 0;
	
	return block;
}
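
/*
	A freshly made block (sketch): the Heap_Block header sits at the start of the mapped
	pages, and the rest of the block is one big free node.
	
		[ Heap_Block | Heap_Free_Node (size = block->size - sizeof(Heap_Block)) ......... ]
		^block       ^block->start == block->free_head
*/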

void heap_init() {
	if (heap_initted) return;
	assert(HEAP_ALIGNMENT == 16);
	assert(sizeof(Heap_Allocation_Metadata) % HEAP_ALIGNMENT == 0);
	heap_initted = true;
	heap_head = make_heap_block(0, DEFAULT_HEAP_BLOCK_SIZE);
	spinlock_init(&heap_lock);
}

void* heap_alloc(u64 size) {

	if (!heap_initted) heap_init();
	
	// #Sync #Speed oof
	spinlock_acquire_or_wait(&heap_lock);
	
	size += sizeof(Heap_Allocation_Metadata);
	size = (size + HEAP_ALIGNMENT) & ~(HEAP_ALIGNMENT - 1);
	
	assert(size < MAX_HEAP_BLOCK_SIZE, "Past Charlie has been lazy and did not handle large allocations like this. I apologize on behalf of past Charlie. A quick fix could be to increase the heap block size for now. #Incomplete #Limitation");
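	
	// Worked example (illustrative): a 100-byte request first grows by
	// sizeof(Heap_Allocation_Metadata), which is 16 bytes here outside DEBUG (32 with
	// the DEBUG fields), giving 116, and the mask then rounds that up to the next
	// multiple of HEAP_ALIGNMENT (16), so 128 bytes are carved out of the free list.
	// Note that the formula above always adds a full HEAP_ALIGNMENT before masking, so
	// an already-aligned size ends up padded by 16 extra bytes.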
	
#if VERY_DEBUG
	{
		Heap_Block *block = heap_head;
		while (block != 0) {
			sanity_check_block(block);
			block = block->next;
		}
	}
#endif
	
	Heap_Block *block = heap_head;
	Heap_Block *last_block = 0;
	Heap_Free_Node *best_fit = 0;
	Heap_Block *best_fit_block = 0;
	Heap_Free_Node *previous = 0;
	u64 best_fit_delta = 0;
	
	// #Speed
	// Maybe instead of going through EVERY free node to find best fit we do a good-enough fit
	while (block != 0) {
		
		if (get_heap_block_size_excluding_metadata(block) < size) {
			last_block = block;
			block = block->next;
			continue;
		}
		
		Heap_Search_Result result = search_heap_block(block, size);
		Heap_Free_Node *node = result.best_fit;
		if (node) {
			if (node->size < size) {
				// search_heap_block never returns a fit smaller than size, so this is only
				// defensive; advance to the next block so we can't spin on this one forever.
				last_block = block;
				block = block->next;
				continue;
			}
			if (node->size == size) {
				best_fit = node;
				best_fit_block = block;
				previous = result.previous;
				best_fit_delta = 0;
				break;
			}
			
			u64 delta = node->size - size;
			if (delta < best_fit_delta || !best_fit) {
				best_fit = node;
				best_fit_block = block;
				previous = result.previous;
				best_fit_delta = delta;
			}
		}
		
		last_block = block;
		block = block->next;
	}
	
	if (!best_fit) {
		block = make_heap_block(last_block, max(DEFAULT_HEAP_BLOCK_SIZE, size));
		previous = 0;
		best_fit = block->free_head;
		best_fit_block = block;
	}
	
	assert(best_fit != 0, "Internal heap error");
	
	// Unlock best fit
	// #Copypaste
	void *free_tail = (u8*)best_fit + best_fit->size;
	void *first_page = (void*)align_previous(best_fit, os.page_size);
	void *last_page_end = (void*)align_previous(free_tail, os.page_size);
	if ((u8*)last_page_end > (u8*)first_page) {
		os_unlock_program_memory_pages(first_page, (u64)last_page_end - (u64)first_page);
	}
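	
	// Note on the page lock/unlock dance (inferred from how the helpers are used in
	// this file, not from their implementation): only pages lying wholly inside a free
	// region ever get locked, because the locking blocks below align the start up with
	// align_next and the end down with align_previous, so a page that still contains
	// live data or a block header is never made inaccessible. The unlock above rounds
	// the start down instead, which may redundantly unlock an edge page, but that is
	// the safe direction.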
	
	Heap_Free_Node *new_free_node = 0;
	if (size != best_fit->size) {
		u64 remainder = best_fit->size - size;
		new_free_node = (Heap_Free_Node*)(((u8*)best_fit) + size);
		new_free_node->size = remainder;
		new_free_node->next = best_fit->next;
		
		// Lock remaining free node
		// #Copypaste
		void *free_tail = (u8*)new_free_node + new_free_node->size;
		void *next_page = (void*)align_next(new_free_node, os.page_size);
		void *last_page_end = (void*)align_previous(free_tail, os.page_size);
		if ((u8*)last_page_end > (u8*)next_page) {
			os_lock_program_memory_pages(next_page, (u64)last_page_end - (u64)next_page);
		}
	}
	
	if (previous && new_free_node) {
		assert(previous->next == best_fit, "Internal heap error");
		previous->next = new_free_node;
	} else if (previous) {
		assert(previous->next == best_fit, "Internal heap error");
		previous->next = best_fit->next;
	}
	
	if (best_fit_block->free_head == best_fit) {
		// If we allocated the first free node then replace with new free node or just
		// remove it if perfect fit.
		if (new_free_node) {
			new_free_node->next = best_fit_block->free_head->next;
			best_fit_block->free_head = new_free_node;
		} else best_fit_block->free_head = best_fit_block->free_head->next;
	}
	
	Heap_Allocation_Metadata *meta = (Heap_Allocation_Metadata*)best_fit;
	meta->size = size;
	meta->block = best_fit_block;
#if CONFIGURATION == DEBUG
	meta->signature = HEAP_META_SIGNATURE;
	meta->block->total_allocated += size;
#endif
	check_meta(meta);
	
#if VERY_DEBUG
	sanity_check_block(meta->block);
#endif
	
	// #Sync #Speed oof
	spinlock_release(&heap_lock);
	
	void *p = ((u8*)meta) + sizeof(Heap_Allocation_Metadata);
	assert((u64)p % HEAP_ALIGNMENT == 0, "Internal heap error. Result pointer is not aligned to HEAP_ALIGNMENT");
	return p;
}

void heap_dealloc(void *p) {
	// #Sync #Speed oof
	if (!heap_initted) heap_init();
	
	spinlock_acquire_or_wait(&heap_lock);
	
	assert(is_pointer_in_program_memory(p), "A bad pointer was passed to heap_dealloc: it is out of program memory bounds!");
	
	p = (u8*)p - sizeof(Heap_Allocation_Metadata);
	Heap_Allocation_Metadata *meta = (Heap_Allocation_Metadata*)(p);
	check_meta(meta);
	
	// Yoink meta data before we start overwriting it
	Heap_Block *block = meta->block;
	u64 size = meta->size;
	
#if CONFIGURATION == DEBUG
	memset(p, 0x69696969, size);
#endif

#if VERY_DEBUG
	sanity_check_block(block);
#endif
	
	Heap_Free_Node *new_node = cast(Heap_Free_Node*)p;
	new_node->size = size;
	
	if (new_node < block->free_head) {
		// #Copypaste
		void *free_tail = (u8*)new_node + new_node->size;
		void *next_page = (void*)align_next(new_node, os.page_size);
		void *last_page_end = (void*)align_previous(free_tail, os.page_size);
		if ((u8*)last_page_end > (u8*)next_page) {
			os_lock_program_memory_pages(next_page, (u64)last_page_end - (u64)next_page);
		}
		
		if ((u8*)new_node + size == (u8*)block->free_head) {
			new_node->size = size + block->free_head->size;
			new_node->next = block->free_head->next;
			block->free_head = new_node;
		} else {
			new_node->next = block->free_head;
			block->free_head = new_node;
		}
	} else {
		if (!block->free_head) {
			block->free_head = new_node;
			new_node->next = 0;
			
			// #Copypaste
			void *free_tail = (u8*)new_node + new_node->size;
			void *next_page = (void*)align_next(new_node, os.page_size);
			void *last_page_end = (void*)align_previous(free_tail, os.page_size);
			if ((u8*)last_page_end > (u8*)next_page) {
				os_lock_program_memory_pages(next_page, (u64)last_page_end - (u64)next_page);
			}
		} else {
			Heap_Free_Node *node = block->free_head;
			
			while (true) {
				assert(node != 0, "We didn't find where the free node should be! This is likely heap corruption (or, hopefully not, an internal error)");
				
				// In retrospect, I don't remember a good reason to care about where the
				// free nodes are... maybe I'm just dumb right now? #Speed #Memory
				// ... Actually, it's probably to easily know when to merge free nodes.
				// BUT: maybe it's not worth the performance hit? Then again, if the heap
				// allocator slows down your program you should rethink your memory management
				// anyways...
				if (new_node >= node) {
					u8 *node_tail = (u8*)node + node->size;
					if (cast(u8*)new_node == node_tail) {
						// We need to account for the cases where we coalesce free blocks
						// with start/end in the middle of a page.
						u64 new_node_size = new_node->size;
						
						// #Copypaste
						void *free_tail = (u8*)new_node + new_node->size;
						void *next_page = (void*)align_previous(node_tail, os.page_size);
						void *last_page_end = (void*)align_previous(free_tail, os.page_size);
						if ((u8*)next_page < (u8*)node) next_page = (u8*)next_page + os.page_size;
						if ((u8*)last_page_end > (u8*)next_page) {
							os_lock_program_memory_pages(next_page, (u64)last_page_end - (u64)next_page);
						}
						
						node->size += new_node_size;
						break;
					} else {
						new_node->next = node->next;
						node->next = new_node;
						
						u8 *new_node_tail = (u8*)new_node + new_node->size;
						if (new_node->next && (u8*)new_node->next == new_node_tail) {
							new_node->size += new_node->next->size;
							new_node->next = new_node->next->next;
						}
						
						// #Copypaste
						void *free_tail = (u8*)new_node + new_node->size;
						void *next_page = (void*)align_next(new_node, os.page_size);
						void *last_page_end = (void*)align_previous(free_tail, os.page_size);
						if ((u8*)last_page_end > (u8*)next_page) {
							os_lock_program_memory_pages(next_page, (u64)last_page_end - (u64)next_page);
						}
						
						break;
					}
				}
				
				node = node->next;
			}
		}
	}
	
#if CONFIGURATION == DEBUG
	block->total_allocated -= size;
#endif

#if VERY_DEBUG
	sanity_check_block(block);
#endif
	
	// #Sync #Speed oof
	spinlock_release(&heap_lock);
}
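
/*
	Coalescing example (illustrative): say A, B and C were allocated back to back and A
	is freed first, then B. Freeing A makes it the new free_head (lowest address).
	Freeing B walks the list, finds that B starts exactly at A's tail (node_tail ==
	new_node) and simply grows A's node by B's size instead of linking a second node,
	so the two regions become one contiguous free range a later allocation can reuse.
*/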

void* heap_allocator_proc(u64 size, void *p, Allocator_Message message, void *data) {
	switch (message) {
		case ALLOCATOR_ALLOCATE: {
			return heap_alloc(size);
			break;
		}
		case ALLOCATOR_DEALLOCATE: {
			heap_dealloc(p);
			return 0;
		}
		case ALLOCATOR_REALLOCATE: {
			if (!p) {
				return heap_alloc(size);
			}
			assert(is_pointer_valid(p), "Invalid pointer passed to heap allocator reallocate");
			Heap_Allocation_Metadata *meta = (Heap_Allocation_Metadata*)(((u64)p) - sizeof(Heap_Allocation_Metadata));
			check_meta(meta);
			void *new = heap_alloc(size);
			memcpy(new, p, min(size, meta->size));
			heap_dealloc(p);
			return new;
		}
	}
	return 0;
}
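
// Note: ALLOCATOR_REALLOCATE above always takes the slow path (new allocation, copy,
// then free); it never grows an allocation in place, even when the neighboring free
// node would allow it.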

Allocator get_heap_allocator() {
	Allocator heap_allocator;
	heap_allocator.proc = heap_allocator_proc;
	heap_allocator.data = 0;
	return heap_allocator;
}
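
/*
	Usage sketch (illustrative, not part of the original source): the heap is normally
	reached through this Allocator handle rather than heap_alloc()/heap_dealloc()
	directly, so code can be written against any allocator.
	
		Allocator a = get_heap_allocator();
		int *numbers = (int*)a.proc(64  * sizeof(int), 0,       ALLOCATOR_ALLOCATE,   a.data);
		numbers      = (int*)a.proc(128 * sizeof(int), numbers, ALLOCATOR_REALLOCATE, a.data);
		a.proc(0, numbers, ALLOCATOR_DEALLOCATE, a.data);
*/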

///
///
// Temporary storage
///

#ifndef TEMPORARY_STORAGE_SIZE
	#define TEMPORARY_STORAGE_SIZE (1024ULL*1024ULL*2ULL) // 2mb
#endif

ogb_instance void* talloc(u64);
ogb_instance void* temp_allocator_proc(u64 size, void *p, Allocator_Message message, void*);

// #Global
ogb_instance Allocator
get_temporary_allocator();

#if !OOGABOOGA_LINK_EXTERNAL_INSTANCE
thread_local void *temporary_storage = 0;
thread_local void *temporary_storage_pointer = 0;
thread_local bool has_warned_temporary_storage_overflow = false;
thread_local Allocator temp_allocator;

ogb_instance Allocator
get_temporary_allocator() {
	return temp_allocator;
}
#endif

ogb_instance void*
temp_allocator_proc(u64 size, void *p, Allocator_Message message, void *data);

ogb_instance void
temporary_storage_init(u64 arena_size);

ogb_instance void*
talloc(u64 size);

ogb_instance void
reset_temporary_storage();

#if !OOGABOOGA_LINK_EXTERNAL_INSTANCE
void* temp_allocator_proc(u64 size, void *p, Allocator_Message message, void *data) {
	switch (message) {
		case ALLOCATOR_ALLOCATE: {
			return talloc(size);
			break;
		}
		case ALLOCATOR_DEALLOCATE: {
			return 0;
		}
		case ALLOCATOR_REALLOCATE: {
			return 0;
		}
	}
	return 0;
}

void temporary_storage_init(u64 arena_size) {
	temporary_storage = heap_alloc(arena_size);
	assert(temporary_storage, "Failed allocating temporary storage");
	temporary_storage_pointer = temporary_storage;
	
	temp_allocator.proc = temp_allocator_proc;
	temp_allocator.data = 0;
}
void* talloc(u64 size) {
	assert(size < TEMPORARY_STORAGE_SIZE, "Bruddah this is too large for temp allocator");
	
	void *p = temporary_storage_pointer;
	temporary_storage_pointer = (u8*)temporary_storage_pointer + size;
	
	if ((u8*)temporary_storage_pointer >= (u8*)temporary_storage + TEMPORARY_STORAGE_SIZE) {
		if (!has_warned_temporary_storage_overflow) {
			os_write_string_to_stdout(STR("WARNING: temporary storage overflowed; wrapping around to the start.\n"));
			has_warned_temporary_storage_overflow = true;
		}
		temporary_storage_pointer = temporary_storage;
		return talloc(size);
	}
	
	return p;
}
void reset_temporary_storage() {
	temporary_storage_pointer = temporary_storage;
	
	// Allow the overflow warning to fire again next frame.
	has_warned_temporary_storage_overflow = false;
}

#endif // NOT OOGABOOGA_LINK_EXTERNAL_INSTANCE
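
/*
	Usage sketch (illustrative): temporary storage is a per-thread bump arena, set up by
	temporary_storage_init(), meant for scratch allocations that only need to live until
	the owning frame or loop calls reset_temporary_storage().
	
		char *scratch = (char*)talloc(256);   // cheap pointer bump, nothing to free
		// ... use scratch ...
		reset_temporary_storage();            // reclaims everything talloc'd this frame
*/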