@@ -280,14 +280,14 @@ static inline __attribute__((always_inline)) void mapping_insert(control_t* cont
 }
 
 /* This version rounds up to the next block size (for allocations) */
-static inline __attribute__((always_inline)) void mapping_search(control_t* control, size_t size, int* fli, int* sli)
+static inline __attribute__((always_inline)) void mapping_search(control_t* control, size_t* size, int* fli, int* sli)
 {
-    if (size >= control->small_block_size)
+    if (*size >= control->small_block_size)
     {
-        const size_t round = (1 << (tlsf_fls_sizet(size) - control->sl_index_count_log2)) - 1;
-        size += round;
+        const size_t round = (1 << (tlsf_fls_sizet(*size) - control->sl_index_count_log2));
+        *size = align_up(*size, round);
     }
-    mapping_insert(control, size, fli, sli);
+    mapping_insert(control, *size, fli, sli);
 }
 
 static inline __attribute__((always_inline)) block_header_t* search_suitable_block(control_t* control, int* fli, int* sli)
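For reference, a minimal standalone sketch (not part of the patch) of what the new rounding step computes. It assumes align_up(x, a) rounds x up to the next multiple of the power-of-two a and that tlsf_fls_sizet() returns the index of the most significant set bit; sl_index_count_log2 = 5 is an assumed configuration value.

#include <stddef.h>
#include <stdio.h>

/* Assumed helper: round x up to a multiple of the power-of-two 'align'. */
static size_t align_up(size_t x, size_t align)
{
    return (x + (align - 1)) & ~(align - 1);
}

/* Assumed helper: index of the most significant set bit (like tlsf_fls_sizet). */
static int fls_sizet(size_t word)
{
    int bit = -1;
    while (word) { word >>= 1; ++bit; }
    return bit;
}

int main(void)
{
    const int sl_index_count_log2 = 5; /* assumed value of control->sl_index_count_log2 */
    size_t size = 1000;                /* pretend this is the adjusted request */

    /* Round the size up to the spacing of its second-level bucket. */
    const size_t round = (size_t)1 << (fls_sizet(size) - sl_index_count_log2);
    size = align_up(size, round);

    printf("rounded size: %zu\n", size); /* 1000 -> 1008 under these assumptions */
    return 0;
}

The old code bumped its local copy of size only to pick a bucket; the rewritten version also writes the rounded value back through the pointer, so callers further up the stack allocate that rounded size.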
@@ -540,12 +540,12 @@ static inline __attribute__((always_inline)) block_header_t* block_trim_free_lea
     return remaining_block;
 }
 
-static inline __attribute__((always_inline)) block_header_t* block_locate_free(control_t* control, size_t size)
+static inline __attribute__((always_inline)) block_header_t* block_locate_free(control_t* control, size_t* size)
 {
     int fl = 0, sl = 0;
     block_header_t* block = 0;
 
-    if (size)
+    if (*size)
     {
         mapping_search(control, size, &fl, &sl);
 
@@ -563,7 +563,7 @@ static inline __attribute__((always_inline)) block_header_t* block_locate_free(c
 
     if (block)
     {
-        tlsf_assert(block_size(block) >= size);
+        tlsf_assert(block_size(block) >= *size);
         remove_free_block(control, block, fl, sl);
     }
 
@@ -1011,8 +1011,14 @@ pool_t tlsf_get_pool(tlsf_t tlsf)
 void* tlsf_malloc(tlsf_t tlsf, size_t size)
 {
     control_t* control = tlsf_cast(control_t*, tlsf);
-    const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
-    block_header_t* block = block_locate_free(control, adjust);
+    size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
+    // Returned size is 0 when the requested size is larger than the max block
+    // size.
+    if (adjust == 0) {
+        return NULL;
+    }
+    // block_locate_free() may adjust our allocated size further.
+    block_header_t* block = block_locate_free(control, &adjust);
     return block_prepare_used(control, block, adjust);
 }
 
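A caller-side sketch (illustrative only) of the effect of the new early-out: a request larger than the maximum block size now comes back from tlsf_malloc() as NULL instead of reaching block_locate_free(). Only tlsf_t and tlsf_malloc() below are taken from the patch; the wrapper, its name, and the header name are assumptions.

#include <stddef.h>
#include <stdio.h>
#include "tlsf.h"  /* assumed header name for this TLSF port */

/* Hypothetical convenience wrapper, not part of the TLSF API. */
static void* my_tlsf_malloc_or_log(tlsf_t tlsf, size_t size)
{
    void* p = tlsf_malloc(tlsf, size);
    if (p == NULL)
    {
        /* NULL now covers both "pool exhausted" and the new
         * "request larger than the maximum block size" early-out. */
        fprintf(stderr, "tlsf_malloc(%zu) failed\n", size);
    }
    return p;
}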
@@ -1138,9 +1144,9 @@ void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_off
     ** alignment constraint. Thus, the gap is not required.
     ** If we requested 0 bytes, return null, as tlsf_malloc(0) does.
     */
-    const size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;
+    size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;
 
-    block_header_t* block = block_locate_free(control, aligned_size);
+    block_header_t* block = block_locate_free(control, &aligned_size);
 
     /* This can't be a static assert. */
     tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);
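The tlsf_memalign_offs() change follows the same pattern as tlsf_malloc(): aligned_size loses its const qualifier because block_locate_free() may now enlarge it through the pointer, and everything after the call must operate on the enlarged value (which is also why the assert now compares block_size(block) against *size). A minimal sketch of that calling pattern, with hypothetical stand-ins for the real functions:

#include <stddef.h>

/* Stand-in for block_locate_free(): may round *size up in place. */
static void locate_free_stub(size_t* size)
{
    *size = (*size + 15u) & ~(size_t)15u;  /* placeholder rounding only */
}

static size_t allocate_stub(size_t request)
{
    size_t adjusted = request;   /* must be mutable, like 'adjust'/'aligned_size' */
    locate_free_stub(&adjusted);
    return adjusted;             /* every later step uses the enlarged value */
}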