@@ -177,6 +177,7 @@ INTERNAL_HIDDEN void iso_alloc_initialize_global_root(void) {
177177 * result in a soft page fault */
178178 MLOCK (& _root , sizeof (iso_alloc_root ));
179179
180+ _root -> zone_retirement_shf = _log2 (ZONE_ALLOC_RETIRE );
180181 _root -> zones_size = (MAX_ZONES * sizeof (iso_alloc_zone_t ));
181182 _root -> zones_size += (g_page_size * 2 );
182183 _root -> zones_size = ROUND_UP_PAGE (_root -> zones_size );
@@ -529,7 +530,6 @@ INTERNAL_HIDDEN iso_alloc_zone_t *_iso_new_zone(size_t size, bool internal, int3
529530 if (new_zone -> tagged == true) {
530531 create_guard_page (p + g_page_size + tag_mapping_size );
531532 new_zone -> user_pages_start = (p + g_page_size + tag_mapping_size + g_page_size );
532-
533533 uint64_t * _mtp = p + g_page_size ;
534534
535535 /* Generate random tags */
@@ -779,8 +779,7 @@ INTERNAL_HIDDEN bit_slot_t iso_scan_zone_free_slot_slow(iso_alloc_zone_t *zone)
779779 bit_slot_t bts = bm [i ];
780780
781781 for (int64_t j = 0 ; j < BITS_PER_QWORD ; j += BITS_PER_CHUNK ) {
782- /* We can easily check if every bitslot represented by
783- * this qword is allocated with or without canaries */
782+ /* Check each bit to see if it's available */
784783 if ((GET_BIT (bts , j )) == 0 ) {
785784 return ((i << BITS_PER_QWORD_SHIFT ) + j );
786785 }
@@ -963,7 +962,7 @@ INTERNAL_HIDDEN ASSUME_ALIGNED void *_iso_alloc_bitslot_from_zone(bit_slot_t bit
963962
964963 /* This chunk was either previously allocated and free'd
965964 * or it's a canary chunk. In either case this means it
966- * has a canary written in its first dword . Here we check
965+ * has a canary written in its first qword . Here we check
967966 * that canary and abort if it's been corrupted */
968967#if !ENABLE_ASAN && !DISABLE_CANARY
969968 if ((GET_BIT (b , (which_bit + 1 ))) == 1 ) {
@@ -972,6 +971,9 @@ INTERNAL_HIDDEN ASSUME_ALIGNED void *_iso_alloc_bitslot_from_zone(bit_slot_t bit
972971 }
973972#endif
974973
974+ zone -> af_count ++ ;
975+ zone -> alloc_count ++ ;
976+
975977 /* Set the in-use bit */
976978 SET_BIT (b , which_bit );
977979
@@ -981,19 +983,17 @@ INTERNAL_HIDDEN ASSUME_ALIGNED void *_iso_alloc_bitslot_from_zone(bit_slot_t bit
981983 * as a canary chunk. This bit is set again upon free */
982984 UNSET_BIT (b , (which_bit + 1 ));
983985 bm [dwords_to_bit_slot ] = b ;
984- zone -> af_count ++ ;
985- zone -> alloc_count ++ ;
986986 return p ;
987987}
988988
989989/* Does not require the root is locked */
990990INTERNAL_HIDDEN INLINE void populate_zone_cache (iso_alloc_zone_t * zone ) {
991- if (UNLIKELY (zone -> internal == false)) {
991+ /* Don't cache this zone if it was recently cached */
992+ if (zone_cache_count != 0 && zone_cache [zone_cache_count - 1 ].zone == zone ) {
992993 return ;
993994 }
994995
995- /* Don't cache this zone if it was recently cached */
996- if (zone_cache_count != 0 && zone_cache [zone_cache_count - 1 ].zone == zone ) {
996+ if (UNLIKELY (zone -> internal == false)) {
997997 return ;
998998 }
999999
@@ -1408,7 +1408,9 @@ INTERNAL_HIDDEN void iso_free_chunk_from_zone(iso_alloc_zone_t *zone, void *rest
14081408 * which could result in a page fault */
14091409 bitmap_index_t b = bm [dwords_to_bit_slot ];
14101410
1411- /* Ensure the pointer is a multiple of chunk size */
1411+ /* Ensure the pointer is a multiple of chunk size. Chunk size
1412+ * should always be a power of 2 so this bitwise AND works and
1413+ * is generally faster than modulo */
14121414 if (UNLIKELY ((chunk_offset & (zone -> chunk_size - 1 )) != 0 )) {
14131415 LOG_AND_ABORT ("Chunk at 0x%p is not a multiple of zone[%d] chunk size %d. Off by %lu bits" ,
14141416 p , zone -> index , zone -> chunk_size , (chunk_offset & (zone -> chunk_size - 1 )));
@@ -1619,7 +1621,7 @@ INTERNAL_HIDDEN bool _is_zone_retired(iso_alloc_zone_t *zone) {
16191621 * and has allocated and freed more than ZONE_ALLOC_RETIRE
16201622 * chunks in its lifetime then we destroy and replace it with
16211623 * a new zone */
1622- if (UNLIKELY (zone -> af_count == 0 && zone -> alloc_count > (zone -> chunk_count * ZONE_ALLOC_RETIRE ))) {
1624+ if (UNLIKELY (zone -> af_count == 0 && zone -> alloc_count > (zone -> chunk_count << _root -> zone_retirement_shf ))) {
16231625 if (zone -> internal == true && zone -> chunk_size < (MAX_DEFAULT_ZONE_SZ * 2 )) {
16241626 return true;
16251627 }
0 commit comments