Skip to content

Commit 9446be8

Browse files
authored
Merge pull request #111 from struct/6_2022_perf_fixes
6 2022 perf fixes
2 parents 870c49f + 7442573 commit 9446be8

4 files changed

Lines changed: 19 additions & 13 deletions

File tree

include/conf.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@
4545

4646
/* Zones can be retired after a certain number of
4747
* allocations. This is computed as the total count
48-
* of chunks the zone can handle multiplied by this
48+
* of chunks the zone can hold multiplied by this
4949
* value. The zone is replaced at that point if all
5050
* of its current chunks are free */
5151
#define ZONE_ALLOC_RETIRE 32

include/iso_alloc_internal.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -450,6 +450,7 @@ typedef struct {
450450
uint16_t zones_used;
451451
void *guard_below;
452452
void *guard_above;
453+
uint32_t zone_retirement_shf;
453454
uintptr_t *chunk_quarantine;
454455
size_t chunk_quarantine_count;
455456
/* Zones are linked by their next_sz_index member which

src/iso_alloc.c

Lines changed: 13 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -177,6 +177,7 @@ INTERNAL_HIDDEN void iso_alloc_initialize_global_root(void) {
177177
* result in a soft page fault */
178178
MLOCK(&_root, sizeof(iso_alloc_root));
179179

180+
_root->zone_retirement_shf = _log2(ZONE_ALLOC_RETIRE);
180181
_root->zones_size = (MAX_ZONES * sizeof(iso_alloc_zone_t));
181182
_root->zones_size += (g_page_size * 2);
182183
_root->zones_size = ROUND_UP_PAGE(_root->zones_size);
@@ -529,7 +530,6 @@ INTERNAL_HIDDEN iso_alloc_zone_t *_iso_new_zone(size_t size, bool internal, int3
529530
if(new_zone->tagged == true) {
530531
create_guard_page(p + g_page_size + tag_mapping_size);
531532
new_zone->user_pages_start = (p + g_page_size + tag_mapping_size + g_page_size);
532-
533533
uint64_t *_mtp = p + g_page_size;
534534

535535
/* Generate random tags */
@@ -779,8 +779,7 @@ INTERNAL_HIDDEN bit_slot_t iso_scan_zone_free_slot_slow(iso_alloc_zone_t *zone)
779779
bit_slot_t bts = bm[i];
780780

781781
for(int64_t j = 0; j < BITS_PER_QWORD; j += BITS_PER_CHUNK) {
782-
/* We can easily check if every bitslot represented by
783-
* this qword is allocated with or without canaries */
782+
/* Check each bit to see if it's available */
784783
if((GET_BIT(bts, j)) == 0) {
785784
return ((i << BITS_PER_QWORD_SHIFT) + j);
786785
}
@@ -963,7 +962,7 @@ INTERNAL_HIDDEN ASSUME_ALIGNED void *_iso_alloc_bitslot_from_zone(bit_slot_t bit
963962

964963
/* This chunk was either previously allocated and free'd
965964
* or it's a canary chunk. In either case this means it
966-
* has a canary written in its first dword. Here we check
965+
* has a canary written in its first qword. Here we check
967966
* that canary and abort if it's been corrupted */
968967
#if !ENABLE_ASAN && !DISABLE_CANARY
969968
if((GET_BIT(b, (which_bit + 1))) == 1) {
@@ -972,6 +971,9 @@ INTERNAL_HIDDEN ASSUME_ALIGNED void *_iso_alloc_bitslot_from_zone(bit_slot_t bit
972971
}
973972
#endif
974973

974+
zone->af_count++;
975+
zone->alloc_count++;
976+
975977
/* Set the in-use bit */
976978
SET_BIT(b, which_bit);
977979

@@ -981,19 +983,17 @@ INTERNAL_HIDDEN ASSUME_ALIGNED void *_iso_alloc_bitslot_from_zone(bit_slot_t bit
981983
* as a canary chunk. This bit is set again upon free */
982984
UNSET_BIT(b, (which_bit + 1));
983985
bm[dwords_to_bit_slot] = b;
984-
zone->af_count++;
985-
zone->alloc_count++;
986986
return p;
987987
}
988988

989989
/* Does not require the root is locked */
990990
INTERNAL_HIDDEN INLINE void populate_zone_cache(iso_alloc_zone_t *zone) {
991-
if(UNLIKELY(zone->internal == false)) {
991+
/* Don't cache this zone if it was recently cached */
992+
if(zone_cache_count != 0 && zone_cache[zone_cache_count - 1].zone == zone) {
992993
return;
993994
}
994995

995-
/* Don't cache this zone if it was recently cached */
996-
if(zone_cache_count != 0 && zone_cache[zone_cache_count - 1].zone == zone) {
996+
if(UNLIKELY(zone->internal == false)) {
997997
return;
998998
}
999999

@@ -1408,7 +1408,9 @@ INTERNAL_HIDDEN void iso_free_chunk_from_zone(iso_alloc_zone_t *zone, void *rest
14081408
* which could result in a page fault */
14091409
bitmap_index_t b = bm[dwords_to_bit_slot];
14101410

1411-
/* Ensure the pointer is a multiple of chunk size */
1411+
/* Ensure the pointer is a multiple of chunk size. Chunk size
1412+
* should always be a power of 2 so this bitwise AND works and
1413+
* is generally faster than modulo */
14121414
if(UNLIKELY((chunk_offset & (zone->chunk_size - 1)) != 0)) {
14131415
LOG_AND_ABORT("Chunk at 0x%p is not a multiple of zone[%d] chunk size %d. Off by %lu bits",
14141416
p, zone->index, zone->chunk_size, (chunk_offset & (zone->chunk_size - 1)));
@@ -1619,7 +1621,7 @@ INTERNAL_HIDDEN bool _is_zone_retired(iso_alloc_zone_t *zone) {
16191621
* and has allocated and freed more than ZONE_ALLOC_RETIRE
16201622
* chunks in its lifetime then we destroy and replace it with
16211623
* a new zone */
1622-
if(UNLIKELY(zone->af_count == 0 && zone->alloc_count > (zone->chunk_count * ZONE_ALLOC_RETIRE))) {
1624+
if(UNLIKELY(zone->af_count == 0 && zone->alloc_count > (zone->chunk_count << _root->zone_retirement_shf))) {
16231625
if(zone->internal == true && zone->chunk_size < (MAX_DEFAULT_ZONE_SZ * 2)) {
16241626
return true;
16251627
}

src/iso_alloc_mem_tags.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,10 @@ INTERNAL_HIDDEN void *_untag_ptr(void *p, iso_alloc_zone_t *zone) {
5151

5252
INTERNAL_HIDDEN bool _refresh_zone_mem_tags(iso_alloc_zone_t *zone) {
5353
#if MEMORY_TAGGING
54-
if(UNLIKELY(zone->af_count == 0 && zone->alloc_count > (zone->chunk_count * ZONE_ALLOC_RETIRE)) >> 2) {
54+
/* This implements a similar policy to zone retirement.
55+
* The only difference is that we refresh all tags at
56+
* 25% of the configured zone retirement age */
57+
if(UNLIKELY(zone->af_count == 0 && zone->alloc_count > ((zone->chunk_count << _root->zone_retirement_shf) >> 2))) {
5558
size_t s = ROUND_UP_PAGE(zone->chunk_count * MEM_TAG_SIZE);
5659
uint64_t *_mtp = (zone->user_pages_start - g_page_size - s);
5760

0 commit comments

Comments
 (0)