Skip to content

Commit f194b80

Browse files
authored
Merge pull request #94 from struct/fix_zone_retirement
fix zone list index after retirement
2 parents bcddea4 + 789ef46 commit f194b80

2 files changed

Lines changed: 51 additions & 35 deletions

File tree

include/iso_alloc_internal.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -523,7 +523,7 @@ INTERNAL_HIDDEN INLINE void clear_zone_cache(void);
523523
INTERNAL_HIDDEN iso_alloc_zone_t *is_zone_usable(iso_alloc_zone_t *zone, size_t size);
524524
INTERNAL_HIDDEN iso_alloc_zone_t *iso_find_zone_fit(size_t size);
525525
INTERNAL_HIDDEN iso_alloc_zone_t *iso_new_zone(size_t size, bool internal);
526-
INTERNAL_HIDDEN iso_alloc_zone_t *_iso_new_zone(size_t size, bool internal);
526+
INTERNAL_HIDDEN iso_alloc_zone_t *_iso_new_zone(size_t size, bool internal, int32_t index);
527527
INTERNAL_HIDDEN iso_alloc_zone_t *iso_find_zone_bitmap_range(const void *p);
528528
INTERNAL_HIDDEN iso_alloc_zone_t *iso_find_zone_range(const void *p);
529529
INTERNAL_HIDDEN iso_alloc_zone_t *search_chunk_lookup_table(const void *p);

src/iso_alloc.c

Lines changed: 50 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -406,7 +406,7 @@ INTERNAL_HIDDEN void iso_alloc_initialize_global_root(void) {
406406
MLOCK(&chunk_lookup_table, CHUNK_TO_ZONE_TABLE_SZ);
407407

408408
for(int64_t i = 0; i < DEFAULT_ZONE_COUNT; i++) {
409-
if((_iso_new_zone(default_zones[i], true)) == NULL) {
409+
if((_iso_new_zone(default_zones[i], true, -1)) == NULL) {
410410
LOG_AND_ABORT("Failed to create a new zone");
411411
}
412412
}
@@ -545,16 +545,8 @@ INTERNAL_HIDDEN void _iso_alloc_destroy_zone_unlocked(iso_alloc_zone_t *zone, bo
545545
if(replace == true) {
546546
/* The only time we ever destroy a non-private zone
547547
* is from the destructor so it's safe to unmap pages */
548-
int16_t zones_used = _root->zones_used;
549-
550-
/* _iso_new_zone() will use _root->zones_used to place
551-
* the new zone at the correct index in _root->zones.
552-
* We will restore this value after the new zone has
553-
* been created */
554-
_root->zones_used = zone->index;
555548
_unmap_zone(zone);
556-
_iso_new_zone(zone->chunk_size, true);
557-
_root->zones_used = zones_used;
549+
_iso_new_zone(zone->chunk_size, true, zone->index);
558550
} else {
559551
_unmap_zone(zone);
560552
}
@@ -653,14 +645,14 @@ INTERNAL_HIDDEN iso_alloc_zone_t *iso_new_zone(size_t size, bool internal) {
653645
}
654646

655647
LOCK_ROOT();
656-
iso_alloc_zone_t *zone = _iso_new_zone(size, internal);
648+
iso_alloc_zone_t *zone = _iso_new_zone(size, internal, -1);
657649
UNLOCK_ROOT();
658650
return zone;
659651
}
660652

661653
/* Requires the root is locked */
662-
INTERNAL_HIDDEN iso_alloc_zone_t *_iso_new_zone(size_t size, bool internal) {
663-
if(UNLIKELY(_root->zones_used >= MAX_ZONES)) {
654+
INTERNAL_HIDDEN iso_alloc_zone_t *_iso_new_zone(size_t size, bool internal, int32_t index) {
655+
if(UNLIKELY(_root->zones_used >= MAX_ZONES) || UNLIKELY(index > 0 && index >= MAX_ZONES)) {
664656
LOG_AND_ABORT("Cannot allocate additional zones. I have already allocated %d", _root->zones_used);
665657
}
666658

@@ -684,10 +676,21 @@ INTERNAL_HIDDEN iso_alloc_zone_t *_iso_new_zone(size_t size, bool internal) {
684676
size = SMALLEST_CHUNK_SZ;
685677
}
686678

687-
iso_alloc_zone_t *new_zone = &_root->zones[_root->zones_used];
679+
iso_alloc_zone_t *new_zone = NULL;
680+
681+
/* A valid index was passed, so we are replacing a retired zone in place */
682+
if(index > 0) {
683+
new_zone = &_root->zones[index];
684+
} else {
685+
new_zone = &_root->zones[_root->zones_used];
686+
}
688687

688+
uint16_t next_sz_index = new_zone->next_sz_index;
689689
memset(new_zone, 0x0, sizeof(iso_alloc_zone_t));
690690

691+
/* Restore next_sz_index */
692+
new_zone->next_sz_index = next_sz_index;
693+
691694
new_zone->internal = internal;
692695
new_zone->is_full = false;
693696
new_zone->chunk_size = size;
@@ -788,7 +791,13 @@ INTERNAL_HIDDEN iso_alloc_zone_t *_iso_new_zone(size_t size, bool internal) {
788791

789792
madvise(new_zone->user_pages_start, ZONE_USER_SIZE, MADV_WILLNEED);
790793

791-
new_zone->index = _root->zones_used;
794+
/* A valid index was passed, so reuse the retired zone's slot index */
795+
if(index > 0) {
796+
new_zone->index = index;
797+
} else {
798+
new_zone->index = _root->zones_used;
799+
}
800+
792801
new_zone->canary_secret = rand_uint64();
793802
new_zone->pointer_mask = rand_uint64();
794803

@@ -816,32 +825,39 @@ INTERNAL_HIDDEN iso_alloc_zone_t *_iso_new_zone(size_t size, bool internal) {
816825
if(zone_lookup_table[size] == 0) {
817826
zone_lookup_table[size] = new_zone->index;
818827
} else {
819-
/* Other zones exist that hold this size. We need to
820-
* fixup the most recent ones next_sz_index member.
821-
* We do this by walking the list using next_sz_index */
822-
for(int32_t i = zone_lookup_table[size]; i < _root->zones_used;) {
823-
iso_alloc_zone_t *zt = &_root->zones[i];
824-
825-
if(zt->chunk_size != size) {
826-
LOG_AND_ABORT("Inconsistent lookup table for zone[%d] chunk size %d (%d)", zt->index, zt->chunk_size, size);
827-
}
828+
/* If this was a zone replacement then its next_sz_index
829+
* is intact and we can leave it alone */
830+
if(index < 0) {
831+
/* Other zones exist that hold this size. We need to
832+
* fix up the most recent one's next_sz_index member.
833+
* We do this by walking the list using next_sz_index */
834+
for(int32_t i = zone_lookup_table[size]; i < _root->zones_used;) {
835+
iso_alloc_zone_t *zt = &_root->zones[i];
836+
837+
if(zt->chunk_size != size) {
838+
LOG_AND_ABORT("Inconsistent lookup table for zone[%d] chunk size %d (%d)", zt->index, zt->chunk_size, size);
839+
}
828840

829-
/* Follow this zone's next_sz_index member */
830-
if(zt->next_sz_index != 0) {
831-
i = zt->next_sz_index;
832-
} else {
833-
/* If this zones next_sz_index is zero then set
834-
* it to the zone we just created and break */
835-
zt->next_sz_index = new_zone->index;
836-
break;
841+
/* Follow this zone's next_sz_index member */
842+
if(zt->next_sz_index != 0) {
843+
i = zt->next_sz_index;
844+
} else {
845+
/* If this zone's next_sz_index is zero then set
846+
* it to the zone we just created and break */
847+
zt->next_sz_index = new_zone->index;
848+
break;
849+
}
837850
}
838851
}
839852
}
840853
}
841854

842855
MASK_ZONE_PTRS(new_zone);
843856

844-
_root->zones_used++;
857+
/* We created a new zone, we did not replace a retired one */
858+
if(index < 0) {
859+
_root->zones_used++;
860+
}
845861

846862
return new_zone;
847863
}
@@ -1395,7 +1411,7 @@ INTERNAL_HIDDEN ASSUME_ALIGNED void *_iso_alloc(iso_alloc_zone_t *zone, size_t s
13951411
} else {
13961412
/* Extra Slow Path: We need a new zone in order
13971413
* to satisfy this allocation request */
1398-
zone = _iso_new_zone(size, true);
1414+
zone = _iso_new_zone(size, true, -1);
13991415

14001416
if(UNLIKELY(zone == NULL)) {
14011417
LOG_AND_ABORT("Failed to create a zone for allocation of %zu bytes", size);

0 commit comments

Comments
 (0)