Commit b815a2e

Merge pull request #116 from struct/strict_spatial_sep
Strict spatial sep
2 parents c71ad7d + 153e039 commit b815a2e

3 files changed

Lines changed: 37 additions & 19 deletions


Makefile

Lines changed: 9 additions & 1 deletion
@@ -119,6 +119,14 @@ MEMCPY_SANITY = -DMEMCPY_SANITY=0
 ## and THREAD_SUPPORT are enabled. Linux only
 UNINIT_READ_SANITY = -DUNINIT_READ_SANITY=0
 
+## By default IsoAlloc may select a zone that holds chunks
+## that are larger than were requested. This is intended
+## to reduce memory consumption and is only done for smaller
+## sizes. Enabling this feature configures IsoAlloc to only
+## use zones that are a perfect fit for the requested size
+## once it's been rounded up to ALIGNMENT size (8)
+STRONG_SIZE_ISOLATION = -DSTRONG_SIZE_ISOLATION=0
+
 ## Enable a sampling mechanism that searches for references
 ## to a chunk currently being freed. The search only overwrites
 ## the first reference to that chunk because searching all
@@ -211,7 +219,7 @@ CFLAGS = $(COMMON_CFLAGS) $(SECURITY_FLAGS) $(BUILD_ERROR_FLAGS) $(HOOKS) $(HEAP
     -std=c11 $(SANITIZER_SUPPORT) $(ALLOC_SANITY) $(MEMCPY_SANITY) $(UNINIT_READ_SANITY) $(CPU_PIN) $(SCHED_GETCPU) \
     $(EXPERIMENTAL) $(UAF_PTR_PAGE) $(VERIFY_BIT_SLOT_CACHE) $(NAMED_MAPPINGS) $(ABORT_ON_NULL) $(NO_ZERO_ALLOCATIONS) \
     $(ABORT_NO_ENTROPY) $(ISO_DTOR_CLEANUP) $(SHUFFLE_BIT_SLOT_CACHE) $(USE_SPINLOCK) $(HUGE_PAGES) $(USE_MLOCK) \
-    $(MEMORY_TAGGING)
+    $(MEMORY_TAGGING) $(STRONG_SIZE_ISOLATION)
 CXXFLAGS = $(COMMON_CFLAGS) -DCPP_SUPPORT=1 -std=c++17 $(SANITIZER_SUPPORT) $(HOOKS)
 EXE_CFLAGS = -fPIE
 GDB_FLAGS = -g -ggdb3 -fno-omit-frame-pointer
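
A minimal sketch of the perfect-fit policy the new Makefile comment describes, under the assumption that the default policy accepts any zone whose chunks are at least as large as the rounded request; align_up() and zone_fits() are illustrative names, not IsoAlloc internals:

#include <stdbool.h>
#include <stddef.h>

#define ALIGNMENT 8

/* Round a request up to the next ALIGNMENT (8 byte) boundary */
static size_t align_up(size_t size) {
    return (size + (ALIGNMENT - 1)) & ~((size_t) (ALIGNMENT - 1));
}

/* Default policy: any zone holding chunks at least as large as the
 * rounded request is usable. Strong isolation: the zone must be a
 * perfect fit for the rounded request */
static bool zone_fits(size_t zone_chunk_size, size_t request, bool strong) {
    size_t needed = align_up(request);

    if(strong) {
        return zone_chunk_size == needed;
    }

    return zone_chunk_size >= needed;
}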

README.md

Lines changed: 3 additions & 2 deletions
@@ -84,8 +84,9 @@ When enabled, the `CPU_PIN` feature will restrict allocations from a given zone
 * When `SHUFFLE_BIT_SLOT_CACHE` is enabled IsoAlloc will shuffle the bit slot cache upon creation (3-4x perf hit)
 * When destroying private zones if `NEVER_REUSE_ZONES` is enabled IsoAlloc won't attempt to repurpose the zone
 * Zones are retired and replaced after they've allocated and freed a specific number of chunks. This is calculated as `ZONE_ALLOC_RETIRE * max_chunk_count_for_zone`.
-* When `MEMORY_TAGGING` is enabled IsoAlloc will create a 1 byte tag for each chunk in private zones. See the [MEMORY_TAGGING.md](MEMORY_TAGGING.md) documentation, or [this test](tests/tagged_ptr_test.cpp) for an example of how to use it.
-* When `MEMCPY_SANITY` is enabled the allocator will hook all calls to `memcpy` and check for out of bounds r/w operations when either src or dst points to a chunk allocated by IsoAlloc
+* `MEMORY_TAGGING` - When enabled IsoAlloc will create a 1 byte tag for each chunk in private zones. See the [MEMORY_TAGGING.md](MEMORY_TAGGING.md) documentation, or [this test](tests/tagged_ptr_test.cpp) for an example of how to use it.
+* `MEMCPY_SANITY` - When enabled the allocator will hook all calls to `memcpy` and check for out of bounds r/w operations when either src or dst points to a chunk allocated by IsoAlloc
+* `STRONG_SIZE_ISOLATION` - Enables a policy that enforces stronger memory isolation by size
 
 ## Building
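
From the caller's perspective only zone selection changes. A hedged usage sketch; iso_alloc() and iso_free() are IsoAlloc's public API, and the behavior described in the comment assumes the policy above:

#include <iso_alloc.h>

int main(void) {
    /* Under the default policy this 32 byte request may be served from
     * a zone created for larger chunks to reduce memory consumption.
     * With -DSTRONG_SIZE_ISOLATION=1 it can only come from a zone
     * whose chunk size is exactly 32 bytes */
    void *p = iso_alloc(32);
    iso_free(p);
    return 0;
}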

src/iso_alloc.c

Lines changed: 25 additions & 16 deletions
@@ -801,11 +801,17 @@ INTERNAL_HIDDEN iso_alloc_zone_t *is_zone_usable(iso_alloc_zone_t *zone, size_t
         return NULL;
     }
 
+#if STRONG_SIZE_ISOLATION
+    if(UNLIKELY(zone->internal == false && size != zone->chunk_size)) {
+        return NULL;
+    }
+#endif
+
     /* This zone may fit this chunk but if the zone was
      * created for chunks more than (N * larger) than the
      * requested allocation size then we would be wasting
      * a lot of memory by using it. We only do this for
-     * sizes beyond ZONE_1024 bytes. In other words we can
+     * sizes larger than 1024 bytes. In other words we can
      * live with some wasted space in zones that manage
      * chunks smaller than ZONE_1024 */
     if(size > ZONE_1024 && zone->chunk_size >= (size << WASTED_SZ_MULTIPLIER_SHIFT)) {
@@ -862,25 +868,23 @@ INTERNAL_HIDDEN iso_alloc_zone_t *find_suitable_zone(size_t size) {
     iso_alloc_zone_t *zone = NULL;
     int32_t i = 0;
 
-    if(IS_ALIGNED(size) != 0) {
-        size = ALIGN_SZ_UP(size);
-    }
-
     size_t orig_size = size;
 
-    /* If we are dealing with very small zones then
+#if !STRONG_SIZE_ISOLATION
+    /* If we are dealing with small zones then
      * find the first zone in the lookup table that
      * could possibly allocate this chunk. We only
-     * do this for sizes up to 256 because we don't
+     * do this for sizes up to 1024 because we don't
      * want 1) to waste memory and 2) weaken our
      * isolation primitives */
-    while(size <= ZONE_256) {
+    while(size <= ZONE_1024) {
         if(_root->zone_lookup_table[size] == 0) {
             size = next_pow2(size);
         } else {
             break;
         }
     }
+#endif
 
     /* Fast path via lookup table */
     if(_root->zone_lookup_table[size] != 0) {
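
The lookup loop above walks size classes via next_pow2(). A self-contained sketch of that rounding, assuming the standard bit-smearing formulation (IsoAlloc's internal helper may differ):

#include <assert.h>
#include <stdint.h>

/* Smallest power of 2 strictly greater than sz */
static uint64_t next_pow2(uint64_t sz) {
    sz |= sz >> 1;
    sz |= sz >> 2;
    sz |= sz >> 4;
    sz |= sz >> 8;
    sz |= sz >> 16;
    sz |= sz >> 32;
    return sz + 1;
}

int main(void) {
    /* A 24 byte request climbs to the 32 byte size class when no
     * zone exists at 24, mirroring the loop above */
    assert(next_pow2(24) == 32);
    /* The same rounding backs the power-of-2 normalization added
     * to _iso_calloc() and _iso_alloc() below */
    assert(next_pow2(100) == 128);
    return 0;
}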
@@ -1021,9 +1025,14 @@ INTERNAL_HIDDEN INLINE void populate_zone_cache(iso_alloc_zone_t *zone) {
 
 INTERNAL_HIDDEN ASSUME_ALIGNED void *_iso_calloc(size_t nmemb, size_t size) {
     unsigned int res;
+
+    if((is_pow2(size)) != true) {
+        size = next_pow2(size);
+    }
+
     size_t sz = nmemb * size;
 
-    if(__builtin_umul_overflow(nmemb, size, &res)) {
+    if(UNLIKELY(__builtin_umul_overflow(nmemb, size, &res))) {
         LOG_AND_ABORT("Call to calloc() will overflow nmemb=%zu size=%zu", nmemb, size);
         return NULL;
     }
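
The rewritten check wraps __builtin_umul_overflow, a GCC/Clang builtin that reports when an unsigned multiply wraps, in IsoAlloc's UNLIKELY() hint. A standalone demo of the pattern; the operands here are chosen to overflow the 32 bit result:

#include <stdio.h>
#include <stdlib.h>

int main(void) {
    size_t nmemb = 0x10000; /* 65536 */
    size_t size = 0x10000;
    unsigned int res;

    /* 65536 * 65536 == 2^32 cannot fit in the 32 bit res, so the
     * builtin returns true and an allocator would abort the request */
    if(__builtin_umul_overflow(nmemb, size, &res)) {
        fprintf(stderr, "calloc(%zu, %zu) would overflow\n", nmemb, size);
        return EXIT_FAILURE;
    }

    printf("%u\n", res);
    return EXIT_SUCCESS;
}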
@@ -1041,6 +1050,11 @@ INTERNAL_HIDDEN ASSUME_ALIGNED void *_iso_alloc(iso_alloc_zone_t *zone, size_t s
     }
 #endif
 
+    /* Sizes are always a power of 2, even for private zones */
+    if(size < SMALL_SZ_MAX && is_pow2(size) != true) {
+        size = next_pow2(size);
+    }
+
     if(UNLIKELY(zone && size > zone->chunk_size)) {
         LOG_AND_ABORT("Private zone %d cannot hold chunks of size %d", zone->index, zone->chunk_size);
     }
@@ -1069,15 +1083,10 @@ INTERNAL_HIDDEN ASSUME_ALIGNED void *_iso_alloc(iso_alloc_zone_t *zone, size_t s
 #if ALLOC_SANITY
     /* We don't sample if we are allocating from a private zone */
     if(zone != NULL) {
-        /* We only sample allocations smaller than an individual
-         * page. We are unlikely to find uninitialized reads on
-         * larger size and it makes tracking them less complex */
-        const size_t sampled_size = ALIGN_SZ_UP(size);
-
-        if(sampled_size < g_page_size && _sane_sampled < MAX_SANE_SAMPLES) {
+        if(size < g_page_size && _sane_sampled < MAX_SANE_SAMPLES) {
             /* If we chose to sample this allocation then
              * _iso_alloc_sample will call UNLOCK_ROOT() */
-            void *ps = _iso_alloc_sample(sampled_size);
+            void *ps = _iso_alloc_sample(size);
 
             if(ps != NULL) {
                 return ps;
