Commit bca0cf7

[sanitizer] Support dynamic premapped R/W range in primary allocator.
The main use case for this change is HWASan aliasing mode, which premaps the alias space adjacent to the dynamic shadow. With this change, the primary allocator can allocate from the alias space instead of a separate region.

Reviewed By: vitalybuka, eugenis

Differential Revision: https://reviews.llvm.org/D98293
1 parent 4f7fa06 commit bca0cf7

4 files changed: +118 -30 lines

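For orientation, a minimal usage sketch of the new parameter (not part of this commit): "Allocator" stands for a CombinedAllocator whose primary is a SizeClassAllocator64 with a dynamic base, kHeapSize stands for that primary's kSpaceSize, and the release interval constant is borrowed from the tests.

// Sketch only: the caller premaps the whole heap R/W and hands its base to
// Init. Passing 0 (or omitting the argument) keeps the old behavior, where
// the primary reserves its own region.
uptr heap_start = reinterpret_cast<uptr>(
    MmapNoReserveOrDie(kHeapSize, "premapped heap"));
static Allocator allocator;
allocator.Init(kReleaseToOSIntervalNever, heap_start);

HWASan's aliasing mode would pass the alias space it premapped next to the shadow rather than a fresh mapping, but the call shape is the same.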

compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h

+2-2
@@ -35,9 +35,9 @@ class CombinedAllocator {
     secondary_.InitLinkerInitialized();
   }
 
-  void Init(s32 release_to_os_interval_ms) {
+  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
     stats_.Init();
-    primary_.Init(release_to_os_interval_ms);
+    primary_.Init(release_to_os_interval_ms, heap_start);
     secondary_.Init();
   }

compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h

+2-1
@@ -119,7 +119,8 @@ class SizeClassAllocator32 {
   typedef SizeClassAllocator32<Params> ThisT;
   typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
 
-  void Init(s32 release_to_os_interval_ms) {
+  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
+    CHECK(!heap_start);
     possible_regions.Init();
     internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
   }

compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h

+53-15
@@ -69,25 +69,45 @@ class SizeClassAllocator64 {
     return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
   }
 
-  void Init(s32 release_to_os_interval_ms) {
+  // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W
+  // at heap_start and places the heap there. This mode requires kSpaceBeg ==
+  // ~(uptr)0.
+  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
     uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
-    if (kUsingConstantSpaceBeg) {
-      CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
-      CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize,
-                                             PrimaryAllocatorName, kSpaceBeg));
+    PremappedHeap = heap_start != 0;
+    if (PremappedHeap) {
+      CHECK(!kUsingConstantSpaceBeg);
+      NonConstSpaceBeg = heap_start;
+      uptr RegionInfoSize = AdditionalSize();
+      RegionInfoSpace =
+          address_range.Init(RegionInfoSize, PrimaryAllocatorName);
+      CHECK_NE(RegionInfoSpace, ~(uptr)0);
+      CHECK_EQ(RegionInfoSpace,
+               address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,
+                                      "SizeClassAllocator: region info"));
+      MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize);
     } else {
-      // Combined allocator expects that an 2^N allocation is always aligned to
-      // 2^N. For this to work, the start of the space needs to be aligned as
-      // high as the largest size class (which also needs to be a power of 2).
-      NonConstSpaceBeg = address_range.InitAligned(
-          TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
-      CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+      if (kUsingConstantSpaceBeg) {
+        CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
+        CHECK_EQ(kSpaceBeg,
+                 address_range.Init(TotalSpaceSize, PrimaryAllocatorName,
+                                    kSpaceBeg));
+      } else {
+        // Combined allocator expects that an 2^N allocation is always aligned
+        // to 2^N. For this to work, the start of the space needs to be aligned
+        // as high as the largest size class (which also needs to be a power of
+        // 2).
+        NonConstSpaceBeg = address_range.InitAligned(
+            TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
+        CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+      }
+      RegionInfoSpace = SpaceEnd();
+      MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(),
+                           "SizeClassAllocator: region info");
     }
     SetReleaseToOSIntervalMs(release_to_os_interval_ms);
-    MapWithCallbackOrDie(SpaceEnd(), AdditionalSize(),
-                         "SizeClassAllocator: region info");
     // Check that the RegionInfo array is aligned on the CacheLine size.
-    DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0);
+    DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0);
   }
 
   s32 ReleaseToOSIntervalMs() const {

@@ -596,6 +616,11 @@ class SizeClassAllocator64 {
 
   atomic_sint32_t release_to_os_interval_ms_;
 
+  uptr RegionInfoSpace;
+
+  // True if the user has already mapped the entire heap R/W.
+  bool PremappedHeap;
+
   struct Stats {
     uptr n_allocated;
     uptr n_freed;

@@ -625,7 +650,7 @@ class SizeClassAllocator64 {
 
   RegionInfo *GetRegionInfo(uptr class_id) const {
     DCHECK_LT(class_id, kNumClasses);
-    RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd());
+    RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace);
     return &regions[class_id];
   }
 
@@ -650,6 +675,9 @@ class SizeClassAllocator64 {
   }
 
   bool MapWithCallback(uptr beg, uptr size, const char *name) {
+    if (PremappedHeap)
+      return beg >= NonConstSpaceBeg &&
+             beg + size <= NonConstSpaceBeg + kSpaceSize;
     uptr mapped = address_range.Map(beg, size, name);
     if (UNLIKELY(!mapped))
       return false;

@@ -659,11 +687,18 @@ class SizeClassAllocator64 {
   }
 
   void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
+    if (PremappedHeap) {
+      CHECK_GE(beg, NonConstSpaceBeg);
+      CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);
+      return;
+    }
     CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
     MapUnmapCallback().OnMap(beg, size);
   }
 
   void UnmapWithCallbackOrDie(uptr beg, uptr size) {
+    if (PremappedHeap)
+      return;
     MapUnmapCallback().OnUnmap(beg, size);
     address_range.Unmap(beg, size);
   }

@@ -832,6 +867,9 @@ class SizeClassAllocator64 {
 
   // Attempts to release RAM occupied by freed chunks back to OS. The region is
   // expected to be locked.
+  //
+  // TODO(morehouse): Support a callback on memory release so HWASan can release
+  // aliases as well.
   void MaybeReleaseToOS(uptr class_id, bool force) {
     RegionInfo *region = GetRegionInfo(class_id);
     const uptr chunk_size = ClassIdToSize(class_id);
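Note that the premapped path refuses a compile-time space base: Init CHECKs !kUsingConstantSpaceBeg, so only configurations whose kSpaceBeg is ~(uptr)0 (a dynamically chosen base, like the Allocator64Dynamic used in the tests below) may pass a nonzero heap_start. A rough sketch of such a parameter struct follows; the name, sizes, and type choices are placeholders, not values taken from this commit.

// Hypothetical dynamic-base configuration compatible with premapping.
struct AP64Premapped {
  static const uptr kSpaceBeg = ~(uptr)0;           // dynamic base: required here
  static const uptr kSpaceSize = 0x40000000000ULL;  // must match the premapped R/W range
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64Premapped> PremappedPrimary;

With kSpaceBeg == ~(uptr)0, kUsingConstantSpaceBeg is false, so Init(interval_ms, heap_start) accepts a nonzero heap_start and maps only the region-info array itself; the heap proper is assumed to be mapped already.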

compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp

+61-12
@@ -196,9 +196,9 @@ TEST(SanitizerCommon, DenseSizeClassMap) {
 }
 
 template <class Allocator>
-void TestSizeClassAllocator() {
+void TestSizeClassAllocator(uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);

@@ -265,6 +265,25 @@ void TestSizeClassAllocator() {
 }
 
 #if SANITIZER_CAN_USE_ALLOCATOR64
+
+// Allocates kAllocatorSize aligned bytes on construction and frees it on
+// destruction.
+class ScopedPremappedHeap {
+ public:
+  ScopedPremappedHeap() {
+    BasePtr = MmapNoReserveOrDie(2 * kAllocatorSize, "preallocated heap");
+    AlignedAddr = RoundUpTo(reinterpret_cast<uptr>(BasePtr), kAllocatorSize);
+  }
+
+  ~ScopedPremappedHeap() { UnmapOrDie(BasePtr, kAllocatorSize); }
+
+  uptr Addr() { return AlignedAddr; }
+
+ private:
+  void *BasePtr;
+  uptr AlignedAddr;
+};
+
 // These tests can fail on Windows if memory is somewhat full and lit happens
 // to run them all at the same time. FIXME: Make them not flaky and reenable.
 #if !SANITIZER_WINDOWS

@@ -276,6 +295,11 @@ TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
   TestSizeClassAllocator<Allocator64Dynamic>();
 }
 
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremapped) {
+  ScopedPremappedHeap h;
+  TestSizeClassAllocator<Allocator64Dynamic>(h.Addr());
+}
+
 #if !SANITIZER_ANDROID
 //FIXME(kostyak): find values so that those work on Android as well.
 TEST(SanitizerCommon, SizeClassAllocator64Compact) {

@@ -320,9 +344,9 @@ TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {
 }
 
 template <class Allocator>
-void SizeClassAllocatorMetadataStress() {
+void SizeClassAllocatorMetadataStress(uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);

@@ -361,6 +385,11 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
   SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
 }
 
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedMetadataStress) {
+  ScopedPremappedHeap h;
+  SizeClassAllocatorMetadataStress<Allocator64Dynamic>(h.Addr());
+}
+
 #if !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
   SizeClassAllocatorMetadataStress<Allocator64Compact>();

@@ -374,9 +403,10 @@ TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
 }
 
 template <class Allocator>
-void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
+void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize,
+                                           uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);

@@ -408,6 +438,11 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
   SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
       1ULL << (SANITIZER_ANDROID ? 31 : 33));
 }
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedGetBlockBegin) {
+  ScopedPremappedHeap h;
+  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
+      1ULL << (SANITIZER_ANDROID ? 31 : 33), h.Addr());
+}
 #if !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
   SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);

@@ -624,10 +659,10 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
 }
 
 template <class PrimaryAllocator>
-void TestCombinedAllocator() {
+void TestCombinedAllocator(uptr premapped_heap = 0) {
   typedef CombinedAllocator<PrimaryAllocator> Allocator;
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   std::mt19937 r;
 
   typename Allocator::AllocatorCache cache;

@@ -698,6 +733,11 @@ TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
   TestCombinedAllocator<Allocator64Dynamic>();
 }
 
+TEST(SanitizerCommon, CombinedAllocator64DynamicPremapped) {
+  ScopedPremappedHeap h;
+  TestCombinedAllocator<Allocator64Dynamic>(h.Addr());
+}
+
 #if !SANITIZER_ANDROID
 TEST(SanitizerCommon, CombinedAllocator64Compact) {
   TestCombinedAllocator<Allocator64Compact>();

@@ -714,12 +754,12 @@ TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(CombinedAllocator32Compact)) {
 }
 
 template <class Allocator>
-void TestSizeClassAllocatorLocalCache() {
+void TestSizeClassAllocatorLocalCache(uptr premapped_heap = 0) {
   using AllocatorCache = typename Allocator::AllocatorCache;
   AllocatorCache cache;
   Allocator *a = new Allocator();
 
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
 

@@ -759,6 +799,11 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
   TestSizeClassAllocatorLocalCache<Allocator64Dynamic>();
 }
 
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedLocalCache) {
+  ScopedPremappedHeap h;
+  TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(h.Addr());
+}
+
 #if !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
   TestSizeClassAllocatorLocalCache<Allocator64Compact>();

@@ -891,9 +936,9 @@ void IterationTestCallback(uptr chunk, void *arg) {
 }
 
 template <class Allocator>
-void TestSizeClassAllocatorIteration() {
+void TestSizeClassAllocatorIteration(uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);

@@ -942,6 +987,10 @@ TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
 TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
   TestSizeClassAllocatorIteration<Allocator64Dynamic>();
 }
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedIteration) {
+  ScopedPremappedHeap h;
+  TestSizeClassAllocatorIteration<Allocator64Dynamic>(h.Addr());
+}
 #endif
 #endif
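One note on the test helper: ScopedPremappedHeap maps 2 * kAllocatorSize bytes so that rounding the base up to kAllocatorSize alignment always leaves a full kAllocatorSize-byte window inside the mapping, giving the dynamic allocator a suitably aligned premapped space. A tiny illustrative check of that guarantee (a hypothetical test, not part of this commit):

TEST(SanitizerCommon, ScopedPremappedHeapAddrIsAligned) {
  ScopedPremappedHeap h;
  // RoundUpTo in the constructor guarantees the address handed to the
  // allocator is kAllocatorSize-aligned and still lies within the
  // 2 * kAllocatorSize mapping.
  EXPECT_TRUE(IsAligned(h.Addr(), kAllocatorSize));
}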
