Skip to content

Commit 58c62fd

Browse files
committed
[sanitizer] Improve accuracy of GetTls on x86/s390
The previous code may underestimate the static TLS surplus part, which may cause false positives in LeakSanitizer if a dynamically loaded module uses the surplus and there is an allocation only referenced by a thread's TLS.
1 parent 749e609 commit 58c62fd

File tree

1 file changed

+21
-9
lines changed

1 file changed

+21
-9
lines changed

compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp

+21-9
Original file line numberDiff line numberDiff line change
@@ -304,7 +304,7 @@ static int CollectStaticTlsRanges(struct dl_phdr_info *info, size_t size,
304304
return 0;
305305
}
306306

307-
static void GetStaticTlsRange(uptr *addr, uptr *size) {
307+
static void GetStaticTlsRange(uptr *addr, uptr *size, uptr *align) {
308308
InternalMmapVector<TlsRange> ranges;
309309
dl_iterate_phdr(CollectStaticTlsRanges, &ranges);
310310
uptr len = ranges.size();
@@ -318,17 +318,19 @@ static void GetStaticTlsRange(uptr *addr, uptr *size) {
318318
// This may happen with musl if no module uses PT_TLS.
319319
*addr = 0;
320320
*size = 0;
321+
*align = 1;
321322
return;
322323
}
323324
// Find the maximum consecutive ranges. We consider two modules consecutive if
324325
// the gap is smaller than the alignment. The dynamic loader places static TLS
325326
// blocks this way not to waste space.
326327
uptr l = one;
328+
*align = ranges[l].align;
327329
while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l - 1].align)
328-
--l;
330+
*align = Max(*align, ranges[--l].align);
329331
uptr r = one + 1;
330332
while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r - 1].align)
331-
++r;
333+
*align = Max(*align, ranges[r++].align);
332334
*addr = ranges[l].begin;
333335
*size = ranges[r - 1].end - ranges[l].begin;
334336
}
@@ -406,21 +408,31 @@ static void GetTls(uptr *addr, uptr *size) {
406408
*size = 0;
407409
}
408410
#elif SANITIZER_LINUX
409-
GetStaticTlsRange(addr, size);
411+
uptr align;
412+
GetStaticTlsRange(addr, size, &align);
410413
#if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
414+
if (SANITIZER_GLIBC) {
415+
#if defined(__s390__)
416+
align = Max<uptr>(align, 16);
417+
#else
418+
align = Max<uptr>(align, 64);
419+
#endif
420+
}
421+
const uptr tp = RoundUpTo(*addr + *size, align);
422+
411423
// lsan requires the range to additionally cover the static TLS surplus
412424
// (elf/dl-tls.c defines 1664). Otherwise there may be false positives for
413425
// allocations only referenced by tls in dynamically loaded modules.
414-
if (SANITIZER_GLIBC) {
415-
*addr -= 1664;
416-
*size += 1664;
417-
}
426+
if (SANITIZER_GLIBC)
427+
*size += 1664;
428+
418429
// Extend the range to include the thread control block. On glibc, lsan needs
419430
// the range to include pthread::{specific_1stblock,specific} so that
420431
// allocations only referenced by pthread_setspecific can be scanned. This may
421432
// underestimate by at most TLS_TCB_ALIGN-1 bytes but it should be fine
422433
// because the number of bytes after pthread::specific is larger.
423-
*size += ThreadDescriptorSize();
434+
*addr = tp - RoundUpTo(*size, align);
435+
*size = tp - *addr + ThreadDescriptorSize();
424436
#else
425437
if (SANITIZER_GLIBC)
426438
*size += 1664;

0 commit comments

Comments (0)