author     Ed Schouten <ed@FreeBSD.org>  2013-05-27 18:27:12 +0000
committer  Ed Schouten <ed@FreeBSD.org>  2013-05-27 18:27:12 +0000
commit     11023dc647fd8f41418da90d59db138400d0f334 (patch)
tree       50f0ab80515576749ef638dd0766b70a65904bfa /lib/tsan
parent     58aabf08b77d221489f10e274812ec60917c21a8 (diff)
download   src-11023dc647fd8f41418da90d59db138400d0f334.tar.gz
           src-11023dc647fd8f41418da90d59db138400d0f334.zip
Import compiler-rt r182741. (tag: vendor/compiler-rt/compiler-rt-r182741)
Notes:
    svn path=/vendor/compiler-rt/dist/; revision=251034
    svn path=/vendor/compiler-rt/compiler-rt-r182741/; revision=251036; tag=vendor/compiler-rt/compiler-rt-r182741
Diffstat (limited to 'lib/tsan')
-rw-r--r--  lib/tsan/CMakeLists.txt | 7
-rw-r--r--  lib/tsan/Makefile.old | 15
-rwxr-xr-x  lib/tsan/analyze_libtsan.sh | 4
-rwxr-xr-x  lib/tsan/check_cmake.sh | 5
-rwxr-xr-x  lib/tsan/go/buildgo.sh | 5
-rw-r--r--  lib/tsan/go/test.c | 46
-rw-r--r--  lib/tsan/go/tsan_go.cc | 96
-rw-r--r--  lib/tsan/lit_tests/CMakeLists.txt | 5
-rw-r--r--  lib/tsan/lit_tests/SharedLibs/lit.local.cfg | 4
-rw-r--r--  lib/tsan/lit_tests/SharedLibs/load_shared_lib-so.cc | 22
-rw-r--r--  lib/tsan/lit_tests/Unit/lit.cfg | 5
-rw-r--r--  lib/tsan/lit_tests/Unit/lit.site.cfg.in | 4
-rw-r--r--  lib/tsan/lit_tests/aligned_vs_unaligned_race.cc | 34
-rw-r--r--  lib/tsan/lit_tests/atomic_free.cc | 19
-rw-r--r--  lib/tsan/lit_tests/atomic_free2.cc | 19
-rw-r--r--  lib/tsan/lit_tests/atomic_norace.cc | 61
-rw-r--r--  lib/tsan/lit_tests/atomic_race.cc | 80
-rw-r--r--  lib/tsan/lit_tests/atomic_stack.cc | 29
-rw-r--r--  lib/tsan/lit_tests/benign_race.cc | 39
-rw-r--r--  lib/tsan/lit_tests/free_race.c | 3
-rw-r--r--  lib/tsan/lit_tests/free_race2.c | 4
-rw-r--r--  lib/tsan/lit_tests/inlined_memcpy_race.cc | 55
-rw-r--r--  lib/tsan/lit_tests/java.h | 2
-rw-r--r--  lib/tsan/lit_tests/java_lock.cc | 2
-rw-r--r--  lib/tsan/lit_tests/java_lock_rec.cc | 54
-rw-r--r--  lib/tsan/lit_tests/java_lock_rec_race.cc | 48
-rw-r--r--  lib/tsan/lit_tests/java_rwlock.cc | 2
-rw-r--r--  lib/tsan/lit_tests/lit.cfg | 24
-rw-r--r--  lib/tsan/lit_tests/lit.site.cfg.in | 1
-rw-r--r--  lib/tsan/lit_tests/load_shared_lib.cc | 44
-rw-r--r--  lib/tsan/lit_tests/longjmp.cc | 22
-rw-r--r--  lib/tsan/lit_tests/longjmp2.cc | 24
-rw-r--r--  lib/tsan/lit_tests/longjmp3.cc | 48
-rw-r--r--  lib/tsan/lit_tests/longjmp4.cc | 51
-rw-r--r--  lib/tsan/lit_tests/malloc_overflow.cc | 22
-rw-r--r--  lib/tsan/lit_tests/malloc_stack.cc | 25
-rw-r--r--  lib/tsan/lit_tests/memcpy_race.cc | 6
-rw-r--r--  lib/tsan/lit_tests/mutex_destroy_locked.cc | 1
-rw-r--r--  lib/tsan/lit_tests/mutexset7.cc | 1
-rw-r--r--  lib/tsan/lit_tests/mutexset8.cc | 39
-rw-r--r--  lib/tsan/lit_tests/oob_race.cc | 24
-rw-r--r--  lib/tsan/lit_tests/race_on_heap.cc | 4
-rw-r--r--  lib/tsan/lit_tests/race_on_mutex.c | 2
-rw-r--r--  lib/tsan/lit_tests/race_on_mutex2.c | 24
-rw-r--r--  lib/tsan/lit_tests/race_on_write.cc | 39
-rw-r--r--  lib/tsan/lit_tests/signal_errno.cc | 7
-rw-r--r--  lib/tsan/lit_tests/signal_malloc.cc | 3
-rw-r--r--  lib/tsan/lit_tests/simple_race.cc | 1
-rwxr-xr-x  lib/tsan/lit_tests/test_output.sh | 15
-rw-r--r--  lib/tsan/lit_tests/thread_end_with_ignore.cc | 19
-rw-r--r--  lib/tsan/lit_tests/thread_end_with_ignore2.cc | 9
-rw-r--r--  lib/tsan/lit_tests/thread_leak3.c | 3
-rw-r--r--  lib/tsan/lit_tests/thread_leak4.c | 18
-rw-r--r--  lib/tsan/lit_tests/thread_leak5.c | 19
-rw-r--r--  lib/tsan/lit_tests/thread_name.cc | 4
-rw-r--r--  lib/tsan/lit_tests/tsan-vs-gvn.cc | 38
-rw-r--r--  lib/tsan/lit_tests/unaligned_norace.cc | 84
-rw-r--r--  lib/tsan/lit_tests/unaligned_race.cc | 135
-rw-r--r--  lib/tsan/lit_tests/vptr_harmful_race.cc | 4
-rw-r--r--  lib/tsan/lit_tests/vptr_harmful_race2.cc | 51
-rw-r--r--  lib/tsan/rtl/CMakeLists.txt | 29
-rw-r--r--  lib/tsan/rtl/Makefile.mk | 2
-rw-r--r--  lib/tsan/rtl/Makefile.old | 8
-rw-r--r--  lib/tsan/rtl/tsan.syms | 5
-rw-r--r--  lib/tsan/rtl/tsan_defs.h | 7
-rw-r--r--  lib/tsan/rtl/tsan_fd.cc | 15
-rw-r--r--  lib/tsan/rtl/tsan_flags.cc | 8
-rw-r--r--  lib/tsan/rtl/tsan_flags.h | 10
-rw-r--r--  lib/tsan/rtl/tsan_interceptors.cc | 401
-rw-r--r--  lib/tsan/rtl/tsan_interface.cc | 58
-rw-r--r--  lib/tsan/rtl/tsan_interface.h | 10
-rw-r--r--  lib/tsan/rtl/tsan_interface_ann.cc | 83
-rw-r--r--  lib/tsan/rtl/tsan_interface_ann.h | 2
-rw-r--r--  lib/tsan/rtl/tsan_interface_atomic.cc | 98
-rw-r--r--  lib/tsan/rtl/tsan_interface_inl.h | 32
-rw-r--r--  lib/tsan/rtl/tsan_interface_java.cc | 26
-rw-r--r--  lib/tsan/rtl/tsan_interface_java.h | 13
-rw-r--r--  lib/tsan/rtl/tsan_md5.cc | 2
-rw-r--r--  lib/tsan/rtl/tsan_mman.cc | 143
-rw-r--r--  lib/tsan/rtl/tsan_mman.h | 6
-rw-r--r--  lib/tsan/rtl/tsan_mutex.cc | 4
-rw-r--r--  lib/tsan/rtl/tsan_mutexset.h | 2
-rw-r--r--  lib/tsan/rtl/tsan_platform.h | 34
-rw-r--r--  lib/tsan/rtl/tsan_platform_linux.cc | 227
-rw-r--r--  lib/tsan/rtl/tsan_platform_mac.cc | 19
-rw-r--r--  lib/tsan/rtl/tsan_platform_windows.cc | 19
-rw-r--r--  lib/tsan/rtl/tsan_report.cc | 74
-rw-r--r--  lib/tsan/rtl/tsan_report.h | 3
-rw-r--r--  lib/tsan/rtl/tsan_rtl.cc | 338
-rw-r--r--  lib/tsan/rtl/tsan_rtl.h | 284
-rw-r--r--  lib/tsan/rtl/tsan_rtl_amd64.S | 137
-rw-r--r--  lib/tsan/rtl/tsan_rtl_mutex.cc | 90
-rw-r--r--  lib/tsan/rtl/tsan_rtl_report.cc | 229
-rw-r--r--  lib/tsan/rtl/tsan_rtl_thread.cc | 474
-rw-r--r--  lib/tsan/rtl/tsan_stat.cc | 82
-rw-r--r--  lib/tsan/rtl/tsan_stat.h | 77
-rw-r--r--  lib/tsan/rtl/tsan_suppressions.cc | 57
-rw-r--r--  lib/tsan/rtl/tsan_suppressions.h | 11
-rw-r--r--  lib/tsan/rtl/tsan_symbolize.cc | 71
-rw-r--r--  lib/tsan/rtl/tsan_symbolize.h | 1
-rw-r--r--  lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc | 3
-rw-r--r--  lib/tsan/rtl/tsan_sync.cc | 66
-rw-r--r--  lib/tsan/rtl/tsan_sync.h | 2
-rw-r--r--  lib/tsan/rtl/tsan_update_shadow_word_inl.h | 16
-rw-r--r--  lib/tsan/rtl/tsan_vector.h | 7
-rw-r--r--  lib/tsan/tests/CMakeLists.txt | 6
-rw-r--r--  lib/tsan/tests/rtl/tsan_test_util_linux.cc | 2
-rw-r--r--  lib/tsan/tests/unit/CMakeLists.txt | 1
-rw-r--r--  lib/tsan/tests/unit/tsan_mman_test.cc | 66
-rw-r--r--  lib/tsan/tests/unit/tsan_mutexset_test.cc | 3
-rw-r--r--  lib/tsan/tests/unit/tsan_platform_test.cc | 89
-rw-r--r--  lib/tsan/tests/unit/tsan_shadow_test.cc | 2
-rw-r--r--  lib/tsan/tests/unit/tsan_suppressions_test.cc | 8
113 files changed, 3738 insertions, 1204 deletions
diff --git a/lib/tsan/CMakeLists.txt b/lib/tsan/CMakeLists.txt
index 34e3a2ea524e..282889567509 100644
--- a/lib/tsan/CMakeLists.txt
+++ b/lib/tsan/CMakeLists.txt
@@ -2,7 +2,12 @@
include_directories(..)
-set(TSAN_CFLAGS ${SANITIZER_COMMON_CFLAGS})
+# SANITIZER_COMMON_CFLAGS contains -fPIC, but it's performance-critical for
+# TSan runtime to be built with -fPIE to reduce the number of register spills.
+set(TSAN_CFLAGS
+ ${SANITIZER_COMMON_CFLAGS}
+ -fPIE
+ -fno-rtti)
# FIXME: Add support for compile flags:
# -Wframe-larger-than=512,
# -Wglobal-constructors,
diff --git a/lib/tsan/Makefile.old b/lib/tsan/Makefile.old
index 593482fbb5da..b548f5d2f6ee 100644
--- a/lib/tsan/Makefile.old
+++ b/lib/tsan/Makefile.old
@@ -1,13 +1,16 @@
DEBUG=0
LDFLAGS=-ldl -lpthread -pie
-CXXFLAGS = -fPIE -g -Wall -Werror -DTSAN_DEBUG=$(DEBUG) -DSANITIZER_DEBUG=$(DEBUG)
+CXXFLAGS = -fPIE -fno-rtti -g -Wall -Werror \
+ -DGTEST_HAS_RTTI=0 -DTSAN_DEBUG=$(DEBUG) -DSANITIZER_DEBUG=$(DEBUG)
+CLANG=clang
+FILECHECK=FileCheck
# Silence warnings that Clang produces for gtest code.
# Use -Wno-attributes so that gcc doesn't complain about unknown warning types.
CXXFLAGS += -Wno-attributes
ifeq ($(DEBUG), 0)
CXXFLAGS += -O3
endif
-ifeq ($(CXX), clang++)
+ifeq ($(CXX), $(CLANG)++)
CXXFLAGS+= -Wno-unused-private-field -Wno-static-in-inline -Wgnu
endif
@@ -54,16 +57,16 @@ test: libtsan tsan_test
run: all
(ulimit -s 8192; ./tsan_test)
- ./lit_tests/test_output.sh
+ CC=$(CLANG) CXX=$(CLANG)++ FILECHECK=$(FILECHECK) ./lit_tests/test_output.sh
presubmit:
../sanitizer_common/scripts/check_lint.sh
# Debug build with clang.
$(MAKE) -f Makefile.old clean
- $(MAKE) -f Makefile.old run DEBUG=1 -j 16 CC=clang CXX=clang++
+ $(MAKE) -f Makefile.old run DEBUG=1 -j 16 CC=$(CLANG) CXX=$(CLANG)++
# Release build with clang.
$(MAKE) -f Makefile.old clean
- $(MAKE) -f Makefile.old run DEBUG=0 -j 16 CC=clang CXX=clang++
+ $(MAKE) -f Makefile.old run DEBUG=0 -j 16 CC=$(CLANG) CXX=$(CLANG)++
# Debug build with gcc
$(MAKE) -f Makefile.old clean
$(MAKE) -f Makefile.old run DEBUG=1 -j 16 CC=gcc CXX=g++
@@ -93,3 +96,5 @@ clean:
rm -f asm_*.s libtsan.nm libtsan.objdump */*.o tsan_test
rm -rf $(GTEST_BUILD_DIR)
$(MAKE) clean -C rtl -f Makefile.old
+ rm -f go/*.s
+ rm -rf build
diff --git a/lib/tsan/analyze_libtsan.sh b/lib/tsan/analyze_libtsan.sh
index e0805610714b..705e4c5460f2 100755
--- a/lib/tsan/analyze_libtsan.sh
+++ b/lib/tsan/analyze_libtsan.sh
@@ -4,7 +4,7 @@ set -e
set -u
get_asm() {
- grep tsan_$1.: -A 10000 libtsan.objdump | \
+ grep __tsan_$1.: -A 10000 libtsan.objdump | \
awk "/[^:]$/ {print;} />:/ {c++; if (c == 2) {exit}}"
}
@@ -27,7 +27,7 @@ for f in $list; do
file=asm_$f.s
get_asm $f > $file
tot=$(wc -l < $file)
- size=$(grep $f$ libtsan.nm | awk --non-decimal-data '{print ("0x"$2)+0}')
+ size=$(grep __tsan_$f$ libtsan.nm | awk --non-decimal-data '{print ("0x"$2)+0}')
rsp=$(grep '(%rsp)' $file | wc -l)
push=$(grep 'push' $file | wc -l)
pop=$(grep 'pop' $file | wc -l)
diff --git a/lib/tsan/check_cmake.sh b/lib/tsan/check_cmake.sh
index 5f11e727f091..52c97c339096 100755
--- a/lib/tsan/check_cmake.sh
+++ b/lib/tsan/check_cmake.sh
@@ -7,5 +7,6 @@ mkdir -p $ROOT/build
cd $ROOT/build
CC=clang CXX=clang++ cmake -DLLVM_ENABLE_WERROR=ON -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=ON $ROOT/../../../..
make -j64
-make check-tsan check-sanitizer -j64
-
+make check-sanitizer -j64
+make check-tsan -j64
+make check-asan -j64
diff --git a/lib/tsan/go/buildgo.sh b/lib/tsan/go/buildgo.sh
index a153afd6ee8e..51f1a7975b57 100755
--- a/lib/tsan/go/buildgo.sh
+++ b/lib/tsan/go/buildgo.sh
@@ -20,6 +20,7 @@ SRCS="
../../sanitizer_common/sanitizer_flags.cc
../../sanitizer_common/sanitizer_libc.cc
../../sanitizer_common/sanitizer_printf.cc
+ ../../sanitizer_common/sanitizer_thread_registry.cc
"
if [ "`uname -a | grep Linux`" != "" ]; then
@@ -29,7 +30,9 @@ if [ "`uname -a | grep Linux`" != "" ]; then
SRCS+="
../rtl/tsan_platform_linux.cc
../../sanitizer_common/sanitizer_posix.cc
+ ../../sanitizer_common/sanitizer_posix_libcdep.cc
../../sanitizer_common/sanitizer_linux.cc
+ ../../sanitizer_common/sanitizer_linux_libcdep.cc
"
elif [ "`uname -a | grep Darwin`" != "" ]; then
SUFFIX="darwin_amd64"
@@ -60,7 +63,7 @@ for F in $SRCS; do
cat $F >> gotsan.cc
done
-FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -m64 -Wall -Werror -fno-exceptions -DTSAN_GO -DSANITIZER_GO -DTSAN_SHADOW_COUNT=4 $OSCFLAGS"
+FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -m64 -Wall -Werror -fno-exceptions -fno-rtti -DTSAN_GO -DSANITIZER_GO -DTSAN_SHADOW_COUNT=4 $OSCFLAGS"
if [ "$DEBUG" == "" ]; then
FLAGS+=" -DTSAN_DEBUG=0 -O3 -fomit-frame-pointer"
else
diff --git a/lib/tsan/go/test.c b/lib/tsan/go/test.c
index 2414a1e9925f..902dfc915582 100644
--- a/lib/tsan/go/test.c
+++ b/lib/tsan/go/test.c
@@ -13,20 +13,20 @@
#include <stdio.h>
-void __tsan_init();
+void __tsan_init(void **thr);
void __tsan_fini();
void __tsan_map_shadow(void *addr, unsigned long size);
-void __tsan_go_start(int pgoid, int chgoid, void *pc);
-void __tsan_go_end(int goid);
-void __tsan_read(int goid, void *addr, void *pc);
-void __tsan_write(int goid, void *addr, void *pc);
-void __tsan_func_enter(int goid, void *pc);
-void __tsan_func_exit(int goid);
-void __tsan_malloc(int goid, void *p, unsigned long sz, void *pc);
+void __tsan_go_start(void *thr, void **chthr, void *pc);
+void __tsan_go_end(void *thr);
+void __tsan_read(void *thr, void *addr, void *pc);
+void __tsan_write(void *thr, void *addr, void *pc);
+void __tsan_func_enter(void *thr, void *pc);
+void __tsan_func_exit(void *thr);
+void __tsan_malloc(void *thr, void *p, unsigned long sz, void *pc);
void __tsan_free(void *p);
-void __tsan_acquire(int goid, void *addr);
-void __tsan_release(int goid, void *addr);
-void __tsan_release_merge(int goid, void *addr);
+void __tsan_acquire(void *thr, void *addr);
+void __tsan_release(void *thr, void *addr);
+void __tsan_release_merge(void *thr, void *addr);
int __tsan_symbolize(void *pc, char **img, char **rtn, char **file, int *l) {
return 0;
@@ -35,19 +35,21 @@ int __tsan_symbolize(void *pc, char **img, char **rtn, char **file, int *l) {
char buf[10];
int main(void) {
- __tsan_init();
+ void *thr0 = 0;
+ __tsan_init(&thr0);
__tsan_map_shadow(buf, sizeof(buf) + 4096);
- __tsan_func_enter(0, &main);
- __tsan_malloc(0, buf, 10, 0);
- __tsan_release(0, buf);
- __tsan_release_merge(0, buf);
- __tsan_go_start(0, 1, 0);
- __tsan_write(1, buf, 0);
- __tsan_acquire(1, buf);
- __tsan_go_end(1);
- __tsan_read(0, buf, 0);
+ __tsan_func_enter(thr0, &main);
+ __tsan_malloc(thr0, buf, 10, 0);
+ __tsan_release(thr0, buf);
+ __tsan_release_merge(thr0, buf);
+ void *thr1 = 0;
+ __tsan_go_start(thr0, &thr1, 0);
+ __tsan_write(thr1, buf, 0);
+ __tsan_acquire(thr1, buf);
+ __tsan_go_end(thr1);
+ __tsan_read(thr0, buf, 0);
__tsan_free(buf);
- __tsan_func_exit(0);
+ __tsan_func_exit(thr0);
__tsan_fini();
return 0;
}
diff --git a/lib/tsan/go/tsan_go.cc b/lib/tsan/go/tsan_go.cc
index 360608a0cf1b..957d58211281 100644
--- a/lib/tsan/go/tsan_go.cc
+++ b/lib/tsan/go/tsan_go.cc
@@ -18,10 +18,6 @@
namespace __tsan {
-const int kMaxGoroutinesEver = 128*1024;
-
-static ThreadState *goroutines[kMaxGoroutinesEver];
-
void InitializeInterceptors() {
}
@@ -80,20 +76,18 @@ ReportStack *SymbolizeCode(uptr addr) {
extern "C" {
-static void AllocGoroutine(int tid) {
- if (tid >= kMaxGoroutinesEver) {
- Printf("FATAL: Reached goroutine limit\n");
- Die();
- }
+static ThreadState *main_thr;
+
+static ThreadState *AllocGoroutine() {
ThreadState *thr = (ThreadState*)internal_alloc(MBlockThreadContex,
sizeof(ThreadState));
internal_memset(thr, 0, sizeof(*thr));
- goroutines[tid] = thr;
+ return thr;
}
-void __tsan_init() {
- AllocGoroutine(0);
- ThreadState *thr = goroutines[0];
+void __tsan_init(ThreadState **thrp) {
+ ThreadState *thr = AllocGoroutine();
+ main_thr = *thrp = thr;
thr->in_rtl++;
Initialize(thr);
thr->in_rtl--;
@@ -101,7 +95,7 @@ void __tsan_init() {
void __tsan_fini() {
// FIXME: Not necessary thread 0.
- ThreadState *thr = goroutines[0];
+ ThreadState *thr = main_thr;
thr->in_rtl++;
int res = Finalize(thr);
thr->in_rtl--;
@@ -112,44 +106,37 @@ void __tsan_map_shadow(uptr addr, uptr size) {
MapShadow(addr, size);
}
-void __tsan_read(int goid, void *addr, void *pc) {
- ThreadState *thr = goroutines[goid];
- MemoryAccess(thr, (uptr)pc, (uptr)addr, 0, false);
+void __tsan_read(ThreadState *thr, void *addr, void *pc) {
+ MemoryRead(thr, (uptr)pc, (uptr)addr, kSizeLog1);
}
-void __tsan_write(int goid, void *addr, void *pc) {
- ThreadState *thr = goroutines[goid];
- MemoryAccess(thr, (uptr)pc, (uptr)addr, 0, true);
+void __tsan_write(ThreadState *thr, void *addr, void *pc) {
+ MemoryWrite(thr, (uptr)pc, (uptr)addr, kSizeLog1);
}
-void __tsan_read_range(int goid, void *addr, uptr size, uptr step, void *pc) {
- ThreadState *thr = goroutines[goid];
- for (uptr i = 0; i < size; i += step)
- MemoryAccess(thr, (uptr)pc, (uptr)addr + i, 0, false);
+void __tsan_read_range(ThreadState *thr, void *addr, uptr size, uptr step,
+ void *pc) {
+ MemoryAccessRangeStep(thr, (uptr)pc, (uptr)addr, size, step, false);
}
-void __tsan_write_range(int goid, void *addr, uptr size, uptr step, void *pc) {
- ThreadState *thr = goroutines[goid];
- for (uptr i = 0; i < size; i += step)
- MemoryAccess(thr, (uptr)pc, (uptr)addr + i, 0, true);
+void __tsan_write_range(ThreadState *thr, void *addr, uptr size, uptr step,
+ void *pc) {
+ MemoryAccessRangeStep(thr, (uptr)pc, (uptr)addr, size, step, true);
}
-void __tsan_func_enter(int goid, void *pc) {
- ThreadState *thr = goroutines[goid];
+void __tsan_func_enter(ThreadState *thr, void *pc) {
FuncEntry(thr, (uptr)pc);
}
-void __tsan_func_exit(int goid) {
- ThreadState *thr = goroutines[goid];
+void __tsan_func_exit(ThreadState *thr) {
FuncExit(thr);
}
-void __tsan_malloc(int goid, void *p, uptr sz, void *pc) {
- ThreadState *thr = goroutines[goid];
+void __tsan_malloc(ThreadState *thr, void *p, uptr sz, void *pc) {
if (thr == 0) // probably before __tsan_init()
return;
thr->in_rtl++;
- MemoryRangeImitateWrite(thr, (uptr)pc, (uptr)p, sz);
+ MemoryResetRange(thr, (uptr)pc, (uptr)p, sz);
thr->in_rtl--;
}
@@ -157,56 +144,47 @@ void __tsan_free(void *p) {
(void)p;
}
-void __tsan_go_start(int pgoid, int chgoid, void *pc) {
- if (chgoid == 0)
- return;
- AllocGoroutine(chgoid);
- ThreadState *thr = goroutines[chgoid];
- ThreadState *parent = goroutines[pgoid];
+void __tsan_go_start(ThreadState *parent, ThreadState **pthr, void *pc) {
+ ThreadState *thr = AllocGoroutine();
+ *pthr = thr;
thr->in_rtl++;
parent->in_rtl++;
- int goid2 = ThreadCreate(parent, (uptr)pc, 0, true);
- ThreadStart(thr, goid2, 0);
+ int goid = ThreadCreate(parent, (uptr)pc, 0, true);
+ ThreadStart(thr, goid, 0);
parent->in_rtl--;
thr->in_rtl--;
}
-void __tsan_go_end(int goid) {
- ThreadState *thr = goroutines[goid];
+void __tsan_go_end(ThreadState *thr) {
thr->in_rtl++;
ThreadFinish(thr);
thr->in_rtl--;
internal_free(thr);
- goroutines[goid] = 0;
}
-void __tsan_acquire(int goid, void *addr) {
- ThreadState *thr = goroutines[goid];
+void __tsan_acquire(ThreadState *thr, void *addr) {
thr->in_rtl++;
Acquire(thr, 0, (uptr)addr);
thr->in_rtl--;
}
-void __tsan_release(int goid, void *addr) {
- ThreadState *thr = goroutines[goid];
+void __tsan_release(ThreadState *thr, void *addr) {
thr->in_rtl++;
ReleaseStore(thr, 0, (uptr)addr);
thr->in_rtl--;
}
-void __tsan_release_merge(int goid, void *addr) {
- ThreadState *thr = goroutines[goid];
+void __tsan_release_merge(ThreadState *thr, void *addr) {
thr->in_rtl++;
Release(thr, 0, (uptr)addr);
thr->in_rtl--;
}
-void __tsan_finalizer_goroutine(int goid) {
- ThreadState *thr = goroutines[goid];
+void __tsan_finalizer_goroutine(ThreadState *thr) {
AcquireGlobal(thr, 0);
}
-#ifdef _WIN32
+#if SANITIZER_WINDOWS
// MinGW gcc emits calls to the function.
void ___chkstk_ms(void) {
// The implementation must be along the lines of:
@@ -242,3 +220,11 @@ void ___chkstk_ms(void) {
} // extern "C"
} // namespace __tsan
+
+namespace __sanitizer {
+
+void SymbolizerPrepareForSandboxing() {
+ // Nothing to do here for Go.
+}
+
+} // namespace __sanitizer
diff --git a/lib/tsan/lit_tests/CMakeLists.txt b/lib/tsan/lit_tests/CMakeLists.txt
index ff2508dd75af..53e5015d1bc4 100644
--- a/lib/tsan/lit_tests/CMakeLists.txt
+++ b/lib/tsan/lit_tests/CMakeLists.txt
@@ -11,9 +11,8 @@ configure_lit_site_cfg(
if(COMPILER_RT_CAN_EXECUTE_TESTS)
# Run TSan output tests only if we're sure we can produce working binaries.
set(TSAN_TEST_DEPS
- clang clang-headers FileCheck count not llvm-symbolizer
- ${TSAN_RUNTIME_LIBRARIES}
- )
+ ${SANITIZER_COMMON_LIT_TEST_DEPS}
+ ${TSAN_RUNTIME_LIBRARIES})
set(TSAN_TEST_PARAMS
tsan_site_config=${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg
)
diff --git a/lib/tsan/lit_tests/SharedLibs/lit.local.cfg b/lib/tsan/lit_tests/SharedLibs/lit.local.cfg
new file mode 100644
index 000000000000..b3677c17a0f2
--- /dev/null
+++ b/lib/tsan/lit_tests/SharedLibs/lit.local.cfg
@@ -0,0 +1,4 @@
+# Sources in this directory are compiled as shared libraries and used by
+# tests in parent directory.
+
+config.suffixes = []
diff --git a/lib/tsan/lit_tests/SharedLibs/load_shared_lib-so.cc b/lib/tsan/lit_tests/SharedLibs/load_shared_lib-so.cc
new file mode 100644
index 000000000000..d05aa6a40d18
--- /dev/null
+++ b/lib/tsan/lit_tests/SharedLibs/load_shared_lib-so.cc
@@ -0,0 +1,22 @@
+//===----------- load_shared_lib-so.cc --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include <stddef.h>
+
+int GLOB_SHARED = 0;
+
+extern "C"
+void *write_from_so(void *unused) {
+ GLOB_SHARED++;
+ return NULL;
+}
diff --git a/lib/tsan/lit_tests/Unit/lit.cfg b/lib/tsan/lit_tests/Unit/lit.cfg
index 6688697c0c1b..0a0dbbfa5495 100644
--- a/lib/tsan/lit_tests/Unit/lit.cfg
+++ b/lib/tsan/lit_tests/Unit/lit.cfg
@@ -11,9 +11,8 @@ def get_required_attr(config, attr_name):
return attr_value
# Setup attributes common for all compiler-rt projects.
-llvm_src_root = get_required_attr(config, 'llvm_src_root')
-compiler_rt_lit_unit_cfg = os.path.join(llvm_src_root, "projects",
- "compiler-rt", "lib",
+compiler_rt_src_root = get_required_attr(config, 'compiler_rt_src_root')
+compiler_rt_lit_unit_cfg = os.path.join(compiler_rt_src_root, "lib",
"lit.common.unit.cfg")
lit.load_config(config, compiler_rt_lit_unit_cfg)
diff --git a/lib/tsan/lit_tests/Unit/lit.site.cfg.in b/lib/tsan/lit_tests/Unit/lit.site.cfg.in
index 23654b9be2ee..6eedc2180876 100644
--- a/lib/tsan/lit_tests/Unit/lit.site.cfg.in
+++ b/lib/tsan/lit_tests/Unit/lit.site.cfg.in
@@ -1,15 +1,17 @@
## Autogenerated by LLVM/Clang configuration.
# Do not edit!
-config.build_type = "@CMAKE_BUILD_TYPE@"
config.llvm_obj_root = "@LLVM_BINARY_DIR@"
config.llvm_src_root = "@LLVM_SOURCE_DIR@"
+config.compiler_rt_src_root = "@COMPILER_RT_SOURCE_DIR@"
config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
+config.llvm_build_mode = "@LLVM_BUILD_MODE@"
# LLVM tools dir can be passed in lit parameters, so try to
# apply substitution.
try:
config.llvm_tools_dir = config.llvm_tools_dir % lit.params
+ config.llvm_build_mode = config.llvm_build_mode % lit.params
except KeyError,e:
key, = e.args
lit.fatal("unable to find %r parameter, use '--param=%s=VALUE'" % (key, key))
diff --git a/lib/tsan/lit_tests/aligned_vs_unaligned_race.cc b/lib/tsan/lit_tests/aligned_vs_unaligned_race.cc
new file mode 100644
index 000000000000..f4533d08306c
--- /dev/null
+++ b/lib/tsan/lit_tests/aligned_vs_unaligned_race.cc
@@ -0,0 +1,34 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+// Race between an aligned access and an unaligned access, which
+// touches the same memory region.
+// This is a real race which is not detected by tsan.
+// https://code.google.com/p/thread-sanitizer/issues/detail?id=17
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+
+uint64_t Global[2];
+
+void *Thread1(void *x) {
+ Global[1]++;
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ char *p1 = reinterpret_cast<char *>(&Global[0]);
+ uint64_t *p4 = reinterpret_cast<uint64_t *>(p1 + 1);
+ (*p4)++;
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ printf("Pass\n");
+ // CHECK-NOT: ThreadSanitizer: data race
+ // CHECK: Pass
+ return 0;
+}
diff --git a/lib/tsan/lit_tests/atomic_free.cc b/lib/tsan/lit_tests/atomic_free.cc
new file mode 100644
index 000000000000..ba9bd5ac4aed
--- /dev/null
+++ b/lib/tsan/lit_tests/atomic_free.cc
@@ -0,0 +1,19 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <unistd.h>
+
+void *Thread(void *a) {
+ __atomic_fetch_add((int*)a, 1, __ATOMIC_SEQ_CST);
+ return 0;
+}
+
+int main() {
+ int *a = new int(0);
+ pthread_t t;
+ pthread_create(&t, 0, Thread, a);
+ sleep(1);
+ delete a;
+ pthread_join(t, 0);
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/lit_tests/atomic_free2.cc b/lib/tsan/lit_tests/atomic_free2.cc
new file mode 100644
index 000000000000..5517bf7ce902
--- /dev/null
+++ b/lib/tsan/lit_tests/atomic_free2.cc
@@ -0,0 +1,19 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <unistd.h>
+
+void *Thread(void *a) {
+ sleep(1);
+ __atomic_fetch_add((int*)a, 1, __ATOMIC_SEQ_CST);
+ return 0;
+}
+
+int main() {
+ int *a = new int(0);
+ pthread_t t;
+ pthread_create(&t, 0, Thread, a);
+ delete a;
+ pthread_join(t, 0);
+}
+
+// CHECK: WARNING: ThreadSanitizer: heap-use-after-free
diff --git a/lib/tsan/lit_tests/atomic_norace.cc b/lib/tsan/lit_tests/atomic_norace.cc
new file mode 100644
index 000000000000..265459b0758e
--- /dev/null
+++ b/lib/tsan/lit_tests/atomic_norace.cc
@@ -0,0 +1,61 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+
+const int kTestCount = 4;
+typedef long long T;
+T atomics[kTestCount * 2];
+
+void Test(int test, T *p, bool main_thread) {
+ volatile T sink;
+ if (test == 0) {
+ if (main_thread)
+ __atomic_fetch_add(p, 1, __ATOMIC_RELAXED);
+ else
+ __atomic_fetch_add(p, 1, __ATOMIC_RELAXED);
+ } else if (test == 1) {
+ if (main_thread)
+ __atomic_exchange_n(p, 1, __ATOMIC_ACQ_REL);
+ else
+ __atomic_exchange_n(p, 1, __ATOMIC_ACQ_REL);
+ } else if (test == 2) {
+ if (main_thread)
+ sink = __atomic_load_n(p, __ATOMIC_SEQ_CST);
+ else
+ __atomic_store_n(p, 1, __ATOMIC_SEQ_CST);
+ } else if (test == 3) {
+ if (main_thread)
+ sink = __atomic_load_n(p, __ATOMIC_SEQ_CST);
+ else
+ sink = *p;
+ }
+}
+
+void *Thread(void *p) {
+ for (int i = 0; i < kTestCount; i++) {
+ Test(i, &atomics[i], false);
+ }
+ sleep(2);
+ for (int i = 0; i < kTestCount; i++) {
+ fprintf(stderr, "Test %d reverse\n", i);
+ Test(i, &atomics[kTestCount + i], false);
+ }
+ return 0;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ sleep(1);
+ for (int i = 0; i < kTestCount; i++) {
+ fprintf(stderr, "Test %d\n", i);
+ Test(i, &atomics[i], true);
+ }
+ for (int i = 0; i < kTestCount; i++) {
+ Test(i, &atomics[kTestCount + i], true);
+ }
+ pthread_join(t, 0);
+}
+
+// CHECK-NOT: ThreadSanitizer: data race
diff --git a/lib/tsan/lit_tests/atomic_race.cc b/lib/tsan/lit_tests/atomic_race.cc
new file mode 100644
index 000000000000..360b81238889
--- /dev/null
+++ b/lib/tsan/lit_tests/atomic_race.cc
@@ -0,0 +1,80 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <unistd.h>
+#include <stdio.h>
+
+const int kTestCount = 4;
+typedef long long T;
+T atomics[kTestCount * 2];
+
+void Test(int test, T *p, bool main_thread) {
+ volatile T sink;
+ if (test == 0) {
+ if (main_thread)
+ __atomic_fetch_add(p, 1, __ATOMIC_RELAXED);
+ else
+ *p = 42;
+ } else if (test == 1) {
+ if (main_thread)
+ __atomic_fetch_add(p, 1, __ATOMIC_RELAXED);
+ else
+ sink = *p;
+ } else if (test == 2) {
+ if (main_thread)
+ sink = __atomic_load_n(p, __ATOMIC_SEQ_CST);
+ else
+ *p = 42;
+ } else if (test == 3) {
+ if (main_thread)
+ __atomic_store_n(p, 1, __ATOMIC_SEQ_CST);
+ else
+ sink = *p;
+ }
+}
+
+void *Thread(void *p) {
+ for (int i = 0; i < kTestCount; i++) {
+ Test(i, &atomics[i], false);
+ }
+ sleep(2);
+ for (int i = 0; i < kTestCount; i++) {
+ fprintf(stderr, "Test %d reverse\n", i);
+ Test(i, &atomics[kTestCount + i], false);
+ }
+ return 0;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ sleep(1);
+ for (int i = 0; i < kTestCount; i++) {
+ fprintf(stderr, "Test %d\n", i);
+ Test(i, &atomics[i], true);
+ }
+ for (int i = 0; i < kTestCount; i++) {
+ Test(i, &atomics[kTestCount + i], true);
+ }
+ pthread_join(t, 0);
+}
+
+// CHECK: Test 0
+// CHECK: ThreadSanitizer: data race
+// CHECK-NOT: SUMMARY{{.*}}tsan_interface_atomic
+// CHECK: Test 1
+// CHECK: ThreadSanitizer: data race
+// CHECK-NOT: SUMMARY{{.*}}tsan_interface_atomic
+// CHECK: Test 2
+// CHECK: ThreadSanitizer: data race
+// CHECK-NOT: SUMMARY{{.*}}tsan_interface_atomic
+// CHECK: Test 3
+// CHECK: ThreadSanitizer: data race
+// CHECK-NOT: SUMMARY{{.*}}tsan_interface_atomic
+// CHECK: Test 0 reverse
+// CHECK: ThreadSanitizer: data race
+// CHECK: Test 1 reverse
+// CHECK: ThreadSanitizer: data race
+// CHECK: Test 2 reverse
+// CHECK: ThreadSanitizer: data race
+// CHECK: Test 3 reverse
+// CHECK: ThreadSanitizer: data race
diff --git a/lib/tsan/lit_tests/atomic_stack.cc b/lib/tsan/lit_tests/atomic_stack.cc
new file mode 100644
index 000000000000..50f6a8a889ca
--- /dev/null
+++ b/lib/tsan/lit_tests/atomic_stack.cc
@@ -0,0 +1,29 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <unistd.h>
+
+int Global;
+
+void *Thread1(void *x) {
+ sleep(1);
+ __atomic_fetch_add(&Global, 1, __ATOMIC_RELAXED);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ Global++;
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK: Atomic write of size 4
+// CHECK: #0 __tsan_atomic32_fetch_add
+// CHECK: #1 Thread1
diff --git a/lib/tsan/lit_tests/benign_race.cc b/lib/tsan/lit_tests/benign_race.cc
new file mode 100644
index 000000000000..a4d4d23c362a
--- /dev/null
+++ b/lib/tsan/lit_tests/benign_race.cc
@@ -0,0 +1,39 @@
+// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int Global;
+int WTFGlobal;
+
+extern "C" {
+void AnnotateBenignRaceSized(const char *f, int l,
+ void *mem, unsigned int size, const char *desc);
+void WTFAnnotateBenignRaceSized(const char *f, int l,
+ void *mem, unsigned int size,
+ const char *desc);
+}
+
+
+void *Thread(void *x) {
+ Global = 42;
+ WTFGlobal = 142;
+ return 0;
+}
+
+int main() {
+ AnnotateBenignRaceSized(__FILE__, __LINE__,
+ &Global, sizeof(Global), "Race on Global");
+ WTFAnnotateBenignRaceSized(__FILE__, __LINE__,
+ &WTFGlobal, sizeof(WTFGlobal),
+ "Race on WTFGlobal");
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ sleep(1);
+ Global = 43;
+ WTFGlobal = 143;
+ pthread_join(t, 0);
+ printf("OK\n");
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/lit_tests/free_race.c b/lib/tsan/lit_tests/free_race.c
index 7a2ec0cdbed0..ff71a4d2116b 100644
--- a/lib/tsan/lit_tests/free_race.c
+++ b/lib/tsan/lit_tests/free_race.c
@@ -40,4 +40,5 @@ int main() {
// CHECK: #1 main
// CHECK: Previous write of size 8 at {{.*}} by thread T1{{.*}}:
// CHECK: #0 free
-// CHECK: #1 Thread1
+// CHECK: #{{(1|2)}} Thread1
+// CHECK: SUMMARY: ThreadSanitizer: heap-use-after-free{{.*}}Thread2
diff --git a/lib/tsan/lit_tests/free_race2.c b/lib/tsan/lit_tests/free_race2.c
index 095f82ea0818..f20774b2d8d4 100644
--- a/lib/tsan/lit_tests/free_race2.c
+++ b/lib/tsan/lit_tests/free_race2.c
@@ -22,5 +22,5 @@ int main() {
// CHECK: #1 main
// CHECK: Previous write of size 8 at {{.*}} by main thread:
// CHECK: #0 free
-// CHECK: #1 foo
-// CHECK: #2 main
+// CHECK: #{{1|2}} foo
+// CHECK: #{{2|3}} main
diff --git a/lib/tsan/lit_tests/inlined_memcpy_race.cc b/lib/tsan/lit_tests/inlined_memcpy_race.cc
new file mode 100644
index 000000000000..6efe5a956e9d
--- /dev/null
+++ b/lib/tsan/lit_tests/inlined_memcpy_race.cc
@@ -0,0 +1,55 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+int x[4], y[4], z[4];
+
+void *MemCpyThread(void *a) {
+ memcpy((int*)a, z, 16);
+ return NULL;
+}
+
+void *MemMoveThread(void *a) {
+ memmove((int*)a, z, 16);
+ return NULL;
+}
+
+void *MemSetThread(void *a) {
+ sleep(1);
+ memset((int*)a, 0, 16);
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ // Race on x between memcpy and memset
+ pthread_create(&t[0], NULL, MemCpyThread, x);
+ pthread_create(&t[1], NULL, MemSetThread, x);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ // Race on y between memmove and memset
+ pthread_create(&t[0], NULL, MemMoveThread, y);
+ pthread_create(&t[1], NULL, MemSetThread, y);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+
+ printf("PASS\n");
+ return 0;
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK: #0 memset
+// CHECK: #1 MemSetThread
+// CHECK: Previous write
+// CHECK: #0 memcpy
+// CHECK: #1 MemCpyThread
+
+// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK: #0 memset
+// CHECK: #1 MemSetThread
+// CHECK: Previous write
+// CHECK: #0 memmove
+// CHECK: #1 MemMoveThread
diff --git a/lib/tsan/lit_tests/java.h b/lib/tsan/lit_tests/java.h
index 7d61f5802864..04094197edb7 100644
--- a/lib/tsan/lit_tests/java.h
+++ b/lib/tsan/lit_tests/java.h
@@ -14,4 +14,6 @@ void __tsan_java_mutex_lock(jptr addr);
void __tsan_java_mutex_unlock(jptr addr);
void __tsan_java_mutex_read_lock(jptr addr);
void __tsan_java_mutex_read_unlock(jptr addr);
+void __tsan_java_mutex_lock_rec(jptr addr, int rec);
+int __tsan_java_mutex_unlock_rec(jptr addr);
}
diff --git a/lib/tsan/lit_tests/java_lock.cc b/lib/tsan/lit_tests/java_lock.cc
index f66f1e7097fa..d9db103504de 100644
--- a/lib/tsan/lit_tests/java_lock.cc
+++ b/lib/tsan/lit_tests/java_lock.cc
@@ -1,10 +1,12 @@
// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
#include "java.h"
+#include <unistd.h>
jptr varaddr;
jptr lockaddr;
void *Thread(void *p) {
+ sleep(1);
__tsan_java_mutex_lock(lockaddr);
*(int*)varaddr = 42;
__tsan_java_mutex_unlock(lockaddr);
diff --git a/lib/tsan/lit_tests/java_lock_rec.cc b/lib/tsan/lit_tests/java_lock_rec.cc
new file mode 100644
index 000000000000..5cc80d4a33ef
--- /dev/null
+++ b/lib/tsan/lit_tests/java_lock_rec.cc
@@ -0,0 +1,54 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include "java.h"
+#include <unistd.h>
+
+jptr varaddr;
+jptr lockaddr;
+
+void *Thread(void *p) {
+ __tsan_java_mutex_lock(lockaddr);
+ __tsan_java_mutex_lock(lockaddr);
+ *(int*)varaddr = 42;
+ int rec = __tsan_java_mutex_unlock_rec(lockaddr);
+ if (rec != 2) {
+ printf("FAILED 0 rec=%d\n", rec);
+ exit(1);
+ }
+ sleep(2);
+ __tsan_java_mutex_lock_rec(lockaddr, rec);
+ if (*(int*)varaddr != 43) {
+ printf("FAILED 3 var=%d\n", *(int*)varaddr);
+ exit(1);
+ }
+ __tsan_java_mutex_unlock(lockaddr);
+ __tsan_java_mutex_unlock(lockaddr);
+ return 0;
+}
+
+int main() {
+ int const kHeapSize = 1024 * 1024;
+ void *jheap = malloc(kHeapSize);
+ __tsan_java_init((jptr)jheap, kHeapSize);
+ const int kBlockSize = 16;
+ __tsan_java_alloc((jptr)jheap, kBlockSize);
+ varaddr = (jptr)jheap;
+ *(int*)varaddr = 0;
+ lockaddr = (jptr)jheap + 8;
+ pthread_t th;
+ pthread_create(&th, 0, Thread, 0);
+ sleep(1);
+ __tsan_java_mutex_lock(lockaddr);
+ if (*(int*)varaddr != 42) {
+ printf("FAILED 1 var=%d\n", *(int*)varaddr);
+ exit(1);
+ }
+ *(int*)varaddr = 43;
+ __tsan_java_mutex_unlock(lockaddr);
+ pthread_join(th, 0);
+ __tsan_java_free((jptr)jheap, kBlockSize);
+ printf("OK\n");
+ return __tsan_java_fini();
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer: data race
+// CHECK-NOT: FAILED
diff --git a/lib/tsan/lit_tests/java_lock_rec_race.cc b/lib/tsan/lit_tests/java_lock_rec_race.cc
new file mode 100644
index 000000000000..61626aaddc0d
--- /dev/null
+++ b/lib/tsan/lit_tests/java_lock_rec_race.cc
@@ -0,0 +1,48 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include "java.h"
+#include <unistd.h>
+
+jptr varaddr;
+jptr lockaddr;
+
+void *Thread(void *p) {
+ __tsan_java_mutex_lock(lockaddr);
+ __tsan_java_mutex_lock(lockaddr);
+ __tsan_java_mutex_lock(lockaddr);
+ int rec = __tsan_java_mutex_unlock_rec(lockaddr);
+ if (rec != 3) {
+ printf("FAILED 0 rec=%d\n", rec);
+ exit(1);
+ }
+ *(int*)varaddr = 42;
+ sleep(2);
+ __tsan_java_mutex_lock_rec(lockaddr, rec);
+ __tsan_java_mutex_unlock(lockaddr);
+ __tsan_java_mutex_unlock(lockaddr);
+ __tsan_java_mutex_unlock(lockaddr);
+ return 0;
+}
+
+int main() {
+ int const kHeapSize = 1024 * 1024;
+ void *jheap = malloc(kHeapSize);
+ __tsan_java_init((jptr)jheap, kHeapSize);
+ const int kBlockSize = 16;
+ __tsan_java_alloc((jptr)jheap, kBlockSize);
+ varaddr = (jptr)jheap;
+ *(int*)varaddr = 0;
+ lockaddr = (jptr)jheap + 8;
+ pthread_t th;
+ pthread_create(&th, 0, Thread, 0);
+ sleep(1);
+ __tsan_java_mutex_lock(lockaddr);
+ *(int*)varaddr = 43;
+ __tsan_java_mutex_unlock(lockaddr);
+ pthread_join(th, 0);
+ __tsan_java_free((jptr)jheap, kBlockSize);
+ printf("OK\n");
+ return __tsan_java_fini();
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK-NOT: FAILED
diff --git a/lib/tsan/lit_tests/java_rwlock.cc b/lib/tsan/lit_tests/java_rwlock.cc
index 1e8940afd7d0..d1f38733ba03 100644
--- a/lib/tsan/lit_tests/java_rwlock.cc
+++ b/lib/tsan/lit_tests/java_rwlock.cc
@@ -1,10 +1,12 @@
// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
#include "java.h"
+#include <unistd.h>
jptr varaddr;
jptr lockaddr;
void *Thread(void *p) {
+ sleep(1);
__tsan_java_mutex_read_lock(lockaddr);
*(int*)varaddr = 42;
__tsan_java_mutex_read_unlock(lockaddr);
diff --git a/lib/tsan/lit_tests/lit.cfg b/lib/tsan/lit_tests/lit.cfg
index 7e2db7b8fd0b..d483d2fcbdc6 100644
--- a/lib/tsan/lit_tests/lit.cfg
+++ b/lib/tsan/lit_tests/lit.cfg
@@ -2,6 +2,14 @@
import os
+def get_required_attr(config, attr_name):
+ attr_value = getattr(config, attr_name, None)
+ if not attr_value:
+ lit.fatal("No attribute %r in test configuration! You may need to run "
+ "tests from your build directory or add this attribute "
+ "to lit.site.cfg " % attr_name)
+ return attr_value
+
# Setup config name.
config.name = 'ThreadSanitizer'
@@ -30,14 +38,6 @@ if llvm_src_root is None:
if not llvm_config:
DisplayNoConfigMessage()
- # Validate that llvm-config points to the same source tree.
- llvm_src_root = lit.util.capture(["llvm-config", "--src-root"]).strip()
- tsan_test_src_root = os.path.join(llvm_src_root, "projects", "compiler-rt",
- "lib", "tsan", "lit_tests")
- if (os.path.realpath(tsan_test_src_root) !=
- os.path.realpath(config.test_source_root)):
- DisplayNoConfigMessage()
-
# Find out the presumed location of generated site config.
llvm_obj_root = lit.util.capture(["llvm-config", "--obj-root"]).strip()
tsan_site_cfg = os.path.join(llvm_obj_root, "projects", "compiler-rt",
@@ -49,8 +49,9 @@ if llvm_src_root is None:
raise SystemExit
# Setup attributes common for all compiler-rt projects.
-compiler_rt_lit_cfg = os.path.join(llvm_src_root, "projects", "compiler-rt",
- "lib", "lit.common.cfg")
+compiler_rt_src_root = get_required_attr(config, 'compiler_rt_src_root')
+compiler_rt_lit_cfg = os.path.join(compiler_rt_src_root, "lib",
+ "lit.common.cfg")
if (not compiler_rt_lit_cfg) or (not os.path.exists(compiler_rt_lit_cfg)):
lit.fatal("Can't find common compiler-rt lit config at: %r"
% compiler_rt_lit_cfg)
@@ -69,11 +70,8 @@ config.environment['TSAN_OPTIONS'] = tsan_options
# Setup default compiler flags used with -fsanitize=thread option.
# FIXME: Review the set of required flags and check if it can be reduced.
clang_tsan_cflags = ("-fsanitize=thread "
- + "-fPIE "
- + "-fno-builtin "
+ "-g "
+ "-Wall "
- + "-pie "
+ "-lpthread "
+ "-ldl ")
clang_tsan_cxxflags = "-ccc-cxx " + clang_tsan_cflags
diff --git a/lib/tsan/lit_tests/lit.site.cfg.in b/lib/tsan/lit_tests/lit.site.cfg.in
index b1c6ccf544ea..07b521af061f 100644
--- a/lib/tsan/lit_tests/lit.site.cfg.in
+++ b/lib/tsan/lit_tests/lit.site.cfg.in
@@ -4,6 +4,7 @@
config.clang = "@LLVM_BINARY_DIR@/bin/clang"
config.host_os = "@HOST_OS@"
config.llvm_src_root = "@LLVM_SOURCE_DIR@"
+config.compiler_rt_src_root = "@COMPILER_RT_SOURCE_DIR@"
config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
config.target_triple = "@TARGET_TRIPLE@"
diff --git a/lib/tsan/lit_tests/load_shared_lib.cc b/lib/tsan/lit_tests/load_shared_lib.cc
new file mode 100644
index 000000000000..dd6fa0964f4a
--- /dev/null
+++ b/lib/tsan/lit_tests/load_shared_lib.cc
@@ -0,0 +1,44 @@
+// Check that if the list of shared libraries changes between the two race
+// reports, the second report occurring in a new shared library is still
+// symbolized correctly.
+
+// RUN: %clangxx_tsan -O1 %p/SharedLibs/load_shared_lib-so.cc \
+// RUN: -fPIC -shared -o %t-so.so
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+
+#include <dlfcn.h>
+#include <pthread.h>
+#include <stdio.h>
+
+#include <string>
+
+int GLOB = 0;
+
+void *write_glob(void *unused) {
+ GLOB++;
+ return NULL;
+}
+
+void race_two_threads(void *(*access_callback)(void *unused)) {
+ pthread_t t1, t2;
+ pthread_create(&t1, NULL, access_callback, NULL);
+ pthread_create(&t2, NULL, access_callback, NULL);
+ pthread_join(t1, NULL);
+ pthread_join(t2, NULL);
+}
+
+int main(int argc, char *argv[]) {
+ std::string path = std::string(argv[0]) + std::string("-so.so");
+ race_two_threads(write_glob);
+ // CHECK: write_glob
+ void *lib = dlopen(path.c_str(), RTLD_NOW);
+ if (!lib) {
+ printf("error in dlopen(): %s\n", dlerror());
+ return 1;
+ }
+ void *(*write_from_so)(void *unused);
+ *(void **)&write_from_so = dlsym(lib, "write_from_so");
+ race_two_threads(write_from_so);
+ // CHECK: write_from_so
+ return 0;
+}
diff --git a/lib/tsan/lit_tests/longjmp.cc b/lib/tsan/lit_tests/longjmp.cc
new file mode 100644
index 000000000000..d9ca4ca5e6e9
--- /dev/null
+++ b/lib/tsan/lit_tests/longjmp.cc
@@ -0,0 +1,22 @@
+// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <stdio.h>
+#include <stdlib.h>
+#include <setjmp.h>
+
+int foo(jmp_buf env) {
+ longjmp(env, 42);
+}
+
+int main() {
+ jmp_buf env;
+ if (setjmp(env) == 42) {
+ printf("JUMPED\n");
+ return 0;
+ }
+ foo(env);
+ printf("FAILED\n");
+ return 0;
+}
+
+// CHECK-NOT: FAILED
+// CHECK: JUMPED
diff --git a/lib/tsan/lit_tests/longjmp2.cc b/lib/tsan/lit_tests/longjmp2.cc
new file mode 100644
index 000000000000..0d551fa19d94
--- /dev/null
+++ b/lib/tsan/lit_tests/longjmp2.cc
@@ -0,0 +1,24 @@
+// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <stdio.h>
+#include <stdlib.h>
+#include <setjmp.h>
+
+int foo(sigjmp_buf env) {
+ printf("env=%p\n", env);
+ siglongjmp(env, 42);
+}
+
+int main() {
+ sigjmp_buf env;
+ printf("env=%p\n", env);
+ if (sigsetjmp(env, 1) == 42) {
+ printf("JUMPED\n");
+ return 0;
+ }
+ foo(env);
+ printf("FAILED\n");
+ return 0;
+}
+
+// CHECK-NOT: FAILED
+// CHECK: JUMPED
diff --git a/lib/tsan/lit_tests/longjmp3.cc b/lib/tsan/lit_tests/longjmp3.cc
new file mode 100644
index 000000000000..87fabd0b3be2
--- /dev/null
+++ b/lib/tsan/lit_tests/longjmp3.cc
@@ -0,0 +1,48 @@
+// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <setjmp.h>
+
+void bar(jmp_buf env) {
+ volatile int x = 42;
+ longjmp(env, 42);
+ x++;
+}
+
+void foo(jmp_buf env) {
+ volatile int x = 42;
+ bar(env);
+ x++;
+}
+
+void badguy() {
+ pthread_mutex_t mtx;
+ pthread_mutex_init(&mtx, 0);
+ pthread_mutex_lock(&mtx);
+ pthread_mutex_destroy(&mtx);
+}
+
+void mymain() {
+ jmp_buf env;
+ if (setjmp(env) == 42) {
+ badguy();
+ return;
+ }
+ foo(env);
+ printf("FAILED\n");
+}
+
+int main() {
+ volatile int x = 42;
+ mymain();
+ return x;
+}
+
+// CHECK-NOT: FAILED
+// CHECK: WARNING: ThreadSanitizer: destroy of a locked mutex
+// CHECK: #0 pthread_mutex_destroy
+// CHECK: #1 badguy
+// CHECK: #2 mymain
+// CHECK: #3 main
+
diff --git a/lib/tsan/lit_tests/longjmp4.cc b/lib/tsan/lit_tests/longjmp4.cc
new file mode 100644
index 000000000000..a8764dda5a6b
--- /dev/null
+++ b/lib/tsan/lit_tests/longjmp4.cc
@@ -0,0 +1,51 @@
+// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <setjmp.h>
+#include <string.h>
+
+void bar(jmp_buf env) {
+ volatile int x = 42;
+ jmp_buf env2;
+ memcpy(env2, env, sizeof(jmp_buf));
+ longjmp(env2, 42);
+ x++;
+}
+
+void foo(jmp_buf env) {
+ volatile int x = 42;
+ bar(env);
+ x++;
+}
+
+void badguy() {
+ pthread_mutex_t mtx;
+ pthread_mutex_init(&mtx, 0);
+ pthread_mutex_lock(&mtx);
+ pthread_mutex_destroy(&mtx);
+}
+
+void mymain() {
+ jmp_buf env;
+ if (setjmp(env) == 42) {
+ badguy();
+ return;
+ }
+ foo(env);
+ printf("FAILED\n");
+}
+
+int main() {
+ volatile int x = 42;
+ mymain();
+ return x;
+}
+
+// CHECK-NOT: FAILED
+// CHECK: WARNING: ThreadSanitizer: destroy of a locked mutex
+// CHECK: #0 pthread_mutex_destroy
+// CHECK: #1 badguy
+// CHECK: #2 mymain
+// CHECK: #3 main
+
diff --git a/lib/tsan/lit_tests/malloc_overflow.cc b/lib/tsan/lit_tests/malloc_overflow.cc
new file mode 100644
index 000000000000..19423c5f93f1
--- /dev/null
+++ b/lib/tsan/lit_tests/malloc_overflow.cc
@@ -0,0 +1,22 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <stdio.h>
+#include <stdlib.h>
+
+int main() {
+ void *p = malloc((size_t)-1);
+ if (p != 0)
+ printf("FAIL malloc(-1) = %p\n", p);
+ p = malloc((size_t)-1 / 2);
+ if (p != 0)
+ printf("FAIL malloc(-1/2) = %p\n", p);
+ p = calloc((size_t)-1, (size_t)-1);
+ if (p != 0)
+ printf("FAIL calloc(-1, -1) = %p\n", p);
+ p = calloc((size_t)-1 / 2, (size_t)-1 / 2);
+ if (p != 0)
+ printf("FAIL calloc(-1/2, -1/2) = %p\n", p);
+ printf("OK\n");
+}
+
+// CHECK-NOT: FAIL
+// CHECK-NOT: failed to allocate
diff --git a/lib/tsan/lit_tests/malloc_stack.cc b/lib/tsan/lit_tests/malloc_stack.cc
new file mode 100644
index 000000000000..c185623ff5ca
--- /dev/null
+++ b/lib/tsan/lit_tests/malloc_stack.cc
@@ -0,0 +1,25 @@
+// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <unistd.h>
+
+_Atomic(int*) p;
+
+void *thr(void *a) {
+ sleep(1);
+ int *pp = __c11_atomic_load(&p, __ATOMIC_RELAXED);
+ *pp = 42;
+ return 0;
+}
+
+int main() {
+ pthread_t th;
+ pthread_create(&th, 0, thr, p);
+ __c11_atomic_store(&p, new int, __ATOMIC_RELAXED);
+ pthread_join(th, 0);
+}
+
+// CHECK: data race
+// CHECK: Previous write
+// CHECK: #0 operator new
+// CHECK: Location is heap block
+// CHECK: #0 operator new
diff --git a/lib/tsan/lit_tests/memcpy_race.cc b/lib/tsan/lit_tests/memcpy_race.cc
index 806740dda241..857728ba0540 100644
--- a/lib/tsan/lit_tests/memcpy_race.cc
+++ b/lib/tsan/lit_tests/memcpy_race.cc
@@ -10,13 +10,15 @@ char *data1 = new char[10];
char *data2 = new char[10];
void *Thread1(void *x) {
- memcpy(data+5, data1, 1);
+ static volatile int size = 1;
+ memcpy(data+5, data1, size);
return NULL;
}
void *Thread2(void *x) {
+ static volatile int size = 4;
sleep(1);
- memcpy(data+3, data2, 4);
+ memcpy(data+3, data2, size);
return NULL;
}
diff --git a/lib/tsan/lit_tests/mutex_destroy_locked.cc b/lib/tsan/lit_tests/mutex_destroy_locked.cc
index 991eaf5426e2..27a04248b172 100644
--- a/lib/tsan/lit_tests/mutex_destroy_locked.cc
+++ b/lib/tsan/lit_tests/mutex_destroy_locked.cc
@@ -19,3 +19,4 @@ int main() {
// CHECK: Mutex {{.*}} created at:
// CHECK: #0 pthread_mutex_init
// CHECK: #1 main
+// CHECK: SUMMARY: ThreadSanitizer: destroy of a locked mutex{{.*}}main
diff --git a/lib/tsan/lit_tests/mutexset7.cc b/lib/tsan/lit_tests/mutexset7.cc
index 141bde2b5015..3ec1b5202983 100644
--- a/lib/tsan/lit_tests/mutexset7.cc
+++ b/lib/tsan/lit_tests/mutexset7.cc
@@ -4,6 +4,7 @@
#include <unistd.h>
int Global;
+__thread int huge[1024*1024];
void *Thread1(void *x) {
sleep(1);
diff --git a/lib/tsan/lit_tests/mutexset8.cc b/lib/tsan/lit_tests/mutexset8.cc
new file mode 100644
index 000000000000..6db63f7d16db
--- /dev/null
+++ b/lib/tsan/lit_tests/mutexset8.cc
@@ -0,0 +1,39 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int Global;
+pthread_mutex_t *mtx;
+
+void *Thread1(void *x) {
+ sleep(1);
+ pthread_mutex_lock(mtx);
+ Global++;
+ pthread_mutex_unlock(mtx);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ Global--;
+ return NULL;
+}
+
+int main() {
+ // CHECK: WARNING: ThreadSanitizer: data race
+ // CHECK: Write of size 4 at {{.*}} by thread T1
+ // CHECK: (mutexes: write [[M1:M[0-9]+]]):
+ // CHECK: Previous write of size 4 at {{.*}} by thread T2:
+ // CHECK: Mutex [[M1]] created at:
+ // CHECK: #0 pthread_mutex_init
+ // CHECK: #1 main {{.*}}/mutexset8.cc
+ mtx = new pthread_mutex_t;
+ pthread_mutex_init(mtx, 0);
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ pthread_mutex_destroy(mtx);
+ delete mtx;
+}
diff --git a/lib/tsan/lit_tests/oob_race.cc b/lib/tsan/lit_tests/oob_race.cc
new file mode 100644
index 000000000000..2e7f0593fd8d
--- /dev/null
+++ b/lib/tsan/lit_tests/oob_race.cc
@@ -0,0 +1,24 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stdio.h>
+
+const long kOffset = 64*1024;
+
+void *Thread(void *p) {
+ ((char*)p)[-kOffset] = 43;
+ return 0;
+}
+
+int main() {
+ char *volatile p0 = new char[16];
+ delete[] p0;
+ char *p = new char[32];
+ pthread_t th;
+ pthread_create(&th, 0, Thread, p);
+ p[-kOffset] = 42;
+ pthread_join(th, 0);
+}
+
+// Used to crash with CHECK failed.
+// CHECK: WARNING: ThreadSanitizer: data race
+
diff --git a/lib/tsan/lit_tests/race_on_heap.cc b/lib/tsan/lit_tests/race_on_heap.cc
index dc679e8bf3f9..35434eac1850 100644
--- a/lib/tsan/lit_tests/race_on_heap.cc
+++ b/lib/tsan/lit_tests/race_on_heap.cc
@@ -39,8 +39,8 @@ int main() {
// ...
// CHECK: Location is heap block of size 99 at [[ADDR]] allocated by thread T1:
// CHCEKL #0 malloc
-// CHECK: #1 alloc
-// CHECK: #2 AllocThread
+// CHECK: #{{1|2}} alloc
+// CHECK: #{{2|3}} AllocThread
// ...
// CHECK: Thread T1 (tid={{.*}}, finished) created by main thread at:
// CHECK: #0 pthread_create
diff --git a/lib/tsan/lit_tests/race_on_mutex.c b/lib/tsan/lit_tests/race_on_mutex.c
index de1c2d4160a6..aff32f9bb1a2 100644
--- a/lib/tsan/lit_tests/race_on_mutex.c
+++ b/lib/tsan/lit_tests/race_on_mutex.c
@@ -34,7 +34,7 @@ int main() {
}
// CHECK: WARNING: ThreadSanitizer: data race
-// CHECK-NEXT: Read of size 1 at {{.*}} by thread T2:
+// CHECK-NEXT: Atomic read of size 1 at {{.*}} by thread T2:
// CHECK-NEXT: #0 pthread_mutex_lock
// CHECK-NEXT: #1 Thread2{{.*}} {{.*}}race_on_mutex.c:20{{(:3)?}} ({{.*}})
// CHECK: Previous write of size 1 at {{.*}} by thread T1:
diff --git a/lib/tsan/lit_tests/race_on_mutex2.c b/lib/tsan/lit_tests/race_on_mutex2.c
new file mode 100644
index 000000000000..84bef75a3449
--- /dev/null
+++ b/lib/tsan/lit_tests/race_on_mutex2.c
@@ -0,0 +1,24 @@
+// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <unistd.h>
+
+void *Thread(void *x) {
+ pthread_mutex_lock((pthread_mutex_t*)x);
+ pthread_mutex_unlock((pthread_mutex_t*)x);
+ return 0;
+}
+
+int main() {
+ pthread_mutex_t Mtx;
+ pthread_mutex_init(&Mtx, 0);
+ pthread_t t;
+ pthread_create(&t, 0, Thread, &Mtx);
+ sleep(1);
+ pthread_mutex_destroy(&Mtx);
+ pthread_join(t, 0);
+ return 0;
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
diff --git a/lib/tsan/lit_tests/race_on_write.cc b/lib/tsan/lit_tests/race_on_write.cc
new file mode 100644
index 000000000000..f1b0bb1cbd6e
--- /dev/null
+++ b/lib/tsan/lit_tests/race_on_write.cc
@@ -0,0 +1,39 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+int fd;
+char buf;
+
+void *Thread1(void *x) {
+ buf = 1;
+ sleep(1);
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ write(fd, &buf, 1);
+ return NULL;
+}
+
+int main() {
+ fd = open("/dev/null", O_WRONLY);
+ if (fd < 0) return 1;
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ sleep(1);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ close(fd);
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK: Read of size 1
+// CHECK: #0 write
+// CHECK: Previous write of size 1
+// CHECK: #0 Thread1
diff --git a/lib/tsan/lit_tests/signal_errno.cc b/lib/tsan/lit_tests/signal_errno.cc
index af9ccce9045a..8181555f6f63 100644
--- a/lib/tsan/lit_tests/signal_errno.cc
+++ b/lib/tsan/lit_tests/signal_errno.cc
@@ -10,7 +10,7 @@
pthread_t mainth;
volatile int done;
-static void handler(int, siginfo_t *s, void *c) {
+static void MyHandler(int, siginfo_t *s, void *c) {
errno = 1;
done = 1;
}
@@ -23,7 +23,7 @@ static void* sendsignal(void *p) {
int main() {
mainth = pthread_self();
struct sigaction act = {};
- act.sa_sigaction = &handler;
+ act.sa_sigaction = &MyHandler;
sigaction(SIGPROF, &act, 0);
pthread_t th;
pthread_create(&th, 0, sendsignal, 0);
@@ -38,5 +38,6 @@ int main() {
}
// CHECK: WARNING: ThreadSanitizer: signal handler spoils errno
-// CHECK: #0 handler(int, siginfo*, void*) {{.*}}signal_errno.cc
+// CHECK: #0 MyHandler(int, siginfo{{(_t)?}}*, void*) {{.*}}signal_errno.cc
+// CHECK: SUMMARY: ThreadSanitizer: signal handler spoils errno{{.*}}MyHandler
diff --git a/lib/tsan/lit_tests/signal_malloc.cc b/lib/tsan/lit_tests/signal_malloc.cc
index cee997cdb763..4dbc2f78ab17 100644
--- a/lib/tsan/lit_tests/signal_malloc.cc
+++ b/lib/tsan/lit_tests/signal_malloc.cc
@@ -8,7 +8,8 @@
static void handler(int, siginfo_t*, void*) {
// CHECK: WARNING: ThreadSanitizer: signal-unsafe call inside of a signal
// CHECK: #0 malloc
- // CHECK: #1 handler(int, siginfo*, void*) {{.*}}signal_malloc.cc:[[@LINE+1]]
+ // CHECK: #{{(1|2)}} handler(int, siginfo{{(_t)?}}*, void*) {{.*}}signal_malloc.cc:[[@LINE+2]]
+ // CHECK: SUMMARY: ThreadSanitizer: signal-unsafe call inside of a signal{{.*}}handler
volatile char *p = (char*)malloc(1);
p[0] = 0;
free((void*)p);
diff --git a/lib/tsan/lit_tests/simple_race.cc b/lib/tsan/lit_tests/simple_race.cc
index ec29c92ee1a8..99cf228ac2f2 100644
--- a/lib/tsan/lit_tests/simple_race.cc
+++ b/lib/tsan/lit_tests/simple_race.cc
@@ -23,3 +23,4 @@ int main() {
}
// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK: SUMMARY: ThreadSanitizer: data race{{.*}}Thread
diff --git a/lib/tsan/lit_tests/test_output.sh b/lib/tsan/lit_tests/test_output.sh
index d21c9a797ad3..1eedf6eb20a3 100755
--- a/lib/tsan/lit_tests/test_output.sh
+++ b/lib/tsan/lit_tests/test_output.sh
@@ -6,12 +6,13 @@ set -e # fail on any error
ROOTDIR=$(dirname $0)/..
BLACKLIST=$ROOTDIR/lit_tests/Helpers/blacklist.txt
-# Assuming clang is in path.
-CC=clang
-CXX=clang++
+# Assume clang and clang++ are in path.
+: ${CC:=clang}
+: ${CXX:=clang++}
+: ${FILECHECK:=FileCheck}
# TODO: add testing for all of -O0...-O3
-CFLAGS="-fsanitize=thread -fsanitize-blacklist=$BLACKLIST -fPIE -O1 -g -fno-builtin -Wall"
+CFLAGS="-fsanitize=thread -fsanitize-blacklist=$BLACKLIST -fPIE -O1 -g -Wall"
LDFLAGS="-pie -lpthread -ldl $ROOTDIR/rtl/libtsan.a"
test_file() {
@@ -23,7 +24,7 @@ test_file() {
$COMPILER $SRC $CFLAGS -c -o $OBJ
$COMPILER $OBJ $LDFLAGS -o $EXE
RES=$($EXE 2>&1 || true)
- printf "%s\n" "$RES" | FileCheck $SRC
+ printf "%s\n" "$RES" | $FILECHECK $SRC
if [ "$3" == "" ]; then
rm -f $EXE $OBJ
fi
@@ -35,6 +36,10 @@ if [ "$1" == "" ]; then
echo SKIPPING FAILING TEST $c
continue
fi
+ if [[ $c == */load_shared_lib.cc ]]; then
+ echo TEST $c is not supported
+ continue
+ fi
COMPILER=$CXX
case $c in
*.c) COMPILER=$CC
diff --git a/lib/tsan/lit_tests/thread_end_with_ignore.cc b/lib/tsan/lit_tests/thread_end_with_ignore.cc
new file mode 100644
index 000000000000..960a477c5ad3
--- /dev/null
+++ b/lib/tsan/lit_tests/thread_end_with_ignore.cc
@@ -0,0 +1,19 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stdio.h>
+
+extern "C" void AnnotateIgnoreReadsBegin(const char *f, int l);
+
+void *Thread(void *x) {
+ AnnotateIgnoreReadsBegin("", 0);
+ return 0;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ pthread_join(t, 0);
+}
+
+// CHECK: ThreadSanitizer: thread T1 finished with ignores enabled
+
diff --git a/lib/tsan/lit_tests/thread_end_with_ignore2.cc b/lib/tsan/lit_tests/thread_end_with_ignore2.cc
new file mode 100644
index 000000000000..8f743ae2f4a4
--- /dev/null
+++ b/lib/tsan/lit_tests/thread_end_with_ignore2.cc
@@ -0,0 +1,9 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+extern "C" void AnnotateIgnoreWritesBegin(const char *f, int l);
+
+int main() {
+ AnnotateIgnoreWritesBegin("", 0);
+}
+
+// CHECK: ThreadSanitizer: thread T0 finished with ignores enabled
+
diff --git a/lib/tsan/lit_tests/thread_leak3.c b/lib/tsan/lit_tests/thread_leak3.c
index c48219fe73fa..3577164cad4a 100644
--- a/lib/tsan/lit_tests/thread_leak3.c
+++ b/lib/tsan/lit_tests/thread_leak3.c
@@ -1,5 +1,6 @@
// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
#include <pthread.h>
+#include <unistd.h>
void *Thread(void *x) {
return 0;
@@ -8,7 +9,9 @@ void *Thread(void *x) {
int main() {
pthread_t t;
pthread_create(&t, 0, Thread, 0);
+ sleep(1);
return 0;
}
// CHECK: WARNING: ThreadSanitizer: thread leak
+// CHECK: SUMMARY: ThreadSanitizer: thread leak{{.*}}main
diff --git a/lib/tsan/lit_tests/thread_leak4.c b/lib/tsan/lit_tests/thread_leak4.c
new file mode 100644
index 000000000000..f9fad0360d34
--- /dev/null
+++ b/lib/tsan/lit_tests/thread_leak4.c
@@ -0,0 +1,18 @@
+// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <unistd.h>
+#include <stdio.h>
+
+void *Thread(void *x) {
+ sleep(10);
+ return 0;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ printf("OK\n");
+ return 0;
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer: thread leak
diff --git a/lib/tsan/lit_tests/thread_leak5.c b/lib/tsan/lit_tests/thread_leak5.c
new file mode 100644
index 000000000000..fc72b149ec25
--- /dev/null
+++ b/lib/tsan/lit_tests/thread_leak5.c
@@ -0,0 +1,19 @@
+// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <unistd.h>
+
+void *Thread(void *x) {
+ return 0;
+}
+
+int main() {
+ for (int i = 0; i < 5; i++) {
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ }
+ sleep(1);
+ return 0;
+}
+
+// CHECK: WARNING: ThreadSanitizer: thread leak
+// CHECK: And 4 more similar thread leaks
diff --git a/lib/tsan/lit_tests/thread_name.cc b/lib/tsan/lit_tests/thread_name.cc
index 0ca0b1769976..37f308ffbc0c 100644
--- a/lib/tsan/lit_tests/thread_name.cc
+++ b/lib/tsan/lit_tests/thread_name.cc
@@ -15,7 +15,11 @@ void *Thread1(void *x) {
}
void *Thread2(void *x) {
+#if SANITIZER_LINUX && __GLIBC_PREREQ(2, 12)
pthread_setname_np(pthread_self(), "Thread2");
+#else
+ AnnotateThreadName(__FILE__, __LINE__, "Thread2");
+#endif
Global--;
return NULL;
}
diff --git a/lib/tsan/lit_tests/tsan-vs-gvn.cc b/lib/tsan/lit_tests/tsan-vs-gvn.cc
new file mode 100644
index 000000000000..40ae724b78e1
--- /dev/null
+++ b/lib/tsan/lit_tests/tsan-vs-gvn.cc
@@ -0,0 +1,38 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+// RUN: %clangxx_tsan -O2 %s -o %t && %t 2>&1 | FileCheck %s
+// RUN: %clangxx_tsan -O3 %s -o %t && %t 2>&1 | FileCheck %s
+//
+// Check that load widening is not tsan-hostile.
+#include <pthread.h>
+#include <stdio.h>
+#include <string.h>
+
+struct {
+ int i;
+ char c1, c2, c3, c4;
+} S;
+
+int G;
+
+void *Thread1(void *x) {
+ G = S.c1 + S.c3;
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ S.c2 = 1;
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ memset(&S, 123, sizeof(S));
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+ printf("PASS\n");
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer: data race
+// CHECK: PASS
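
A note on the optimization this test targets: GVN-style load widening may merge the two narrow reads of S.c1 and S.c3 into one wider load that also covers the concurrently written S.c2. The sketch below is an illustrative guess at the shape of such a transformation on a little-endian target (it is not compiler output); it shows why the instrumentation must still account only for the bytes the program actually asked for, otherwise this test would produce a false race report.

#include <cstring>

struct SDemo { int i; char c1, c2, c3, c4; };

// Widened form of "s->c1 + s->c3": fetch c1..c4 as one 32-bit chunk, then
// extract byte 0 (c1) and byte 2 (c3) on a little-endian target.
int WidenedSum(const SDemo *s) {
  unsigned packed;
  std::memcpy(&packed, &s->c1, sizeof(packed));
  return int(packed & 0xffu) + int((packed >> 16) & 0xffu);
}
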
diff --git a/lib/tsan/lit_tests/unaligned_norace.cc b/lib/tsan/lit_tests/unaligned_norace.cc
new file mode 100644
index 000000000000..792224b80126
--- /dev/null
+++ b/lib/tsan/lit_tests/unaligned_norace.cc
@@ -0,0 +1,84 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+
+uint64_t objs[8*3*3*2][3];
+
+extern "C" {
+uint16_t __tsan_unaligned_read2(void *addr);
+uint32_t __tsan_unaligned_read4(void *addr);
+uint64_t __tsan_unaligned_read8(void *addr);
+void __tsan_unaligned_write2(void *addr, uint16_t v);
+void __tsan_unaligned_write4(void *addr, uint32_t v);
+void __tsan_unaligned_write8(void *addr, uint64_t v);
+}
+
+static void access(char *p, int sz, int rw) {
+ if (rw) {
+ switch (sz) {
+ case 0: __tsan_unaligned_write2(p, 0); break;
+ case 1: __tsan_unaligned_write4(p, 0); break;
+ case 2: __tsan_unaligned_write8(p, 0); break;
+ default: exit(1);
+ }
+ } else {
+ switch (sz) {
+ case 0: __tsan_unaligned_read2(p); break;
+ case 1: __tsan_unaligned_read4(p); break;
+ case 2: __tsan_unaligned_read8(p); break;
+ default: exit(1);
+ }
+ }
+}
+
+static int accesssize(int sz) {
+ switch (sz) {
+ case 0: return 2;
+ case 1: return 4;
+ case 2: return 8;
+ }
+ exit(1);
+}
+
+void Test(bool main) {
+ uint64_t *obj = objs[0];
+ for (int off = 0; off < 8; off++) {
+ for (int sz1 = 0; sz1 < 3; sz1++) {
+ for (int sz2 = 0; sz2 < 3; sz2++) {
+ for (int rw = 0; rw < 2; rw++) {
+ char *p = (char*)obj + off;
+ if (main) {
+ // printf("thr=%d off=%d sz1=%d sz2=%d rw=%d p=%p\n",
+ // main, off, sz1, sz2, rw, p);
+ access(p, sz1, true);
+ } else {
+ p += accesssize(sz1);
+ // printf("thr=%d off=%d sz1=%d sz2=%d rw=%d p=%p\n",
+ // main, off, sz1, sz2, rw, p);
+ access(p, sz2, rw);
+ }
+ obj += 3;
+ }
+ }
+ }
+ }
+}
+
+void *Thread(void *p) {
+ (void)p;
+ Test(false);
+ return 0;
+}
+
+int main() {
+ pthread_t th;
+ pthread_create(&th, 0, Thread, 0);
+ Test(true);
+ pthread_join(th, 0);
+ printf("OK\n");
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer:
+// CHECK: OK
diff --git a/lib/tsan/lit_tests/unaligned_race.cc b/lib/tsan/lit_tests/unaligned_race.cc
new file mode 100644
index 000000000000..18bed8555cc5
--- /dev/null
+++ b/lib/tsan/lit_tests/unaligned_race.cc
@@ -0,0 +1,135 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+
+uint64_t objs[8*2*(2 + 4 + 8)][2];
+
+extern "C" {
+uint16_t __sanitizer_unaligned_load16(void *addr);
+uint32_t __sanitizer_unaligned_load32(void *addr);
+uint64_t __sanitizer_unaligned_load64(void *addr);
+void __sanitizer_unaligned_store16(void *addr, uint16_t v);
+void __sanitizer_unaligned_store32(void *addr, uint32_t v);
+void __sanitizer_unaligned_store64(void *addr, uint64_t v);
+}
+
+// All this mess is needed to generate a unique stack for each race;
+// otherwise tsan would suppress reports with similar stacks.
+
+static void access(char *p, int sz, int rw) {
+ if (rw) {
+ switch (sz) {
+ case 0: __sanitizer_unaligned_store16(p, 0); break;
+ case 1: __sanitizer_unaligned_store32(p, 0); break;
+ case 2: __sanitizer_unaligned_store64(p, 0); break;
+ default: exit(1);
+ }
+ } else {
+ switch (sz) {
+ case 0: __sanitizer_unaligned_load16(p); break;
+ case 1: __sanitizer_unaligned_load32(p); break;
+ case 2: __sanitizer_unaligned_load64(p); break;
+ default: exit(1);
+ }
+ }
+}
+
+static int accesssize(int sz) {
+ switch (sz) {
+ case 0: return 2;
+ case 1: return 4;
+ case 2: return 8;
+ }
+ exit(1);
+}
+
+template<int off, int off2>
+static void access3(bool main, int sz1, bool rw, char *p) {
+ p += off;
+ if (main) {
+ access(p, sz1, true);
+ } else {
+ p += off2;
+ if (rw) {
+ *p = 42;
+ } else {
+ if (*p == 42)
+ printf("bingo!\n");
+ }
+ }
+}
+
+template<int off>
+static void access2(bool main, int sz1, int off2, bool rw, char *obj) {
+ if (off2 == 0)
+ access3<off, 0>(main, sz1, rw, obj);
+ else if (off2 == 1)
+ access3<off, 1>(main, sz1, rw, obj);
+ else if (off2 == 2)
+ access3<off, 2>(main, sz1, rw, obj);
+ else if (off2 == 3)
+ access3<off, 3>(main, sz1, rw, obj);
+ else if (off2 == 4)
+ access3<off, 4>(main, sz1, rw, obj);
+ else if (off2 == 5)
+ access3<off, 5>(main, sz1, rw, obj);
+ else if (off2 == 6)
+ access3<off, 6>(main, sz1, rw, obj);
+ else if (off2 == 7)
+ access3<off, 7>(main, sz1, rw, obj);
+}
+
+static void access1(bool main, int off, int sz1, int off2, bool rw, char *obj) {
+ if (off == 0)
+ access2<0>(main, sz1, off2, rw, obj);
+ else if (off == 1)
+ access2<1>(main, sz1, off2, rw, obj);
+ else if (off == 2)
+ access2<2>(main, sz1, off2, rw, obj);
+ else if (off == 3)
+ access2<3>(main, sz1, off2, rw, obj);
+ else if (off == 4)
+ access2<4>(main, sz1, off2, rw, obj);
+ else if (off == 5)
+ access2<5>(main, sz1, off2, rw, obj);
+ else if (off == 6)
+ access2<6>(main, sz1, off2, rw, obj);
+ else if (off == 7)
+ access2<7>(main, sz1, off2, rw, obj);
+}
+
+void Test(bool main) {
+ uint64_t *obj = objs[0];
+ for (int off = 0; off < 8; off++) {
+ for (int sz1 = 0; sz1 < 3; sz1++) {
+ for (int off2 = 0; off2 < accesssize(sz1); off2++) {
+ for (int rw = 0; rw < 2; rw++) {
+ // printf("thr=%d off=%d sz1=%d off2=%d rw=%d p=%p\n",
+ // main, off, sz1, off2, rw, obj);
+ access1(main, off, sz1, off2, rw, (char*)obj);
+ obj += 2;
+ }
+ }
+ }
+ }
+}
+
+void *Thread(void *p) {
+ (void)p;
+ sleep(1);
+ Test(false);
+ return 0;
+}
+
+int main() {
+ pthread_t th;
+ pthread_create(&th, 0, Thread, 0);
+ Test(true);
+ pthread_join(th, 0);
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK: ThreadSanitizer: reported 224 warnings
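
Where the expected total of 224 comes from: Test() above produces one racing pair per combination of first-access offset (8 values), second-access offset inside the first access (2 + 4 + 8 = 14 values summed over the three access sizes), and read-vs-write for the second access (2 values), i.e. 8 * 14 * 2 = 224 distinct races. The same product sizes the objs[8*2*(2 + 4 + 8)][2] array, so every pair gets its own object and its own report.
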
diff --git a/lib/tsan/lit_tests/vptr_harmful_race.cc b/lib/tsan/lit_tests/vptr_harmful_race.cc
index f51ba7ee57f0..76d31c00ad4f 100644
--- a/lib/tsan/lit_tests/vptr_harmful_race.cc
+++ b/lib/tsan/lit_tests/vptr_harmful_race.cc
@@ -2,6 +2,7 @@
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
+#include <unistd.h>
struct A {
A() {
@@ -34,6 +35,7 @@ void *Thread1(void *x) {
}
void *Thread2(void *x) {
+ sleep(1);
delete obj;
return NULL;
}
@@ -46,4 +48,4 @@ int main() {
pthread_join(t[1], NULL);
}
-// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK: WARNING: ThreadSanitizer: data race on vptr
diff --git a/lib/tsan/lit_tests/vptr_harmful_race2.cc b/lib/tsan/lit_tests/vptr_harmful_race2.cc
new file mode 100644
index 000000000000..d7e1d19a11bd
--- /dev/null
+++ b/lib/tsan/lit_tests/vptr_harmful_race2.cc
@@ -0,0 +1,51 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <semaphore.h>
+#include <stdio.h>
+#include <unistd.h>
+
+struct A {
+ A() {
+ sem_init(&sem_, 0, 0);
+ }
+ virtual void F() {
+ }
+ void Done() {
+ sem_post(&sem_);
+ }
+ virtual ~A() {
+ sem_wait(&sem_);
+ sem_destroy(&sem_);
+ }
+ sem_t sem_;
+};
+
+struct B : A {
+ virtual void F() {
+ }
+ virtual ~B() { }
+};
+
+static A *obj = new B;
+
+void *Thread1(void *x) {
+ sleep(1);
+ obj->F();
+ obj->Done();
+ return NULL;
+}
+
+void *Thread2(void *x) {
+ delete obj;
+ return NULL;
+}
+
+int main() {
+ pthread_t t[2];
+ pthread_create(&t[0], NULL, Thread1, NULL);
+ pthread_create(&t[1], NULL, Thread2, NULL);
+ pthread_join(t[0], NULL);
+ pthread_join(t[1], NULL);
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race on vptr
diff --git a/lib/tsan/rtl/CMakeLists.txt b/lib/tsan/rtl/CMakeLists.txt
index d91e2e43ca4c..f1a8ff4d6558 100644
--- a/lib/tsan/rtl/CMakeLists.txt
+++ b/lib/tsan/rtl/CMakeLists.txt
@@ -37,22 +37,15 @@ if(CAN_TARGET_x86_64 AND UNIX AND NOT APPLE)
set(TSAN_ASM_SOURCES tsan_rtl_amd64.S)
# Pass ASM file directly to the C++ compiler.
set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES
- LANGUAGE C
- )
- add_library(clang_rt.tsan-x86_64 STATIC
- ${TSAN_SOURCES}
- ${TSAN_ASM_SOURCES}
- $<TARGET_OBJECTS:RTInterception.x86_64>
- $<TARGET_OBJECTS:RTSanitizerCommon.x86_64>
- )
- set_target_compile_flags(clang_rt.tsan-x86_64
- ${TSAN_CFLAGS} ${TARGET_x86_64_CFLAGS}
- )
- list(APPEND TSAN_RUNTIME_LIBRARIES clang_rt.tsan-x86_64)
-endif()
-
-if(TSAN_RUNTIME_LIBRARIES)
- set_property(TARGET ${TSAN_RUNTIME_LIBRARIES} APPEND PROPERTY
- COMPILE_DEFINITIONS ${TSAN_COMMON_DEFINITIONS})
- add_clang_compiler_rt_libraries(${TSAN_RUNTIME_LIBRARIES})
+ LANGUAGE C)
+ set(arch "x86_64")
+ add_compiler_rt_static_runtime(clang_rt.tsan-${arch} ${arch}
+ SOURCES ${TSAN_SOURCES} ${TSAN_ASM_SOURCES}
+ $<TARGET_OBJECTS:RTInterception.${arch}>
+ $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
+ $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
+ CFLAGS ${TSAN_CFLAGS}
+ DEFS ${TSAN_COMMON_DEFINITIONS}
+ SYMS tsan.syms)
+ list(APPEND TSAN_RUNTIME_LIBRARIES clang_rt.tsan-${arch})
endif()
diff --git a/lib/tsan/rtl/Makefile.mk b/lib/tsan/rtl/Makefile.mk
index a6a7fc8b86e8..2687123f731d 100644
--- a/lib/tsan/rtl/Makefile.mk
+++ b/lib/tsan/rtl/Makefile.mk
@@ -19,7 +19,7 @@ Implementation := Generic
# FIXME: use automatic dependencies?
Dependencies := $(wildcard $(Dir)/*.h)
Dependencies += $(wildcard $(Dir)/../../interception/*.h)
-Dependencies += $(wildcard $(Dir)/../../interception/mach_override/*.h)
+Dependencies += $(wildcard $(Dir)/../../sanitizer_common/*.h)
# Define a convenience variable for all the tsan functions.
TsanFunctions += $(Sources:%.cc=%) $(AsmSources:%.S=%)
diff --git a/lib/tsan/rtl/Makefile.old b/lib/tsan/rtl/Makefile.old
index f522ec6b47d7..33944ffe9675 100644
--- a/lib/tsan/rtl/Makefile.old
+++ b/lib/tsan/rtl/Makefile.old
@@ -1,16 +1,14 @@
CXXFLAGS = -fPIE -g -Wall -Werror -fno-builtin -DTSAN_DEBUG=$(DEBUG) -DSANITIZER_DEBUG=$(DEBUG)
+CLANG=clang
ifeq ($(DEBUG), 0)
CXXFLAGS += -O3
endif
-ifeq ($(CXX), clang++)
- CXXFLAGS+= -Wgnu
-endif
# For interception. FIXME: move interception one level higher.
INTERCEPTION=../../interception
COMMON=../../sanitizer_common
INCLUDES= -I../.. -I../../../include
-EXTRA_CXXFLAGS=-fno-exceptions
+EXTRA_CXXFLAGS=-fno-exceptions -fno-rtti
NO_SYSROOT=--sysroot=.
CXXFLAGS+=$(EXTRA_CXXFLAGS)
CXXFLAGS+=$(CFLAGS)
@@ -21,7 +19,7 @@ ifeq ($(CXX), g++)
endif # CXX=g++
endif # DEBUG=0
-ifeq ($(CXX), clang++)
+ifeq ($(CXX), $(CLANG)++)
# Global constructors are banned.
CXXFLAGS+=-Wglobal-constructors
endif
diff --git a/lib/tsan/rtl/tsan.syms b/lib/tsan/rtl/tsan.syms
new file mode 100644
index 000000000000..4464a0a231c9
--- /dev/null
+++ b/lib/tsan/rtl/tsan.syms
@@ -0,0 +1,5 @@
+{
+ __tsan_*;
+ __sanitizer_syscall_pre_*;
+ __sanitizer_syscall_post_*;
+};
diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h
index e0c04733f0a3..7150e2e255d8 100644
--- a/lib/tsan/rtl/tsan_defs.h
+++ b/lib/tsan/rtl/tsan_defs.h
@@ -28,16 +28,19 @@ namespace __tsan {
const bool kGoMode = true;
const bool kCppMode = false;
const char *const kTsanOptionsEnv = "GORACE";
+// The Go linker does not support weak symbols.
+#define CPP_WEAK
#else
const bool kGoMode = false;
const bool kCppMode = true;
const char *const kTsanOptionsEnv = "TSAN_OPTIONS";
+#define CPP_WEAK WEAK
#endif
const int kTidBits = 13;
const unsigned kMaxTid = 1 << kTidBits;
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
-const int kClkBits = 43;
+const int kClkBits = 42;
#ifndef TSAN_GO
const int kShadowStackSize = 4 * 1024;
const int kTraceStackSize = 256;
@@ -153,13 +156,13 @@ struct MD5Hash {
MD5Hash md5_hash(const void *data, uptr size);
struct ThreadState;
-struct ThreadContext;
struct Context;
struct ReportStack;
class ReportDesc;
class RegionAlloc;
class StackTrace;
struct MBlock;
+struct Suppression;
} // namespace __tsan
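
Regarding the CPP_WEAK macro introduced above: in the C++ build it expands to WEAK, which in sanitizer_common is expected to boil down to __attribute__((weak)) (an assumption about that header, not something this patch shows), so the runtime can export defaults that a program may override at link time; the Go linker cannot resolve weak symbols, so the Go build defines CPP_WEAK to nothing. A minimal illustration with a hypothetical hook name:

// Hypothetical example; __example_default_options is not a real runtime hook.
extern "C" __attribute__((weak)) const char *__example_default_options() {
  return "";  // weak default, used when the program provides nothing
}
// A program may supply a strong definition that replaces the weak one:
//   extern "C" const char *__example_default_options() { return "verbosity=1"; }
// The Go linker offers no such override mechanism, hence CPP_WEAK is empty
// under TSAN_GO.
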
diff --git a/lib/tsan/rtl/tsan_fd.cc b/lib/tsan/rtl/tsan_fd.cc
index ef375a4d98f6..14bdbb53b322 100644
--- a/lib/tsan/rtl/tsan_fd.cc
+++ b/lib/tsan/rtl/tsan_fd.cc
@@ -74,13 +74,14 @@ static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
uptr l1 = atomic_load(pl1, memory_order_consume);
if (l1 == 0) {
uptr size = kTableSizeL2 * sizeof(FdDesc);
- void *p = internal_alloc(MBlockFD, size);
+ // We need this to reside in user memory to properly catch races on it.
+ void *p = user_alloc(thr, pc, size);
internal_memset(p, 0, size);
MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
l1 = (uptr)p;
else
- internal_free(p);
+ user_free(thr, pc, p);
}
return &((FdDesc*)l1)[fd % kTableSizeL2]; // NOLINT
}
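
The fddesc() change above keeps the lazily created second-level table but allocates it from user memory so that races on the descriptors themselves are visible to the tool. The allocation follows the usual allocate / CAS-publish / free-on-loss idiom; a generic sketch of that idiom using std::atomic (the runtime itself uses its internal atomic wrappers, which this patch does not change) looks like this:

#include <atomic>
#include <cstdlib>

static std::atomic<void*> g_table{nullptr};

// Lazily create a shared, zero-initialized table; at most one allocation
// survives, the loser frees its copy and adopts the published one.
void *GetTable(size_t size) {
  void *t = g_table.load(std::memory_order_acquire);
  if (t == nullptr) {
    void *fresh = calloc(1, size);
    void *expected = nullptr;
    if (g_table.compare_exchange_strong(expected, fresh,
                                        std::memory_order_acq_rel))
      t = fresh;        // we won the race: our table is now published
    else {
      free(fresh);      // we lost: discard the speculative allocation
      t = expected;     // and use whoever won
    }
  }
  return t;
}
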
@@ -150,7 +151,7 @@ void FdAcquire(ThreadState *thr, uptr pc, int fd) {
FdDesc *d = fddesc(thr, pc, fd);
FdSync *s = d->sync;
DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
- MemoryRead8Byte(thr, pc, (uptr)d);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
if (s)
Acquire(thr, pc, (uptr)s);
}
@@ -161,20 +162,20 @@ void FdRelease(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
if (s)
Release(thr, pc, (uptr)s);
- MemoryRead8Byte(thr, pc, (uptr)d);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}
void FdAccess(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
FdDesc *d = fddesc(thr, pc, fd);
- MemoryRead8Byte(thr, pc, (uptr)d);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}
void FdClose(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
FdDesc *d = fddesc(thr, pc, fd);
// To catch races between fd usage and close.
- MemoryWrite8Byte(thr, pc, (uptr)d);
+ MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
// We need to clear it, because if we do not intercept any call out there
// that creates an fd, we will hit false positives.
MemoryResetRange(thr, pc, (uptr)d, 8);
@@ -193,7 +194,7 @@ void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
// Ignore the case when the user dups a not-yet-connected socket.
FdDesc *od = fddesc(thr, pc, oldfd);
- MemoryRead8Byte(thr, pc, (uptr)od);
+ MemoryRead(thr, pc, (uptr)od, kSizeLog8);
FdClose(thr, pc, newfd);
init(thr, pc, newfd, ref(od->sync));
}
diff --git a/lib/tsan/rtl/tsan_flags.cc b/lib/tsan/rtl/tsan_flags.cc
index 88c4bb6a2e44..c062592f482d 100644
--- a/lib/tsan/rtl/tsan_flags.cc
+++ b/lib/tsan/rtl/tsan_flags.cc
@@ -45,15 +45,19 @@ void InitializeFlags(Flags *f, const char *env) {
f->report_thread_leaks = true;
f->report_destroy_locked = true;
f->report_signal_unsafe = true;
+ f->report_atomic_races = true;
f->force_seq_cst_atomics = false;
f->strip_path_prefix = "";
f->suppressions = "";
+ f->print_suppressions = false;
+ f->print_benign = false;
f->exitcode = 66;
f->log_path = "stderr";
f->atexit_sleep_ms = 1000;
f->verbosity = 0;
f->profile_memory = "";
f->flush_memory_ms = 0;
+ f->flush_symbolizer_ms = 5000;
f->stop_on_start = false;
f->running_on_valgrind = false;
f->external_symbolizer_path = "";
@@ -72,15 +76,19 @@ void InitializeFlags(Flags *f, const char *env) {
ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks");
ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked");
ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe");
+ ParseFlag(env, &f->report_atomic_races, "report_atomic_races");
ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics");
ParseFlag(env, &f->strip_path_prefix, "strip_path_prefix");
ParseFlag(env, &f->suppressions, "suppressions");
+ ParseFlag(env, &f->print_suppressions, "print_suppressions");
+ ParseFlag(env, &f->print_benign, "print_benign");
ParseFlag(env, &f->exitcode, "exitcode");
ParseFlag(env, &f->log_path, "log_path");
ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms");
ParseFlag(env, &f->verbosity, "verbosity");
ParseFlag(env, &f->profile_memory, "profile_memory");
ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms");
+ ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms");
ParseFlag(env, &f->stop_on_start, "stop_on_start");
ParseFlag(env, &f->external_symbolizer_path, "external_symbolizer_path");
ParseFlag(env, &f->history_size, "history_size");
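
The new options follow the existing name=value convention that ParseFlag() scans for in the TSAN_OPTIONS environment variable (kTsanOptionsEnv). For instance, a run that disables atomic-race reports while printing matched benign races and suppressions could be launched with something along the lines of TSAN_OPTIONS="report_atomic_races=0 print_benign=1 print_suppressions=1" ./a.out (treat the exact quoting and separators as illustrative; the name=value pairs are what the parser looks for).
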
diff --git a/lib/tsan/rtl/tsan_flags.h b/lib/tsan/rtl/tsan_flags.h
index 6547911ec7a3..aaacd98a6223 100644
--- a/lib/tsan/rtl/tsan_flags.h
+++ b/lib/tsan/rtl/tsan_flags.h
@@ -43,6 +43,8 @@ struct Flags {
// Report violations of async signal-safety
// (e.g. malloc() call from a signal handler).
bool report_signal_unsafe;
+ // Report races between atomic and plain memory accesses.
+ bool report_atomic_races;
// If set, all atomics are effectively sequentially consistent (seq_cst),
// regardless of what user actually specified.
bool force_seq_cst_atomics;
@@ -50,6 +52,10 @@ struct Flags {
const char *strip_path_prefix;
// Suppressions filename.
const char *suppressions;
+ // Print matched suppressions at exit.
+ bool print_suppressions;
+ // Print matched "benign" races at exit.
+ bool print_benign;
// Override exit status if something was reported.
int exitcode;
// Write logs to "log_path.pid".
@@ -65,6 +71,8 @@ struct Flags {
const char *profile_memory;
// Flush shadow memory every X ms.
int flush_memory_ms;
+ // Flush symbolizer caches every X ms.
+ int flush_symbolizer_ms;
// Stops on start until __tsan_resume() is called (for debugging).
bool stop_on_start;
// Controls whether RunningOnValgrind() returns true or false.
@@ -86,6 +94,6 @@ struct Flags {
Flags *flags();
void InitializeFlags(Flags *flags, const char *env);
-}
+} // namespace __tsan
#endif // TSAN_FLAGS_H
diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc
index be58ca92cf91..f18b26f6abe4 100644
--- a/lib/tsan/rtl/tsan_interceptors.cc
+++ b/lib/tsan/rtl/tsan_interceptors.cc
@@ -10,11 +10,13 @@
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// FIXME: move as many interceptors as possible into
-// sanitizer_common/sanitizer_common_interceptors.h
+// sanitizer_common/sanitizer_common_interceptors.inc
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "interception/interception.h"
@@ -26,18 +28,21 @@
using namespace __tsan; // NOLINT
-const int kSigCount = 128;
+const int kSigCount = 64;
struct my_siginfo_t {
- int opaque[128];
+ // The size is determined by looking at the sizeof of the real siginfo_t on Linux.
+ u64 opaque[128 / sizeof(u64)];
};
struct sigset_t {
- u64 val[1024 / 8 / sizeof(u64)];
+ // The size is determined by looking at the sizeof of the real sigset_t on Linux.
+ u64 val[128 / sizeof(u64)];
};
struct ucontext_t {
- uptr opaque[117];
+ // The size is determined by looking at the sizeof of the real ucontext_t on Linux.
+ u64 opaque[936 / sizeof(u64) + 1];
};
extern "C" int pthread_attr_init(void *attr);
@@ -53,9 +58,13 @@ extern "C" int pthread_sigmask(int how, const sigset_t *set, sigset_t *oldset);
extern "C" int sigfillset(sigset_t *set);
extern "C" void *pthread_self();
extern "C" void _exit(int status);
-extern "C" int __cxa_atexit(void (*func)(void *arg), void *arg, void *dso);
extern "C" int *__errno_location();
extern "C" int fileno_unlocked(void *stream);
+extern "C" void *__libc_malloc(uptr size);
+extern "C" void *__libc_calloc(uptr size, uptr n);
+extern "C" void *__libc_realloc(void *ptr, uptr size);
+extern "C" void __libc_free(void *ptr);
+extern "C" int mallopt(int param, int value);
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
const int kPthreadAttrSize = 56;
@@ -83,11 +92,6 @@ typedef void (*sighandler_t)(int sig);
#define errno (*__errno_location())
-union pthread_attr_t {
- char size[kPthreadAttrSize];
- void *align;
-};
-
struct sigaction_t {
union {
sighandler_t sa_handler;
@@ -124,7 +128,7 @@ struct SignalContext {
int pending_signal_count;
SignalDesc pending_signals[kSigCount];
};
-}
+} // namespace __tsan
static SignalContext *SigCtx(ThreadState *thr) {
SignalContext *ctx = (SignalContext*)thr->signal_ctx;
@@ -240,12 +244,15 @@ class AtExitContext {
typedef void(*atexit_t)();
- int atexit(ThreadState *thr, uptr pc, atexit_t f) {
+ int atexit(ThreadState *thr, uptr pc, bool is_on_exit,
+ atexit_t f, void *arg) {
Lock l(&mtx_);
if (pos_ == kMaxAtExit)
return 1;
Release(thr, pc, (uptr)this);
stack_[pos_] = f;
+ args_[pos_] = arg;
+ is_on_exits_[pos_] = is_on_exit;
pos_++;
return 0;
}
@@ -254,11 +261,15 @@ class AtExitContext {
CHECK_EQ(thr->in_rtl, 0);
for (;;) {
atexit_t f = 0;
+ void *arg = 0;
+ bool is_on_exit = false;
{
Lock l(&mtx_);
if (pos_) {
pos_--;
f = stack_[pos_];
+ arg = args_[pos_];
+ is_on_exit = is_on_exits_[pos_];
ScopedInRtl in_rtl;
Acquire(thr, pc, (uptr)this);
}
@@ -267,7 +278,10 @@ class AtExitContext {
break;
DPrintf("#%d: executing atexit func %p\n", thr->tid, f);
CHECK_EQ(thr->in_rtl, 0);
- f();
+ if (is_on_exit)
+ ((void(*)(int status, void *arg))f)(0, arg);
+ else
+ ((void(*)(void *arg, void *dso))f)(arg, 0);
}
}
@@ -275,42 +289,133 @@ class AtExitContext {
static const int kMaxAtExit = 128;
Mutex mtx_;
atexit_t stack_[kMaxAtExit];
+ void *args_[kMaxAtExit];
+ bool is_on_exits_[kMaxAtExit];
int pos_;
};
static AtExitContext *atexit_ctx;
-static void finalize(void *arg) {
- ThreadState * thr = cur_thread();
- uptr pc = 0;
- atexit_ctx->exit(thr, pc);
- {
- ScopedInRtl in_rtl;
- DestroyAndFree(atexit_ctx);
+TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
+ if (cur_thread()->in_symbolizer)
+ return 0;
+ SCOPED_TSAN_INTERCEPTOR(atexit, f);
+ return atexit_ctx->atexit(thr, pc, false, (void(*)())f, 0);
+}
+
+TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
+ if (cur_thread()->in_symbolizer)
+ return 0;
+ SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
+ return atexit_ctx->atexit(thr, pc, true, (void(*)())f, arg);
+}
+
+TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
+ if (cur_thread()->in_symbolizer)
+ return 0;
+ SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
+ if (dso)
+ return REAL(__cxa_atexit)(f, arg, dso);
+ return atexit_ctx->atexit(thr, pc, false, (void(*)())f, arg);
+}
+
+// Clean up old bufs.
+static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
+ for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
+ JmpBuf *buf = &thr->jmp_bufs[i];
+ if (buf->sp <= sp) {
+ uptr sz = thr->jmp_bufs.Size();
+ thr->jmp_bufs[i] = thr->jmp_bufs[sz - 1];
+ thr->jmp_bufs.PopBack();
+ i--;
+ }
}
- int status = Finalize(cur_thread());
- if (status)
- _exit(status);
}
-TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
- SCOPED_TSAN_INTERCEPTOR(atexit, f);
- return atexit_ctx->atexit(thr, pc, f);
+static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
+ if (thr->shadow_stack_pos == 0) // called from libc guts during bootstrap
+ return;
+ // Clean up old bufs.
+ JmpBufGarbageCollect(thr, sp);
+ // Remember the buf.
+ JmpBuf *buf = thr->jmp_bufs.PushBack();
+ buf->sp = sp;
+ buf->mangled_sp = mangled_sp;
+ buf->shadow_stack_pos = thr->shadow_stack_pos;
+}
+
+static void LongJmp(ThreadState *thr, uptr *env) {
+ uptr mangled_sp = env[6];
+ // Find the saved buf by mangled_sp.
+ for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
+ JmpBuf *buf = &thr->jmp_bufs[i];
+ if (buf->mangled_sp == mangled_sp) {
+ CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
+ // Unwind the stack.
+ while (thr->shadow_stack_pos > buf->shadow_stack_pos)
+ FuncExit(thr);
+ JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
+ return;
+ }
+ }
+ Printf("ThreadSanitizer: can't find longjmp buf\n");
+ CHECK(0);
}
-TSAN_INTERCEPTOR(void, longjmp, void *env, int val) {
- SCOPED_TSAN_INTERCEPTOR(longjmp, env, val);
- Printf("ThreadSanitizer: longjmp() is not supported\n");
- Die();
+extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) {
+ ScopedInRtl in_rtl;
+ SetJmp(cur_thread(), sp, mangled_sp);
}
-TSAN_INTERCEPTOR(void, siglongjmp, void *env, int val) {
- SCOPED_TSAN_INTERCEPTOR(siglongjmp, env, val);
- Printf("ThreadSanitizer: siglongjmp() is not supported\n");
- Die();
+// Not called. Merely to satisfy TSAN_INTERCEPT().
+extern "C" int __interceptor_setjmp(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+extern "C" int __interceptor__setjmp(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+extern "C" int __interceptor_sigsetjmp(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+extern "C" int __interceptor___sigsetjmp(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+extern "C" int setjmp(void *env);
+extern "C" int _setjmp(void *env);
+extern "C" int sigsetjmp(void *env);
+extern "C" int __sigsetjmp(void *env);
+DEFINE_REAL(int, setjmp, void *env)
+DEFINE_REAL(int, _setjmp, void *env)
+DEFINE_REAL(int, sigsetjmp, void *env)
+DEFINE_REAL(int, __sigsetjmp, void *env)
+
+TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) {
+ {
+ SCOPED_TSAN_INTERCEPTOR(longjmp, env, val);
+ }
+ LongJmp(cur_thread(), env);
+ REAL(longjmp)(env, val);
+}
+
+TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) {
+ {
+ SCOPED_TSAN_INTERCEPTOR(siglongjmp, env, val);
+ }
+ LongJmp(cur_thread(), env);
+ REAL(siglongjmp)(env, val);
}
TSAN_INTERCEPTOR(void*, malloc, uptr size) {
+ if (cur_thread()->in_symbolizer)
+ return __libc_malloc(size);
void *p = 0;
{
SCOPED_INTERCEPTOR_RAW(malloc, size);
@@ -326,17 +431,23 @@ TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
}
TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
+ if (cur_thread()->in_symbolizer)
+ return __libc_calloc(size, n);
+ if (__sanitizer::CallocShouldReturnNullDueToOverflow(size, n)) return 0;
void *p = 0;
{
SCOPED_INTERCEPTOR_RAW(calloc, size, n);
p = user_alloc(thr, pc, n * size);
- if (p) internal_memset(p, 0, n * size);
+ if (p)
+ internal_memset(p, 0, n * size);
}
invoke_malloc_hook(p, n * size);
return p;
}
TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
+ if (cur_thread()->in_symbolizer)
+ return __libc_realloc(p, size);
if (p)
invoke_free_hook(p);
{
@@ -350,6 +461,8 @@ TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
TSAN_INTERCEPTOR(void, free, void *p) {
if (p == 0)
return;
+ if (cur_thread()->in_symbolizer)
+ return __libc_free(p);
invoke_free_hook(p);
SCOPED_INTERCEPTOR_RAW(free, p);
user_free(thr, pc, p);
@@ -358,12 +471,21 @@ TSAN_INTERCEPTOR(void, free, void *p) {
TSAN_INTERCEPTOR(void, cfree, void *p) {
if (p == 0)
return;
+ if (cur_thread()->in_symbolizer)
+ return __libc_free(p);
invoke_free_hook(p);
SCOPED_INTERCEPTOR_RAW(cfree, p);
user_free(thr, pc, p);
}
+TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
+ SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
+ return user_alloc_usable_size(thr, pc, p);
+}
+
#define OPERATOR_NEW_BODY(mangled_name) \
+ if (cur_thread()->in_symbolizer) \
+ return __libc_malloc(size); \
void *p = 0; \
{ \
SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
@@ -387,6 +509,8 @@ void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) {
#define OPERATOR_DELETE_BODY(mangled_name) \
if (ptr == 0) return; \
+ if (cur_thread()->in_symbolizer) \
+ return __libc_free(ptr); \
invoke_free_hook(ptr); \
SCOPED_INTERCEPTOR_RAW(mangled_name, ptr); \
user_free(thr, pc, ptr);
@@ -551,7 +675,9 @@ TSAN_INTERCEPTOR(void*, mmap, void *addr, long_t sz, int prot,
return MAP_FAILED;
void *res = REAL(mmap)(addr, sz, prot, flags, fd, off);
if (res != MAP_FAILED) {
- MemoryResetRange(thr, pc, (uptr)res, sz);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
}
return res;
}
@@ -563,13 +689,16 @@ TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot,
return MAP_FAILED;
void *res = REAL(mmap64)(addr, sz, prot, flags, fd, off);
if (res != MAP_FAILED) {
- MemoryResetRange(thr, pc, (uptr)res, sz);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
}
return res;
}
TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
+ DontNeedShadowFor((uptr)addr, sz);
int res = REAL(munmap)(addr, sz);
return res;
}
@@ -681,21 +810,21 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
TSAN_INTERCEPTOR(int, pthread_create,
void *th, void *attr, void *(*callback)(void*), void * param) {
SCOPED_TSAN_INTERCEPTOR(pthread_create, th, attr, callback, param);
- pthread_attr_t myattr;
+ __sanitizer_pthread_attr_t myattr;
if (attr == 0) {
pthread_attr_init(&myattr);
attr = &myattr;
}
int detached = 0;
pthread_attr_getdetachstate(attr, &detached);
- uptr stacksize = 0;
- pthread_attr_getstacksize(attr, &stacksize);
- // We place the huge ThreadState object into TLS, account for that.
- const uptr minstacksize = GetTlsSize() + 128*1024;
- if (stacksize < minstacksize) {
- DPrintf("ThreadSanitizer: stacksize %zu->%zu\n", stacksize, minstacksize);
- pthread_attr_setstacksize(attr, minstacksize);
- }
+
+#if defined(TSAN_DEBUG_OUTPUT)
+ int verbosity = (TSAN_DEBUG_OUTPUT);
+#else
+ int verbosity = 0;
+#endif
+ AdjustStackSizeLinux(attr, verbosity);
+
ThreadParam p;
p.callback = callback;
p.param = param;
@@ -960,14 +1089,14 @@ TSAN_INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
- MemoryWrite1Byte(thr, pc, (uptr)b);
+ MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
int res = REAL(pthread_barrier_init)(b, a, count);
return res;
}
TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
- MemoryWrite1Byte(thr, pc, (uptr)b);
+ MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
int res = REAL(pthread_barrier_destroy)(b);
return res;
}
@@ -975,9 +1104,9 @@ TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
Release(thr, pc, (uptr)b);
- MemoryRead1Byte(thr, pc, (uptr)b);
+ MemoryRead(thr, pc, (uptr)b, kSizeLog1);
int res = REAL(pthread_barrier_wait)(b);
- MemoryRead1Byte(thr, pc, (uptr)b);
+ MemoryRead(thr, pc, (uptr)b, kSizeLog1);
if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
Acquire(thr, pc, (uptr)b);
}
@@ -1064,6 +1193,74 @@ TSAN_INTERCEPTOR(int, sem_getvalue, void *s, int *sval) {
return res;
}
+TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__xstat, version, path, buf);
+ return REAL(__xstat)(version, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__xstat, 0, path, buf);
+ return REAL(__xstat)(0, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__xstat64, version, path, buf);
+ return REAL(__xstat64)(version, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__xstat64, 0, path, buf);
+ return REAL(__xstat64)(0, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__lxstat, version, path, buf);
+ return REAL(__lxstat)(version, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__lxstat, 0, path, buf);
+ return REAL(__lxstat)(0, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__lxstat64, version, path, buf);
+ return REAL(__lxstat64)(version, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, lstat64, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__lxstat64, 0, path, buf);
+ return REAL(__lxstat64)(0, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat)(version, fd, buf);
+}
+
+TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat)(0, fd, buf);
+}
+
+TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat64)(version, fd, buf);
+}
+
+TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat64)(0, fd, buf);
+}
+
TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
int fd = REAL(open)(name, flags, mode);
@@ -1179,6 +1376,22 @@ TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
return res;
}
+TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
+ SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
+ int res = REAL(bind)(fd, addr, addrlen);
+ if (fd > 0 && res == 0)
+ FdAccess(thr, pc, fd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
+ SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
+ int res = REAL(listen)(fd, backlog);
+ if (fd > 0 && res == 0)
+ FdAccess(thr, pc, fd);
+ return res;
+}
+
TSAN_INTERCEPTOR(int, accept, int fd, void *addr, unsigned *addrlen) {
SCOPED_TSAN_INTERCEPTOR(accept, fd, addr, addrlen);
int fd2 = REAL(accept)(fd, addr, addrlen);
@@ -1225,6 +1438,18 @@ TSAN_INTERCEPTOR(int, __close, int fd) {
return REAL(__close)(fd);
}
+// glibc guts
+TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
+ SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
+ int fds[64];
+ int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
+ for (int i = 0; i < cnt; i++) {
+ if (fds[i] > 0)
+ FdClose(thr, pc, fds[i]);
+ }
+ REAL(__res_iclose)(state, free_addr);
+}
+
TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
int res = REAL(pipe)(pipefd);
@@ -1373,6 +1598,17 @@ TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
return REAL(fwrite)(p, size, nmemb, f);
}
+TSAN_INTERCEPTOR(int, fflush, void *stream) {
+ SCOPED_TSAN_INTERCEPTOR(fflush, stream);
+ return REAL(fflush)(stream);
+}
+
+TSAN_INTERCEPTOR(void, abort, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(abort, fake);
+ REAL(fflush)(0);
+ REAL(abort)(fake);
+}
+
TSAN_INTERCEPTOR(int, puts, const char *s) {
SCOPED_TSAN_INTERCEPTOR(puts, s);
MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s), false);
@@ -1420,7 +1656,7 @@ TSAN_INTERCEPTOR(int, poll, void *fds, long_t nfds, int timeout) {
return res;
}
-static void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
+void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
my_siginfo_t *info, void *ctx) {
ThreadState *thr = cur_thread();
SignalContext *sctx = SigCtx(thr);
@@ -1433,7 +1669,6 @@ static void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
// (but check if we are in a recursive interceptor,
// i.e. pthread_join()->munmap()).
(sctx && sctx->in_blocking_func == 1 && thr->in_rtl == 1)) {
- CHECK(thr->in_rtl == 0 || thr->in_rtl == 1);
int in_rtl = thr->in_rtl;
thr->in_rtl = 0;
CHECK_EQ(thr->in_signal_handler, false);
@@ -1519,11 +1754,11 @@ TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
SignalContext *sctx = SigCtx(thr);
CHECK_NE(sctx, 0);
int prev = sctx->int_signal_send;
- if (pid == GetPid()) {
+ if (pid == (int)internal_getpid()) {
sctx->int_signal_send = sig;
}
int res = REAL(kill)(pid, sig);
- if (pid == GetPid()) {
+ if (pid == (int)internal_getpid()) {
CHECK_EQ(sctx->int_signal_send, sig);
sctx->int_signal_send = prev;
}
@@ -1600,6 +1835,13 @@ struct TsanInterceptorContext {
const uptr pc;
};
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+// Causes interceptor recursion (getpwuid_r() calls fopen())
+#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
+#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+// Causes interceptor recursion (glob64() calls lstat64())
+#undef SANITIZER_INTERCEPT_GLOB
+
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
MemoryAccessRange(((TsanInterceptorContext*)ctx)->thr, \
((TsanInterceptorContext*)ctx)->pc, \
@@ -1621,6 +1863,13 @@ struct TsanInterceptorContext {
ThreadSetName(((TsanInterceptorContext*)ctx)->thr, name)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
+// FIXME: Implement these with MemoryAccessRange().
+#define COMMON_SYSCALL_PRE_READ_RANGE(p, s)
+#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)
+#define COMMON_SYSCALL_POST_READ_RANGE(p, s)
+#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s)
+#include "sanitizer_common/sanitizer_common_syscalls.inc"
+
namespace __tsan {
void ProcessPendingSignals(ThreadState *thr) {
@@ -1655,7 +1904,7 @@ void ProcessPendingSignals(ThreadState *thr) {
(uptr)sigactions[sig].sa_sigaction :
(uptr)sigactions[sig].sa_handler;
stack.Init(&pc, 1);
- Lock l(&ctx->thread_mtx);
+ ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeErrnoInSignal);
if (!IsFiredSuppression(ctx, rep, stack)) {
rep.AddStack(&stack);
@@ -1671,6 +1920,16 @@ void ProcessPendingSignals(ThreadState *thr) {
thr->in_signal_handler = false;
}
+static void finalize(void *arg) {
+ ThreadState * thr = cur_thread();
+ uptr pc = 0;
+ atexit_ctx->exit(thr, pc);
+ int status = Finalize(cur_thread());
+ REAL(fflush)(0);
+ if (status)
+ _exit(status);
+}
+
static void unreachable() {
Printf("FATAL: ThreadSanitizer: unreachable called\n");
Die();
@@ -1684,8 +1943,16 @@ void InitializeInterceptors() {
REAL(memcpy) = internal_memcpy;
REAL(memcmp) = internal_memcmp;
+ // Instruct libc malloc to consume less memory.
+ mallopt(1, 0); // M_MXFAST
+ mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
+
SANITIZER_COMMON_INTERCEPTORS_INIT;
+ TSAN_INTERCEPT(setjmp);
+ TSAN_INTERCEPT(_setjmp);
+ TSAN_INTERCEPT(sigsetjmp);
+ TSAN_INTERCEPT(__sigsetjmp);
TSAN_INTERCEPT(longjmp);
TSAN_INTERCEPT(siglongjmp);
@@ -1767,6 +2034,18 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(sem_post);
TSAN_INTERCEPT(sem_getvalue);
+ TSAN_INTERCEPT(stat);
+ TSAN_INTERCEPT(__xstat);
+ TSAN_INTERCEPT(stat64);
+ TSAN_INTERCEPT(__xstat64);
+ TSAN_INTERCEPT(lstat);
+ TSAN_INTERCEPT(__lxstat);
+ TSAN_INTERCEPT(lstat64);
+ TSAN_INTERCEPT(__lxstat64);
+ TSAN_INTERCEPT(fstat);
+ TSAN_INTERCEPT(__fxstat);
+ TSAN_INTERCEPT(fstat64);
+ TSAN_INTERCEPT(__fxstat64);
TSAN_INTERCEPT(open);
TSAN_INTERCEPT(open64);
TSAN_INTERCEPT(creat);
@@ -1781,11 +2060,15 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(socket);
TSAN_INTERCEPT(socketpair);
TSAN_INTERCEPT(connect);
+ TSAN_INTERCEPT(bind);
+ TSAN_INTERCEPT(listen);
TSAN_INTERCEPT(accept);
TSAN_INTERCEPT(accept4);
TSAN_INTERCEPT(epoll_create);
TSAN_INTERCEPT(epoll_create1);
TSAN_INTERCEPT(close);
+ TSAN_INTERCEPT(__close);
+ TSAN_INTERCEPT(__res_iclose);
TSAN_INTERCEPT(pipe);
TSAN_INTERCEPT(pipe2);
@@ -1804,6 +2087,8 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(fclose);
TSAN_INTERCEPT(fread);
TSAN_INTERCEPT(fwrite);
+ TSAN_INTERCEPT(fflush);
+ TSAN_INTERCEPT(abort);
TSAN_INTERCEPT(puts);
TSAN_INTERCEPT(rmdir);
TSAN_INTERCEPT(opendir);
@@ -1828,6 +2113,8 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(munlockall);
TSAN_INTERCEPT(fork);
+ TSAN_INTERCEPT(on_exit);
+ TSAN_INTERCEPT(__cxa_atexit);
// We need to set it up, because interceptors check that the function is resolved,
// but atexit is emitted directly into the module, so it can't be resolved.
@@ -1835,7 +2122,7 @@ void InitializeInterceptors() {
atexit_ctx = new(internal_alloc(MBlockAtExit, sizeof(AtExitContext)))
AtExitContext();
- if (__cxa_atexit(&finalize, 0, 0)) {
+ if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
Printf("ThreadSanitizer: failed to setup atexit callback\n");
Die();
}
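
One user-visible effect of this interceptor rework is real setjmp/longjmp support: the old longjmp and siglongjmp interceptors printed "longjmp() is not supported" and died, while the new code records each buffer via __tsan_setjmp() and lets LongJmp() unwind the shadow stack before handing control to the real function. The new longjmp lit tests exercise roughly this kind of program (minimal sketch):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf env;

static void Bar() {
  longjmp(env, 1);        // unwinds across Bar() and Foo()
}

static void Foo() {
  Bar();
}

int main() {
  if (setjmp(env) == 0)   // the runtime records the buffer here
    Foo();
  printf("OK\n");         // reached via the longjmp path
  return 0;
}
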
diff --git a/lib/tsan/rtl/tsan_interface.cc b/lib/tsan/rtl/tsan_interface.cc
index 6d0954602ff7..efad8c192d6e 100644
--- a/lib/tsan/rtl/tsan_interface.cc
+++ b/lib/tsan/rtl/tsan_interface.cc
@@ -14,23 +14,73 @@
#include "tsan_interface.h"
#include "tsan_interface_ann.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
#define CALLERPC ((uptr)__builtin_return_address(0))
using namespace __tsan; // NOLINT
+typedef u16 uint16_t;
+typedef u32 uint32_t;
+typedef u64 uint64_t;
+
void __tsan_init() {
Initialize(cur_thread());
}
void __tsan_read16(void *addr) {
- MemoryRead8Byte(cur_thread(), CALLERPC, (uptr)addr);
- MemoryRead8Byte(cur_thread(), CALLERPC, (uptr)addr + 8);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
}
void __tsan_write16(void *addr) {
- MemoryWrite8Byte(cur_thread(), CALLERPC, (uptr)addr);
- MemoryWrite8Byte(cur_thread(), CALLERPC, (uptr)addr + 8);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+}
+
+u16 __tsan_unaligned_read2(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false);
+ return *(u16*)addr;
+}
+
+u32 __tsan_unaligned_read4(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false);
+ return *(u32*)addr;
+}
+
+u64 __tsan_unaligned_read8(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false);
+ return *(u64*)addr;
+}
+
+void __tsan_unaligned_write2(void *addr, u16 v) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false);
+ *(u16*)addr = v;
+}
+
+void __tsan_unaligned_write4(void *addr, u32 v) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false);
+ *(u32*)addr = v;
+}
+
+void __tsan_unaligned_write8(void *addr, u64 v) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false);
+ *(u64*)addr = v;
+}
+
+extern "C" {
+uint16_t __sanitizer_unaligned_load16(void *addr)
+ ALIAS("__tsan_unaligned_read2") SANITIZER_INTERFACE_ATTRIBUTE;
+uint32_t __sanitizer_unaligned_load32(void *addr)
+ ALIAS("__tsan_unaligned_read4") SANITIZER_INTERFACE_ATTRIBUTE;
+uint64_t __sanitizer_unaligned_load64(void *addr)
+ ALIAS("__tsan_unaligned_read8") SANITIZER_INTERFACE_ATTRIBUTE;
+void __sanitizer_unaligned_store16(void *addr, uint16_t v)
+ ALIAS("__tsan_unaligned_write2") SANITIZER_INTERFACE_ATTRIBUTE;
+void __sanitizer_unaligned_store32(void *addr, uint32_t v)
+ ALIAS("__tsan_unaligned_write4") SANITIZER_INTERFACE_ATTRIBUTE;
+void __sanitizer_unaligned_store64(void *addr, uint64_t v)
+ ALIAS("__tsan_unaligned_write8") SANITIZER_INTERFACE_ATTRIBUTE;
}
void __tsan_acquire(void *addr) {
diff --git a/lib/tsan/rtl/tsan_interface.h b/lib/tsan/rtl/tsan_interface.h
index 7480fc893f2d..457fb55e0d2d 100644
--- a/lib/tsan/rtl/tsan_interface.h
+++ b/lib/tsan/rtl/tsan_interface.h
@@ -16,7 +16,7 @@
#ifndef TSAN_INTERFACE_H
#define TSAN_INTERFACE_H
-#include <sanitizer/common_interface_defs.h>
+#include <sanitizer_common/sanitizer_internal_defs.h>
// This header should NOT include any other headers.
// All functions in this header are extern "C" and start with __tsan_.
@@ -41,6 +41,14 @@ void __tsan_write4(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
void __tsan_write8(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
void __tsan_write16(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+u16 __tsan_unaligned_read2(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+u32 __tsan_unaligned_read4(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+u64 __tsan_unaligned_read8(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_unaligned_write2(void *addr, u16 v) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_unaligned_write4(void *addr, u32 v) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_unaligned_write8(void *addr, u64 v) SANITIZER_INTERFACE_ATTRIBUTE;
+
+void __tsan_vptr_read(void **vptr_p) SANITIZER_INTERFACE_ATTRIBUTE;
void __tsan_vptr_update(void **vptr_p, void *new_val)
SANITIZER_INTERFACE_ATTRIBUTE;
diff --git a/lib/tsan/rtl/tsan_interface_ann.cc b/lib/tsan/rtl/tsan_interface_ann.cc
index 51ebbf2266dd..04b4b455d15e 100644
--- a/lib/tsan/rtl/tsan_interface_ann.cc
+++ b/lib/tsan/rtl/tsan_interface_ann.cc
@@ -20,6 +20,7 @@
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_platform.h"
+#include "tsan_vector.h"
#define CALLERPC ((uptr)__builtin_return_address(0))
@@ -67,6 +68,7 @@ struct ExpectRace {
ExpectRace *next;
ExpectRace *prev;
int hitcount;
+ int addcount;
uptr addr;
uptr size;
char *file;
@@ -91,16 +93,19 @@ static void AddExpectRace(ExpectRace *list,
char *f, int l, uptr addr, uptr size, char *desc) {
ExpectRace *race = list->next;
for (; race != list; race = race->next) {
- if (race->addr == addr && race->size == size)
+ if (race->addr == addr && race->size == size) {
+ race->addcount++;
return;
+ }
}
race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace));
- race->hitcount = 0;
race->addr = addr;
race->size = size;
race->file = f;
race->line = l;
race->desc[0] = 0;
+ race->hitcount = 0;
+ race->addcount = 1;
if (desc) {
int i = 0;
for (; i < kMaxDescLen - 1 && desc[i]; i++)
@@ -155,6 +160,68 @@ bool IsExpectedReport(uptr addr, uptr size) {
return false;
}
+static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
+ int *unique_count, int *hit_count, int ExpectRace::*counter) {
+ ExpectRace *list = &dyn_ann_ctx->benign;
+ for (ExpectRace *race = list->next; race != list; race = race->next) {
+ (*unique_count)++;
+ if (race->*counter == 0)
+ continue;
+ (*hit_count) += race->*counter;
+ uptr i = 0;
+ for (; i < matched->Size(); i++) {
+ ExpectRace *race0 = &(*matched)[i];
+ if (race->line == race0->line
+ && internal_strcmp(race->file, race0->file) == 0
+ && internal_strcmp(race->desc, race0->desc) == 0) {
+ race0->*counter += race->*counter;
+ break;
+ }
+ }
+ if (i == matched->Size())
+ matched->PushBack(*race);
+ }
+}
+
+void PrintMatchedBenignRaces() {
+ Lock lock(&dyn_ann_ctx->mtx);
+ int unique_count = 0;
+ int hit_count = 0;
+ int add_count = 0;
+ Vector<ExpectRace> hit_matched(MBlockScopedBuf);
+ CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count,
+ &ExpectRace::hitcount);
+ Vector<ExpectRace> add_matched(MBlockScopedBuf);
+ CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count,
+ &ExpectRace::addcount);
+ if (hit_matched.Size()) {
+ Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n",
+ hit_count, (int)internal_getpid());
+ for (uptr i = 0; i < hit_matched.Size(); i++) {
+ Printf("%d %s:%d %s\n",
+ hit_matched[i].hitcount, hit_matched[i].file,
+ hit_matched[i].line, hit_matched[i].desc);
+ }
+ }
+ if (hit_matched.Size()) {
+ Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique"
+ " (pid=%d):\n",
+ add_count, unique_count, (int)internal_getpid());
+ for (uptr i = 0; i < add_matched.Size(); i++) {
+ Printf("%d %s:%d %s\n",
+ add_matched[i].addcount, add_matched[i].file,
+ add_matched[i].line, add_matched[i].desc);
+ }
+ }
+}
+
+static void ReportMissedExpectedRace(ExpectRace *race) {
+ Printf("==================\n");
+ Printf("WARNING: ThreadSanitizer: missed expected data race\n");
+ Printf(" %s addr=%zx %s:%d\n",
+ race->desc, race->addr, race->file, race->line);
+ Printf("==================\n");
+}
} // namespace __tsan
using namespace __tsan; // NOLINT
@@ -237,14 +304,6 @@ void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
SCOPED_ANNOTATION(AnnotateNoOp);
}
-static void ReportMissedExpectedRace(ExpectRace *race) {
- Printf("==================\n");
- Printf("WARNING: ThreadSanitizer: missed expected data race\n");
- Printf(" %s addr=%zx %s:%d\n",
- race->desc, race->addr, race->file, race->line);
- Printf("==================\n");
-}
-
void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
Lock lock(&dyn_ann_ctx->mtx);
@@ -357,6 +416,9 @@ void INTERFACE_ATTRIBUTE AnnotateThreadName(
ThreadSetName(thr, name);
}
+// We deliberately omit the implementation of WTFAnnotateHappensBefore() and
+// WTFAnnotateHappensAfter(). WebKit uses them to annotate atomic operations,
+// which ThreadSanitizer should already handle correctly on its own.
void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
SCOPED_ANNOTATION(AnnotateHappensBefore);
}
@@ -368,6 +430,7 @@ void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
char *f, int l, uptr mem, uptr sz, char *desc) {
SCOPED_ANNOTATION(AnnotateBenignRaceSized);
+ BenignRaceImpl(f, l, mem, 1, desc);
}
int INTERFACE_ATTRIBUTE RunningOnValgrind() {
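
With print_benign and the new addcount/hitcount bookkeeping, annotated benign races are summarized at exit. The lit tests (benign_race.cc in the diffstat) declare the annotation by hand, in the same style as the ignore annotations earlier in this patch; a sketch of that usage follows (the hand-written declaration and its parameter types are illustrative; the runtime's own prototype is authoritative):

#include <pthread.h>

extern "C" void AnnotateBenignRaceSized(const char *f, int l, void *mem,
                                        unsigned long sz, const char *desc);

int g_stat;  // racy-by-design statistics counter

void *Thread(void *arg) {
  (void)arg;
  g_stat++;  // intentional racy increment
  return 0;
}

int main() {
  AnnotateBenignRaceSized(__FILE__, __LINE__, &g_stat, sizeof(g_stat),
                          "stat counter, races are acceptable");
  pthread_t t;
  pthread_create(&t, 0, Thread, 0);
  g_stat++;
  pthread_join(t, 0);
  return 0;
}
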
diff --git a/lib/tsan/rtl/tsan_interface_ann.h b/lib/tsan/rtl/tsan_interface_ann.h
index ed809073327e..8e45328e7ec1 100644
--- a/lib/tsan/rtl/tsan_interface_ann.h
+++ b/lib/tsan/rtl/tsan_interface_ann.h
@@ -14,7 +14,7 @@
#ifndef TSAN_INTERFACE_ANN_H
#define TSAN_INTERFACE_ANN_H
-#include <sanitizer/common_interface_defs.h>
+#include <sanitizer_common/sanitizer_internal_defs.h>
// This header should NOT include any other headers.
// All functions in this header are extern "C" and start with __tsan_.
diff --git a/lib/tsan/rtl/tsan_interface_atomic.cc b/lib/tsan/rtl/tsan_interface_atomic.cc
index a9d75e5bf76c..80266969849a 100644
--- a/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -20,25 +20,42 @@
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"
using namespace __tsan; // NOLINT
+#define SCOPED_ATOMIC(func, ...) \
+ const uptr callpc = (uptr)__builtin_return_address(0); \
+ uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ pc = __sanitizer::StackTrace::GetPreviousInstructionPc(pc); \
+ mo = ConvertOrder(mo); \
+ mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
+ ThreadState *const thr = cur_thread(); \
+ AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
+ ScopedAtomic sa(thr, callpc, __FUNCTION__); \
+ return Atomic##func(thr, pc, __VA_ARGS__); \
+/**/
+
class ScopedAtomic {
public:
ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
: thr_(thr) {
- CHECK_EQ(thr_->in_rtl, 1); // 1 due to our own ScopedInRtl member.
+ CHECK_EQ(thr_->in_rtl, 0);
+ ProcessPendingSignals(thr);
+ FuncEntry(thr_, pc);
DPrintf("#%d: %s\n", thr_->tid, func);
+ thr_->in_rtl++;
}
~ScopedAtomic() {
- CHECK_EQ(thr_->in_rtl, 1);
+ thr_->in_rtl--;
+ CHECK_EQ(thr_->in_rtl, 0);
+ FuncExit(thr_);
}
private:
ThreadState *thr_;
- ScopedInRtl in_rtl_;
};
// Some shortcuts.
@@ -212,16 +229,19 @@ a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
}
#endif
-#define SCOPED_ATOMIC(func, ...) \
- mo = ConvertOrder(mo); \
- mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
- ThreadState *const thr = cur_thread(); \
- ProcessPendingSignals(thr); \
- const uptr pc = (uptr)__builtin_return_address(0); \
- AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
- ScopedAtomic sa(thr, pc, __FUNCTION__); \
- return Atomic##func(thr, pc, __VA_ARGS__); \
-/**/
+template<typename T>
+static int SizeLog() {
+ if (sizeof(T) <= 1)
+ return kSizeLog1;
+ else if (sizeof(T) <= 2)
+ return kSizeLog2;
+ else if (sizeof(T) <= 4)
+ return kSizeLog4;
+ else
+ return kSizeLog8;
+ // For 16-byte atomics we also use an 8-byte memory access;
+ // this leads to false negatives only in very obscure cases.
+}
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
@@ -229,14 +249,17 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
CHECK(IsLoadOrder(mo));
// This fast-path is critical for performance.
// Assume the access is atomic.
- if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a))
+ if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
+ MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
return *a;
+ }
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
thr->clock.set(thr->tid, thr->fast_state.epoch());
thr->clock.acquire(&s->clock);
T v = *a;
s->mtx.ReadUnlock();
__sync_synchronize();
+ MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
return v;
}
@@ -244,6 +267,7 @@ template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
CHECK(IsStoreOrder(mo));
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
// This fast-path is critical for performance.
// Assume the access is atomic.
// Strictly saying even relaxed store cuts off release sequence,
@@ -265,16 +289,21 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
- thr->clock.set(thr->tid, thr->fast_state.epoch());
- if (IsAcqRelOrder(mo))
- thr->clock.acq_rel(&s->clock);
- else if (IsReleaseOrder(mo))
- thr->clock.release(&s->clock);
- else if (IsAcquireOrder(mo))
- thr->clock.acquire(&s->clock);
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ SyncVar *s = 0;
+ if (mo != mo_relaxed) {
+ s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ if (IsAcqRelOrder(mo))
+ thr->clock.acq_rel(&s->clock);
+ else if (IsReleaseOrder(mo))
+ thr->clock.release(&s->clock);
+ else if (IsAcquireOrder(mo))
+ thr->clock.acquire(&s->clock);
+ }
v = F(a, v);
- s->mtx.Unlock();
+ if (s)
+ s->mtx.Unlock();
return v;
}
@@ -324,17 +353,22 @@ template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
volatile T *a, T *c, T v, morder mo, morder fmo) {
(void)fmo; // Unused because llvm does not pass it yet.
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
- thr->clock.set(thr->tid, thr->fast_state.epoch());
- if (IsAcqRelOrder(mo))
- thr->clock.acq_rel(&s->clock);
- else if (IsReleaseOrder(mo))
- thr->clock.release(&s->clock);
- else if (IsAcquireOrder(mo))
- thr->clock.acquire(&s->clock);
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ SyncVar *s = 0;
+ if (mo != mo_relaxed) {
+ s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ if (IsAcqRelOrder(mo))
+ thr->clock.acq_rel(&s->clock);
+ else if (IsReleaseOrder(mo))
+ thr->clock.release(&s->clock);
+ else if (IsAcquireOrder(mo))
+ thr->clock.acquire(&s->clock);
+ }
T cc = *c;
T pr = func_cas(a, cc, v);
- s->mtx.Unlock();
+ if (s)
+ s->mtx.Unlock();
if (pr == cc)
return true;
*c = pr;
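
A practical consequence of the AtomicRMW and AtomicCAS changes above: for mo_relaxed operations the runtime no longer creates or locks a SyncVar for the address; it only records an atomic shadow write via MemoryWriteAtomic(), which is what the new report_atomic_races checking inspects. In user-code terms, a relaxed counter like the sketch below now skips the per-operation sync-table lock:

#include <atomic>

std::atomic<long> g_hits{0};

// Relaxed RMW: the count is correct, no happens-before edge is created,
// and after this patch no SyncVar lock is taken inside the runtime.
void RecordHit() {
  g_hits.fetch_add(1, std::memory_order_relaxed);
}
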
diff --git a/lib/tsan/rtl/tsan_interface_inl.h b/lib/tsan/rtl/tsan_interface_inl.h
index 8a92155d57ef..0187e49d96e5 100644
--- a/lib/tsan/rtl/tsan_interface_inl.h
+++ b/lib/tsan/rtl/tsan_interface_inl.h
@@ -19,41 +19,53 @@
using namespace __tsan; // NOLINT
void __tsan_read1(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 0, 0);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
}
void __tsan_read2(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, 0);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
}
void __tsan_read4(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, 0);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
}
void __tsan_read8(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 3, 0);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
}
void __tsan_write1(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 0, 1);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
}
void __tsan_write2(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, 1);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
}
void __tsan_write4(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, 1);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
}
void __tsan_write8(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 3, 1);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
}
void __tsan_vptr_update(void **vptr_p, void *new_val) {
CHECK_EQ(sizeof(vptr_p), 8);
- if (*vptr_p != new_val)
- MemoryAccess(cur_thread(), CALLERPC, (uptr)vptr_p, 3, 1);
+ if (*vptr_p != new_val) {
+ ThreadState *thr = cur_thread();
+ thr->is_vptr_access = true;
+ MemoryWrite(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
+ thr->is_vptr_access = false;
+ }
+}
+
+void __tsan_vptr_read(void **vptr_p) {
+ CHECK_EQ(sizeof(vptr_p), 8);
+ ThreadState *thr = cur_thread();
+ thr->is_vptr_access = true;
+ MemoryRead(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
+ thr->is_vptr_access = false;
}
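// Instrumentation sketch (assumption: this mirrors what the compiler pass
// emits for virtual calls and vptr stores; the pass itself is outside this
// patch):
//
//   struct Base { virtual void f(); virtual ~Base(); };
//
//   void call(Base *b) {
//     // emitted before the virtual dispatch:
//     //   __tsan_vptr_read((void**)b);
//     b->f();
//   }
//
//   Base::~Base() {
//     // emitted for the vptr store inside the destructor:
//     //   __tsan_vptr_update((void**)this, /*new vtable pointer*/);
//   }
//
// Both paths set thr->is_vptr_access, so a race found here can be reported
// as ReportTypeVptrRace rather than a plain data race.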
void __tsan_func_entry(void *pc) {
diff --git a/lib/tsan/rtl/tsan_interface_java.cc b/lib/tsan/rtl/tsan_interface_java.cc
index e425c75800be..71e0747c3646 100644
--- a/lib/tsan/rtl/tsan_interface_java.cc
+++ b/lib/tsan/rtl/tsan_interface_java.cc
@@ -152,7 +152,7 @@ SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) {
return 0;
}
-} // namespace __tsan {
+} // namespace __tsan
#define SCOPED_JAVA_FUNC(func) \
ThreadState *thr = cur_thread(); \
@@ -271,6 +271,7 @@ void __tsan_java_mutex_lock(jptr addr) {
CHECK_GE(addr, jctx->heap_begin);
CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ MutexCreate(thr, pc, addr, true, true, true);
MutexLock(thr, pc, addr);
}
@@ -291,6 +292,7 @@ void __tsan_java_mutex_read_lock(jptr addr) {
CHECK_GE(addr, jctx->heap_begin);
CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ MutexCreate(thr, pc, addr, true, true, true);
MutexReadLock(thr, pc, addr);
}
@@ -303,3 +305,25 @@ void __tsan_java_mutex_read_unlock(jptr addr) {
MutexReadUnlock(thr, pc, addr);
}
+
+void __tsan_java_mutex_lock_rec(jptr addr, int rec) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_lock_rec);
+ DPrintf("#%d: java_mutex_lock_rec(%p, %d)\n", thr->tid, addr, rec);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ CHECK_GT(rec, 0);
+
+ MutexCreate(thr, pc, addr, true, true, true);
+ MutexLock(thr, pc, addr, rec);
+}
+
+int __tsan_java_mutex_unlock_rec(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock_rec);
+ DPrintf("#%d: java_mutex_unlock_rec(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ return MutexUnlock(thr, pc, addr, true);
+}
diff --git a/lib/tsan/rtl/tsan_interface_java.h b/lib/tsan/rtl/tsan_interface_java.h
index 241483aaa015..9ac78e074bbe 100644
--- a/lib/tsan/rtl/tsan_interface_java.h
+++ b/lib/tsan/rtl/tsan_interface_java.h
@@ -55,8 +55,7 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
// Mutex lock.
// Addr is any unique address associated with the mutex.
-// Must not be called on recursive reentry.
-// Object.wait() is handled as a pair of unlock/lock.
+// Can be called on recursive reentry.
void __tsan_java_mutex_lock(jptr addr) INTERFACE_ATTRIBUTE;
// Mutex unlock.
void __tsan_java_mutex_unlock(jptr addr) INTERFACE_ATTRIBUTE;
@@ -64,6 +63,16 @@ void __tsan_java_mutex_unlock(jptr addr) INTERFACE_ATTRIBUTE;
void __tsan_java_mutex_read_lock(jptr addr) INTERFACE_ATTRIBUTE;
// Mutex read unlock.
void __tsan_java_mutex_read_unlock(jptr addr) INTERFACE_ATTRIBUTE;
+// Recursive mutex lock, intended for handling of Object.wait().
+// The 'rec' value must be obtained from the previous
+// __tsan_java_mutex_unlock_rec().
+void __tsan_java_mutex_lock_rec(jptr addr, int rec) INTERFACE_ATTRIBUTE;
+// Recursive mutex unlock, intended for handling of Object.wait().
+// The return value says how many times this thread called lock()
+// w/o a pairing unlock() (i.e. how many recursive levels it unlocked).
+// It must be passed back to __tsan_java_mutex_lock_rec() to restore
+// the same recursion level.
+int __tsan_java_mutex_unlock_rec(jptr addr) INTERFACE_ATTRIBUTE;
#ifdef __cplusplus
} // extern "C"
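// JVM-side usage sketch for the recursive lock/unlock pair declared above
// (ParkUntilNotified is a hypothetical placeholder for the actual wait/park
// primitive):
//
//   void ObjectWait(jptr obj) {
//     int rec = __tsan_java_mutex_unlock_rec(obj);  // drop all levels
//     ParkUntilNotified(obj);                       // hypothetical
//     __tsan_java_mutex_lock_rec(obj, rec);         // restore them
//   }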
diff --git a/lib/tsan/rtl/tsan_md5.cc b/lib/tsan/rtl/tsan_md5.cc
index c9d671f5b599..66e824043153 100644
--- a/lib/tsan/rtl/tsan_md5.cc
+++ b/lib/tsan/rtl/tsan_md5.cc
@@ -242,4 +242,4 @@ MD5Hash md5_hash(const void *data, uptr size) {
MD5_Final((unsigned char*)&res.hash[0], &ctx);
return res;
}
-}
+} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index 82f7105d60db..b6671b1abf09 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -29,6 +29,41 @@ extern "C" void WEAK __tsan_free_hook(void *ptr) {
namespace __tsan {
+COMPILER_CHECK(sizeof(MBlock) == 16);
+
+void MBlock::Lock() {
+ atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
+ uptr v = atomic_load(a, memory_order_relaxed);
+ for (int iter = 0;; iter++) {
+ if (v & 1) {
+ if (iter < 10)
+ proc_yield(20);
+ else
+ internal_sched_yield();
+ v = atomic_load(a, memory_order_relaxed);
+ continue;
+ }
+ if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
+ break;
+ }
+}
+
+void MBlock::Unlock() {
+ atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
+ uptr v = atomic_load(a, memory_order_relaxed);
+ DCHECK(v & 1);
+ atomic_store(a, v & ~1, memory_order_relaxed);
+}
+
+struct MapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { }
+ void OnUnmap(uptr p, uptr size) const {
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ DontNeedShadowFor(p, size);
+ }
+};
+
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
return reinterpret_cast<Allocator*>(&allocator_placeholder);
@@ -38,8 +73,16 @@ void InitializeAllocator() {
allocator()->Init();
}
-void AlloctorThreadFinish(ThreadState *thr) {
- allocator()->SwallowCache(&thr->alloc_cache);
+void AllocatorThreadStart(ThreadState *thr) {
+ allocator()->InitCache(&thr->alloc_cache);
+}
+
+void AllocatorThreadFinish(ThreadState *thr) {
+ allocator()->DestroyCache(&thr->alloc_cache);
+}
+
+void AllocatorPrintStats() {
+ allocator()->PrintStats();
}
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
@@ -48,7 +91,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
Context *ctx = CTX();
StackTrace stack;
stack.ObtainCurrent(thr, pc);
- Lock l(&ctx->thread_mtx);
+ ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeSignalUnsafe);
if (!IsFiredSuppression(ctx, rep, stack)) {
rep.AddStack(&stack);
@@ -58,17 +101,15 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
CHECK_GT(thr->in_rtl, 0);
+ if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
+ return 0;
void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
if (p == 0)
return 0;
MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
- b->size = sz;
- b->head = 0;
- b->alloc_tid = thr->unique_id;
- b->alloc_stack_id = CurrentStackId(thr, pc);
- if (CTX() && CTX()->initialized) {
+ b->Init(sz, thr->tid, CurrentStackId(thr, pc));
+ if (CTX() && CTX()->initialized)
MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
- }
DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
SignalUnsafeCall(thr, pc);
return p;
@@ -79,9 +120,9 @@ void user_free(ThreadState *thr, uptr pc, void *p) {
CHECK_NE(p, (void*)0);
DPrintf("#%d: free(%p)\n", thr->tid, p);
MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- if (b->head) {
- Lock l(&b->mtx);
- for (SyncVar *s = b->head; s;) {
+ if (b->ListHead()) {
+ MBlock::ScopedLock l(b);
+ for (SyncVar *s = b->ListHead(); s;) {
SyncVar *res = s;
s = s->next;
StatInc(thr, StatSyncDestroyed);
@@ -89,12 +130,10 @@ void user_free(ThreadState *thr, uptr pc, void *p) {
res->mtx.Unlock();
DestroyAndFree(res);
}
- b->head = 0;
+ b->ListReset();
}
- if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
- MemoryRangeFreed(thr, pc, (uptr)p, b->size);
- }
- b->~MBlock();
+ if (CTX() && CTX()->initialized && thr->in_rtl == 1)
+ MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
allocator()->Deallocate(&thr->alloc_cache, p);
SignalUnsafeCall(thr, pc);
}
@@ -110,20 +149,29 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
return 0;
if (p) {
MBlock *b = user_mblock(thr, p);
- internal_memcpy(p2, p, min(b->size, sz));
+ CHECK_NE(b, 0);
+ internal_memcpy(p2, p, min(b->Size(), sz));
}
}
- if (p) {
+ if (p)
user_free(thr, pc, p);
- }
return p2;
}
+uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
+ CHECK_GT(thr->in_rtl, 0);
+ if (p == 0)
+ return 0;
+ MBlock *b = (MBlock*)allocator()->GetMetaData(p);
+ return b ? b->Size() : 0;
+}
+
MBlock *user_mblock(ThreadState *thr, void *p) {
- CHECK_NE(p, (void*)0);
+ CHECK_NE(p, 0);
Allocator *a = allocator();
void *b = a->GetBlockBegin(p);
- CHECK_NE(b, 0);
+ if (b == 0)
+ return 0;
return (MBlock*)a->GetMetaData(b);
}
@@ -164,3 +212,54 @@ void internal_free(void *p) {
}
} // namespace __tsan
+
+using namespace __tsan;
+
+extern "C" {
+uptr __tsan_get_current_allocated_bytes() {
+ u64 stats[AllocatorStatCount];
+ allocator()->GetStats(stats);
+ u64 m = stats[AllocatorStatMalloced];
+ u64 f = stats[AllocatorStatFreed];
+ return m >= f ? m - f : 1;
+}
+
+uptr __tsan_get_heap_size() {
+ u64 stats[AllocatorStatCount];
+ allocator()->GetStats(stats);
+ u64 m = stats[AllocatorStatMmapped];
+ u64 f = stats[AllocatorStatUnmapped];
+ return m >= f ? m - f : 1;
+}
+
+uptr __tsan_get_free_bytes() {
+ return 1;
+}
+
+uptr __tsan_get_unmapped_bytes() {
+ return 1;
+}
+
+uptr __tsan_get_estimated_allocated_size(uptr size) {
+ return size;
+}
+
+bool __tsan_get_ownership(void *p) {
+ return allocator()->GetBlockBegin(p) != 0;
+}
+
+uptr __tsan_get_allocated_size(void *p) {
+ if (p == 0)
+ return 0;
+ p = allocator()->GetBlockBegin(p);
+ if (p == 0)
+ return 0;
+ MBlock *b = (MBlock*)allocator()->GetMetaData(p);
+ return b->Size();
+}
+
+void __tsan_on_thread_idle() {
+ ThreadState *thr = cur_thread();
+ allocator()->SwallowCache(&thr->alloc_cache);
+}
+} // extern "C"
diff --git a/lib/tsan/rtl/tsan_mman.h b/lib/tsan/rtl/tsan_mman.h
index 5cf00eac8d03..19d555437f3e 100644
--- a/lib/tsan/rtl/tsan_mman.h
+++ b/lib/tsan/rtl/tsan_mman.h
@@ -20,7 +20,9 @@ namespace __tsan {
const uptr kDefaultAlignment = 16;
void InitializeAllocator();
-void AlloctorThreadFinish(ThreadState *thr);
+void AllocatorThreadStart(ThreadState *thr);
+void AllocatorThreadFinish(ThreadState *thr);
+void AllocatorPrintStats();
// For user allocations.
void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
@@ -29,6 +31,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
void user_free(ThreadState *thr, uptr pc, void *p);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
+uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p);
// Given the pointer p into a valid allocated block,
// returns the descriptor of the block.
MBlock *user_mblock(ThreadState *thr, void *p);
@@ -60,6 +63,7 @@ enum MBlockType {
MBlockExpectRace,
MBlockSignal,
MBlockFD,
+ MBlockJmpBuf,
// This must be the last.
MBlockTypeCount
diff --git a/lib/tsan/rtl/tsan_mutex.cc b/lib/tsan/rtl/tsan_mutex.cc
index 335ca2211d13..a92fd90fd9c1 100644
--- a/lib/tsan/rtl/tsan_mutex.cc
+++ b/lib/tsan/rtl/tsan_mutex.cc
@@ -31,8 +31,8 @@ static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
/*0 MutexTypeInvalid*/ {},
/*1 MutexTypeTrace*/ {MutexTypeLeaf},
/*2 MutexTypeThreads*/ {MutexTypeReport},
- /*3 MutexTypeReport*/ {MutexTypeSyncTab, MutexTypeMBlock,
- MutexTypeJavaMBlock},
+ /*3 MutexTypeReport*/ {MutexTypeSyncTab, MutexTypeSyncVar,
+ MutexTypeMBlock, MutexTypeJavaMBlock},
/*4 MutexTypeSyncVar*/ {},
/*5 MutexTypeSyncTab*/ {MutexTypeSyncVar},
/*6 MutexTypeSlab*/ {MutexTypeLeaf},
diff --git a/lib/tsan/rtl/tsan_mutexset.h b/lib/tsan/rtl/tsan_mutexset.h
index 09223ff6cc48..eebfd4d70a14 100644
--- a/lib/tsan/rtl/tsan_mutexset.h
+++ b/lib/tsan/rtl/tsan_mutexset.h
@@ -22,7 +22,7 @@ class MutexSet {
public:
// Holds limited number of mutexes.
// The oldest mutexes are discarded on overflow.
- static const uptr kMaxSize = 64;
+ static const uptr kMaxSize = 16;
struct Desc {
u64 id;
u64 epoch;
diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h
index c859c3e85b19..666b4d0c482f 100644
--- a/lib/tsan/rtl/tsan_platform.h
+++ b/lib/tsan/rtl/tsan_platform.h
@@ -37,9 +37,9 @@ C++ COMPAT linux memory layout:
Go linux and darwin memory layout:
0000 0000 0000 - 0000 1000 0000: executable
0000 1000 0000 - 00f8 0000 0000: -
-00f8 0000 0000 - 0118 0000 0000: heap
-0118 0000 0000 - 1000 0000 0000: -
-1000 0000 0000 - 1460 0000 0000: shadow
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 1380 0000 0000: shadow
1460 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7fff ffff ffff: -
@@ -47,8 +47,8 @@ Go linux and darwin memory layout:
Go windows memory layout:
0000 0000 0000 - 0000 1000 0000: executable
0000 1000 0000 - 00f8 0000 0000: -
-00f8 0000 0000 - 0118 0000 0000: heap
-0118 0000 0000 - 0100 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 0100 0000 0000: -
0100 0000 0000 - 0560 0000 0000: shadow
0560 0000 0000 - 0760 0000 0000: traces
0760 0000 0000 - 07ff ffff ffff: -
@@ -65,11 +65,11 @@ namespace __tsan {
#if defined(TSAN_GO)
static const uptr kLinuxAppMemBeg = 0x000000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x00fcffffffffULL;
-# if defined(_WIN32)
+static const uptr kLinuxAppMemEnd = 0x04dfffffffffULL;
+# if SANITIZER_WINDOWS
static const uptr kLinuxShadowMsk = 0x010000000000ULL;
# else
-static const uptr kLinuxShadowMsk = 0x100000000000ULL;
+static const uptr kLinuxShadowMsk = 0x200000000000ULL;
# endif
// TSAN_COMPAT_SHADOW is intended for COMPAT virtual memory layout,
// when memory addresses are of the 0x2axxxxxxxxxx form.
@@ -84,7 +84,7 @@ static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
-#if defined(_WIN32)
+#if SANITIZER_WINDOWS
const uptr kTraceMemBegin = 0x056000000000ULL;
#else
const uptr kTraceMemBegin = 0x600000000000ULL;
@@ -132,13 +132,19 @@ static inline uptr AlternativeAddress(uptr addr) {
#endif
}
-uptr GetShadowMemoryConsumption();
void FlushShadowMemory();
+void WriteMemoryProfile(char *buf, uptr buf_size);
const char *InitializePlatform();
void FinalizePlatform();
-uptr ALWAYS_INLINE INLINE GetThreadTrace(int tid) {
- uptr p = kTraceMemBegin + (uptr)tid * kTraceSize * sizeof(Event);
+uptr ALWAYS_INLINE GetThreadTrace(int tid) {
+ uptr p = kTraceMemBegin + (uptr)(tid * 2) * kTraceSize * sizeof(Event);
+ DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+ return p;
+}
+
+uptr ALWAYS_INLINE GetThreadTraceHeader(int tid) {
+ uptr p = kTraceMemBegin + (uptr)(tid * 2 + 1) * kTraceSize * sizeof(Event);
DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
return p;
}
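// Worked example (non-Windows constants from this header): with
// kTraceMemBegin = 0x600000000000, thread tid now owns two consecutive slots
// of kTraceSize * sizeof(Event) bytes each, so for tid == 3:
//   GetThreadTrace(3)       == kTraceMemBegin + 6 * kTraceSize * sizeof(Event)
//   GetThreadTraceHeader(3) == kTraceMemBegin + 7 * kTraceSize * sizeof(Event)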
@@ -148,9 +154,7 @@ void internal_start_thread(void(*func)(void*), void *arg);
// Says whether the addr relates to a global var.
// Guesses with high probability, may yield both false positives and negatives.
bool IsGlobalVar(uptr addr);
-uptr GetTlsSize();
-void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
- uptr *tls_addr, uptr *tls_size);
+int ExtractResolvFDs(void *state, int *fds, int nfd);
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc
index 6cc424975125..a0d71e8589d6 100644
--- a/lib/tsan/rtl/tsan_platform_linux.cc
+++ b/lib/tsan/rtl/tsan_platform_linux.cc
@@ -12,7 +12,9 @@
// Linux-specific code.
//===----------------------------------------------------------------------===//
-#ifdef __linux__
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_LINUX
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
@@ -21,7 +23,6 @@
#include "tsan_rtl.h"
#include "tsan_flags.h"
-#include <asm/prctl.h>
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
@@ -40,11 +41,16 @@
#include <errno.h>
#include <sched.h>
#include <dlfcn.h>
+#define __need_res_state
+#include <resolv.h>
+#include <malloc.h>
-extern "C" int arch_prctl(int code, __sanitizer::uptr *addr);
+extern "C" struct mallinfo __libc_mallinfo();
namespace __tsan {
+const uptr kPageSize = 4096;
+
#ifndef TSAN_GO
ScopedInRtl::ScopedInRtl()
: thr_(cur_thread()) {
@@ -66,8 +72,75 @@ ScopedInRtl::~ScopedInRtl() {
}
#endif
-uptr GetShadowMemoryConsumption() {
- return 0;
+static bool ishex(char c) {
+ return (c >= '0' && c <= '9')
+ || (c >= 'a' && c <= 'f');
+}
+
+static uptr readhex(const char *p) {
+ uptr v = 0;
+ for (; ishex(p[0]); p++) {
+ if (p[0] >= '0' && p[0] <= '9')
+ v = v * 16 + p[0] - '0';
+ else
+ v = v * 16 + p[0] - 'a' + 10;
+ }
+ return v;
+}
+
+static uptr readdec(const char *p) {
+ uptr v = 0;
+ for (; p[0] >= '0' && p[0] <= '9' ; p++)
+ v = v * 10 + p[0] - '0';
+ return v;
+}
+
+void WriteMemoryProfile(char *buf, uptr buf_size) {
+ char *smaps = 0;
+ uptr smaps_cap = 0;
+ uptr smaps_len = ReadFileToBuffer("/proc/self/smaps",
+ &smaps, &smaps_cap, 64<<20);
+ uptr mem[6] = {};
+ uptr total = 0;
+ uptr start = 0;
+ bool file = false;
+ const char *pos = smaps;
+ while (pos < smaps + smaps_len) {
+ if (ishex(pos[0])) {
+ start = readhex(pos);
+ for (; *pos != '/' && *pos > '\n'; pos++) {}
+ file = *pos == '/';
+ } else if (internal_strncmp(pos, "Rss:", 4) == 0) {
+ for (; *pos < '0' || *pos > '9'; pos++) {}
+ uptr rss = readdec(pos) * 1024;
+ total += rss;
+ start >>= 40;
+ if (start < 0x10) // shadow
+ mem[0] += rss;
+ else if (start >= 0x20 && start < 0x30) // compat modules
+ mem[file ? 1 : 2] += rss;
+ else if (start >= 0x7e) // modules
+ mem[file ? 1 : 2] += rss;
+ else if (start >= 0x60 && start < 0x62) // traces
+ mem[3] += rss;
+ else if (start >= 0x7d && start < 0x7e) // heap
+ mem[4] += rss;
+ else // other
+ mem[5] += rss;
+ }
+ while (*pos++ != '\n') {}
+ }
+ UnmapOrDie(smaps, smaps_cap);
+ char *buf_pos = buf;
+ char *buf_end = buf + buf_size;
+ buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
+ "RSS %zd MB: shadow:%zd file:%zd mmap:%zd trace:%zd heap:%zd other:%zd\n",
+ total >> 20, mem[0] >> 20, mem[1] >> 20, mem[2] >> 20,
+ mem[3] >> 20, mem[4] >> 20, mem[5] >> 20);
+ struct mallinfo mi = __libc_mallinfo();
+ buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
+ "mallinfo: arena=%d mmap=%d fordblks=%d keepcost=%d\n",
+ mi.arena >> 20, mi.hblkhd >> 20, mi.fordblks >> 20, mi.keepcost >> 20);
}
void FlushShadowMemory() {
@@ -89,6 +162,63 @@ static void ProtectRange(uptr beg, uptr end) {
#endif
#ifndef TSAN_GO
+// Mark shadow for .rodata sections with the special kShadowRodata marker.
+// Accesses to .rodata can't race, so this saves time, memory and trace space.
+static void MapRodata() {
+ // First create temp file.
+ const char *tmpdir = GetEnv("TMPDIR");
+ if (tmpdir == 0)
+ tmpdir = GetEnv("TEST_TMPDIR");
+#ifdef P_tmpdir
+ if (tmpdir == 0)
+ tmpdir = P_tmpdir;
+#endif
+ if (tmpdir == 0)
+ return;
+ char filename[256];
+ internal_snprintf(filename, sizeof(filename), "%s/tsan.rodata.%d",
+ tmpdir, (int)internal_getpid());
+ uptr openrv = internal_open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
+ if (internal_iserror(openrv))
+ return;
+ fd_t fd = openrv;
+ // Fill the file with kShadowRodata.
+ const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
+ InternalScopedBuffer<u64> marker(kMarkerSize);
+ for (u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
+ *p = kShadowRodata;
+ internal_write(fd, marker.data(), marker.size());
+ // Map the file into memory.
+ uptr page = internal_mmap(0, kPageSize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
+ if (internal_iserror(page)) {
+ internal_close(fd);
+ internal_unlink(filename);
+ return;
+ }
+ // Map the file into shadow of .rodata sections.
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ uptr start, end, offset, prot;
+ char name[128];
+ while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
+ if (name[0] != 0 && name[0] != '['
+ && (prot & MemoryMappingLayout::kProtectionRead)
+ && (prot & MemoryMappingLayout::kProtectionExecute)
+ && !(prot & MemoryMappingLayout::kProtectionWrite)
+ && IsAppMem(start)) {
+ // Assume it's .rodata
+ char *shadow_start = (char*)MemToShadow(start);
+ char *shadow_end = (char*)MemToShadow(end);
+ for (char *p = shadow_start; p < shadow_end; p += marker.size()) {
+ internal_mmap(p, Min<uptr>(marker.size(), shadow_end - p),
+ PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
+ }
+ }
+ }
+ internal_close(fd);
+ internal_unlink(filename);
+}
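// Net effect sketch: after MapRodata() the shadow of read-only, executable
// mappings is filled with kShadowRodata, so a typical .rodata read such as
//   static const char kMsg[] = "hello";  // usually placed in .rodata
//   char c = kMsg[0];                    // instrumented 1-byte load
// hits the early-return `*shadow_mem == kShadowRodata` check that this patch
// adds to MemoryAccess() and is neither traced nor race-checked.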
+
void InitializeShadowMemory() {
uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
kLinuxShadowEnd - kLinuxShadowBeg);
@@ -115,6 +245,8 @@ void InitializeShadowMemory() {
kLinuxAppMemBeg, kLinuxAppMemEnd,
(kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
DPrintf("stack %zx\n", (uptr)&shadow);
+
+ MapRodata();
}
#endif
@@ -124,10 +256,11 @@ static uptr g_data_end;
#ifndef TSAN_GO
static void CheckPIE() {
// Ensure that the binary is indeed compiled with -pie.
- MemoryMappingLayout proc_maps;
+ MemoryMappingLayout proc_maps(true);
uptr start, end;
if (proc_maps.Next(&start, &end,
- /*offset*/0, /*filename*/0, /*filename_size*/0)) {
+ /*offset*/0, /*filename*/0, /*filename_size*/0,
+ /*protection*/0)) {
if ((u64)start < kLinuxAppMemBeg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory ("
"something is mapped at 0x%zx < 0x%zx)\n",
@@ -140,11 +273,12 @@ static void CheckPIE() {
}
static void InitDataSeg() {
- MemoryMappingLayout proc_maps;
+ MemoryMappingLayout proc_maps(true);
uptr start, end, offset;
char name[128];
bool prev_is_data = false;
- while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name))) {
+ while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
+ /*protection*/ 0)) {
DPrintf("%p-%p %p %s\n", start, end, offset, name);
bool is_data = offset != 0 && name[0] != 0;
// BSS may get merged with [heap] in /proc/self/maps. This is not very
@@ -163,27 +297,6 @@ static void InitDataSeg() {
CHECK_LT((uptr)&g_data_start, g_data_end);
}
-static uptr g_tls_size;
-
-#ifdef __i386__
-# define INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
-#else
-# define INTERNAL_FUNCTION
-#endif
-
-static int InitTlsSize() {
- typedef void (*get_tls_func)(size_t*, size_t*) INTERNAL_FUNCTION;
- get_tls_func get_tls;
- void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
- CHECK_EQ(sizeof(get_tls), sizeof(get_tls_static_info_ptr));
- internal_memcpy(&get_tls, &get_tls_static_info_ptr,
- sizeof(get_tls_static_info_ptr));
- CHECK_NE(get_tls, 0);
- size_t tls_size = 0;
- size_t tls_align = 0;
- get_tls(&tls_size, &tls_align);
- return tls_size;
-}
#endif // #ifndef TSAN_GO
static rlim_t getlim(int res) {
@@ -238,57 +351,29 @@ const char *InitializePlatform() {
#ifndef TSAN_GO
CheckPIE();
- g_tls_size = (uptr)InitTlsSize();
+ InitTlsSize();
InitDataSeg();
#endif
- return getenv(kTsanOptionsEnv);
-}
-
-void FinalizePlatform() {
- fflush(0);
+ return GetEnv(kTsanOptionsEnv);
}
-uptr GetTlsSize() {
-#ifndef TSAN_GO
- return g_tls_size;
-#else
- return 0;
-#endif
+bool IsGlobalVar(uptr addr) {
+ return g_data_start && addr >= g_data_start && addr < g_data_end;
}
-void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
- uptr *tls_addr, uptr *tls_size) {
#ifndef TSAN_GO
- arch_prctl(ARCH_GET_FS, tls_addr);
- *tls_addr -= g_tls_size;
- *tls_size = g_tls_size;
-
- uptr stack_top, stack_bottom;
- GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
- *stk_addr = stack_bottom;
- *stk_size = stack_top - stack_bottom;
-
- if (!main) {
- // If stack and tls intersect, make them non-intersecting.
- if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
- CHECK_GT(*tls_addr + *tls_size, *stk_addr);
- CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size);
- *stk_size -= *tls_size;
- *tls_addr = *stk_addr + *stk_size;
- }
+int ExtractResolvFDs(void *state, int *fds, int nfd) {
+ int cnt = 0;
+ __res_state *statp = (__res_state*)state;
+ for (int i = 0; i < MAXNS && cnt < nfd; i++) {
+ if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
+ fds[cnt++] = statp->_u._ext.nssocks[i];
}
-#else
- *stk_addr = 0;
- *stk_size = 0;
- *tls_addr = 0;
- *tls_size = 0;
-#endif
+ return cnt;
}
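// Call-site sketch (assumption: the matching interceptor lives in
// tsan_interceptors.cc and is not shown in this patch):
//
//   TSAN_INTERCEPTOR(void, __res_iclose, __res_state *state, bool free_addr) {
//     SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
//     int fds[64];
//     int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
//     for (int i = 0; i < cnt; i++)
//       FdClose(thr, pc, fds[i]);
//     REAL(__res_iclose)(state, free_addr);
//   }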
+#endif
-bool IsGlobalVar(uptr addr) {
- return g_data_start && addr >= g_data_start && addr < g_data_end;
-}
} // namespace __tsan
-#endif // #ifdef __linux__
+#endif // SANITIZER_LINUX
diff --git a/lib/tsan/rtl/tsan_platform_mac.cc b/lib/tsan/rtl/tsan_platform_mac.cc
index 183061d14638..99d4533a4fa2 100644
--- a/lib/tsan/rtl/tsan_platform_mac.cc
+++ b/lib/tsan/rtl/tsan_platform_mac.cc
@@ -12,7 +12,8 @@
// Mac-specific code.
//===----------------------------------------------------------------------===//
-#ifdef __APPLE__
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
@@ -82,25 +83,13 @@ const char *InitializePlatform() {
setrlimit(RLIMIT_CORE, (rlimit*)&lim);
}
- return getenv(kTsanOptionsEnv);
+ return GetEnv(kTsanOptionsEnv);
}
void FinalizePlatform() {
fflush(0);
}
-uptr GetTlsSize() {
- return 0;
-}
-
-void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
- uptr *tls_addr, uptr *tls_size) {
- *stk_addr = 0;
- *stk_size = 0;
- *tls_addr = 0;
- *tls_size = 0;
-}
-
} // namespace __tsan
-#endif // #ifdef __APPLE__
+#endif // SANITIZER_MAC
diff --git a/lib/tsan/rtl/tsan_platform_windows.cc b/lib/tsan/rtl/tsan_platform_windows.cc
index f23e84e7875d..711db72ce684 100644
--- a/lib/tsan/rtl/tsan_platform_windows.cc
+++ b/lib/tsan/rtl/tsan_platform_windows.cc
@@ -12,7 +12,8 @@
// Windows-specific code.
//===----------------------------------------------------------------------===//
-#ifdef _WIN32
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_WINDOWS
#include "tsan_platform.h"
@@ -34,25 +35,13 @@ void FlushShadowMemory() {
}
const char *InitializePlatform() {
- return getenv(kTsanOptionsEnv);
+ return GetEnv(kTsanOptionsEnv);
}
void FinalizePlatform() {
fflush(0);
}
-uptr GetTlsSize() {
- return 0;
-}
-
-void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
- uptr *tls_addr, uptr *tls_size) {
- *stk_addr = 0;
- *stk_size = 0;
- *tls_addr = 0;
- *tls_size = 0;
-}
-
} // namespace __tsan
-#endif // #ifdef _WIN32
+#endif // SANITIZER_WINDOWS
diff --git a/lib/tsan/rtl/tsan_report.cc b/lib/tsan/rtl/tsan_report.cc
index 056dc97387b9..c95c5c86be69 100644
--- a/lib/tsan/rtl/tsan_report.cc
+++ b/lib/tsan/rtl/tsan_report.cc
@@ -22,7 +22,8 @@ ReportDesc::ReportDesc()
, locs(MBlockReportLoc)
, mutexes(MBlockReportMutex)
, threads(MBlockReportThread)
- , sleep() {
+ , sleep()
+ , count() {
}
ReportMop::ReportMop()
@@ -43,23 +44,22 @@ const char *thread_name(char *buf, int tid) {
return buf;
}
-static void PrintHeader(ReportType typ) {
- Printf("WARNING: ThreadSanitizer: ");
-
+static const char *ReportTypeString(ReportType typ) {
if (typ == ReportTypeRace)
- Printf("data race");
- else if (typ == ReportTypeUseAfterFree)
- Printf("heap-use-after-free");
- else if (typ == ReportTypeThreadLeak)
- Printf("thread leak");
- else if (typ == ReportTypeMutexDestroyLocked)
- Printf("destroy of a locked mutex");
- else if (typ == ReportTypeSignalUnsafe)
- Printf("signal-unsafe call inside of a signal");
- else if (typ == ReportTypeErrnoInSignal)
- Printf("signal handler spoils errno");
-
- Printf(" (pid=%d)\n", GetPid());
+ return "data race";
+ if (typ == ReportTypeVptrRace)
+ return "data race on vptr (ctor/dtor vs virtual call)";
+ if (typ == ReportTypeUseAfterFree)
+ return "heap-use-after-free";
+ if (typ == ReportTypeThreadLeak)
+ return "thread leak";
+ if (typ == ReportTypeMutexDestroyLocked)
+ return "destroy of a locked mutex";
+ if (typ == ReportTypeSignalUnsafe)
+ return "signal-unsafe call inside of a signal";
+ if (typ == ReportTypeErrnoInSignal)
+ return "signal handler spoils errno";
+ return "";
}
void PrintStack(const ReportStack *ent) {
@@ -89,11 +89,17 @@ static void PrintMutexSet(Vector<ReportMopMutex> const& mset) {
}
}
+static const char *MopDesc(bool first, bool write, bool atomic) {
+ return atomic ? (first ? (write ? "Atomic write" : "Atomic read")
+ : (write ? "Previous atomic write" : "Previous atomic read"))
+ : (first ? (write ? "Write" : "Read")
+ : (write ? "Previous write" : "Previous read"));
+}
+
static void PrintMop(const ReportMop *mop, bool first) {
char thrbuf[kThreadBufSize];
Printf(" %s of size %d at %p by %s",
- (first ? (mop->write ? "Write" : "Read")
- : (mop->write ? "Previous write" : "Previous read")),
+ MopDesc(first, mop->write, mop->atomic),
mop->size, (void*)mop->addr,
thread_name(thrbuf, mop->tid));
PrintMutexSet(mop->mset);
@@ -135,7 +141,7 @@ static void PrintThread(const ReportThread *rt) {
if (rt->id == 0) // Little sense in describing the main thread.
return;
Printf(" Thread T%d", rt->id);
- if (rt->name)
+ if (rt->name && rt->name[0] != '\0')
Printf(" '%s'", rt->name);
char thrbuf[kThreadBufSize];
Printf(" (tid=%zu, %s) created by %s",
@@ -152,9 +158,29 @@ static void PrintSleep(const ReportStack *s) {
PrintStack(s);
}
+static ReportStack *ChooseSummaryStack(const ReportDesc *rep) {
+ if (rep->mops.Size())
+ return rep->mops[0]->stack;
+ if (rep->stacks.Size())
+ return rep->stacks[0];
+ if (rep->mutexes.Size())
+ return rep->mutexes[0]->stack;
+ if (rep->threads.Size())
+ return rep->threads[0]->stack;
+ return 0;
+}
+
+ReportStack *SkipTsanInternalFrames(ReportStack *ent) {
+ while (FrameIsInternal(ent) && ent->next)
+ ent = ent->next;
+ return ent;
+}
+
void PrintReport(const ReportDesc *rep) {
Printf("==================\n");
- PrintHeader(rep->typ);
+ const char *rep_typ_str = ReportTypeString(rep->typ);
+ Printf("WARNING: ThreadSanitizer: %s (pid=%d)\n", rep_typ_str,
+ (int)internal_getpid());
for (uptr i = 0; i < rep->stacks.Size(); i++) {
if (i)
@@ -177,6 +203,12 @@ void PrintReport(const ReportDesc *rep) {
for (uptr i = 0; i < rep->threads.Size(); i++)
PrintThread(rep->threads[i]);
+ if (rep->typ == ReportTypeThreadLeak && rep->count > 1)
+ Printf(" And %d more similar thread leaks.\n\n", rep->count - 1);
+
+ if (ReportStack *ent = SkipTsanInternalFrames(ChooseSummaryStack(rep)))
+ ReportErrorSummary(rep_typ_str, ent->file, ent->line, ent->func);
+
Printf("==================\n");
}
diff --git a/lib/tsan/rtl/tsan_report.h b/lib/tsan/rtl/tsan_report.h
index f6715d1aae9b..b2ce0dd66a27 100644
--- a/lib/tsan/rtl/tsan_report.h
+++ b/lib/tsan/rtl/tsan_report.h
@@ -20,6 +20,7 @@ namespace __tsan {
enum ReportType {
ReportTypeRace,
+ ReportTypeVptrRace,
ReportTypeUseAfterFree,
ReportTypeThreadLeak,
ReportTypeMutexDestroyLocked,
@@ -48,6 +49,7 @@ struct ReportMop {
uptr addr;
int size;
bool write;
+ bool atomic;
Vector<ReportMopMutex> mset;
ReportStack *stack;
@@ -100,6 +102,7 @@ class ReportDesc {
Vector<ReportMutex*> mutexes;
Vector<ReportThread*> threads;
ReportStack *sleep;
+ int count;
ReportDesc();
~ReportDesc();
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index 493ed2055dfc..5924858c84c5 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -23,6 +23,7 @@
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
+#include "tsan_symbolize.h"
volatile int __tsan_resumed = 0;
@@ -37,17 +38,40 @@ THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
+// Can be overridden by a front-end.
+bool CPP_WEAK OnFinalize(bool failed) {
+ return failed;
+}
+
static Context *ctx;
Context *CTX() {
return ctx;
}
+static char thread_registry_placeholder[sizeof(ThreadRegistry)];
+
+static ThreadContextBase *CreateThreadContext(u32 tid) {
+ // Map thread trace when context is created.
+ MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
+ MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
+ new(ThreadTrace(tid)) Trace();
+ void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
+ return new(mem) ThreadContext(tid);
+}
+
+#ifndef TSAN_GO
+static const u32 kThreadQuarantineSize = 16;
+#else
+static const u32 kThreadQuarantineSize = 64;
+#endif
+
Context::Context()
: initialized()
, report_mtx(MutexTypeReport, StatMtxReport)
, nreported()
, nmissed_expected()
- , thread_mtx(MutexTypeThreads, StatMtxThreads)
+ , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
+ CreateThreadContext, kMaxTid, kThreadQuarantineSize))
, racy_stacks(MBlockRacyStacks)
, racy_addresses(MBlockRacyAddresses)
, fired_suppressions(MBlockRacyAddresses) {
@@ -60,10 +84,12 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
: fast_state(tid, epoch)
// Do not touch these, rely on zero initialization,
// they may be accessed before the ctor.
- // , fast_ignore_reads()
- // , fast_ignore_writes()
+ // , ignore_reads_and_writes()
// , in_rtl()
, shadow_stack_pos(&shadow_stack[0])
+#ifndef TSAN_GO
+ , jmp_bufs(MBlockJmpBuf)
+#endif
, tid(tid)
, unique_id(unique_id)
, stk_addr(stk_addr)
@@ -72,94 +98,74 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
, tls_size(tls_size) {
}
-ThreadContext::ThreadContext(int tid)
- : tid(tid)
- , unique_id()
- , os_id()
- , user_id()
- , thr()
- , status(ThreadStatusInvalid)
- , detached()
- , reuse_count()
- , epoch0()
- , epoch1()
- , dead_info()
- , dead_next()
- , name() {
-}
-
-static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
- uptr shadow = GetShadowMemoryConsumption();
-
- int nthread = 0;
- int nlivethread = 0;
- uptr threadmem = 0;
- {
- Lock l(&ctx->thread_mtx);
- for (unsigned i = 0; i < kMaxTid; i++) {
- ThreadContext *tctx = ctx->threads[i];
- if (tctx == 0)
- continue;
- nthread += 1;
- threadmem += sizeof(ThreadContext);
- if (tctx->status != ThreadStatusRunning)
- continue;
- nlivethread += 1;
- threadmem += sizeof(ThreadState);
- }
- }
-
- uptr nsync = 0;
- uptr syncmem = CTX()->synctab.GetMemoryConsumption(&nsync);
-
- internal_snprintf(buf, buf_size, "%d: shadow=%zuMB"
- " thread=%zuMB(total=%d/live=%d)"
- " sync=%zuMB(cnt=%zu)\n",
- num,
- shadow >> 20,
- threadmem >> 20, nthread, nlivethread,
- syncmem >> 20, nsync);
+static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
+ uptr n_threads;
+ uptr n_running_threads;
+ ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
+ InternalScopedBuffer<char> buf(4096);
+ internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
+ i, n_threads, n_running_threads);
+ internal_write(fd, buf.data(), internal_strlen(buf.data()));
+ WriteMemoryProfile(buf.data(), buf.size());
+ internal_write(fd, buf.data(), internal_strlen(buf.data()));
}
-static void MemoryProfileThread(void *arg) {
+static void BackgroundThread(void *arg) {
ScopedInRtl in_rtl;
- fd_t fd = (fd_t)(uptr)arg;
+ Context *ctx = CTX();
+ const u64 kMs2Ns = 1000 * 1000;
+
+ fd_t mprof_fd = kInvalidFd;
+ if (flags()->profile_memory && flags()->profile_memory[0]) {
+ InternalScopedBuffer<char> filename(4096);
+ internal_snprintf(filename.data(), filename.size(), "%s.%d",
+ flags()->profile_memory, (int)internal_getpid());
+ uptr openrv = OpenFile(filename.data(), true);
+ if (internal_iserror(openrv)) {
+ Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
+ &filename[0]);
+ } else {
+ mprof_fd = openrv;
+ }
+ }
+
+ u64 last_flush = NanoTime();
for (int i = 0; ; i++) {
- InternalScopedBuffer<char> buf(4096);
- WriteMemoryProfile(buf.data(), buf.size(), i);
- internal_write(fd, buf.data(), internal_strlen(buf.data()));
SleepForSeconds(1);
- }
-}
+ u64 now = NanoTime();
+
+ // Flush memory if requested.
+ if (flags()->flush_memory_ms) {
+ if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
+ FlushShadowMemory();
+ last_flush = NanoTime();
+ }
+ }
-static void InitializeMemoryProfile() {
- if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0)
- return;
- InternalScopedBuffer<char> filename(4096);
- internal_snprintf(filename.data(), filename.size(), "%s.%d",
- flags()->profile_memory, GetPid());
- fd_t fd = internal_open(filename.data(), true);
- if (fd == kInvalidFd) {
- Printf("Failed to open memory profile file '%s'\n", &filename[0]);
- Die();
- }
- internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd);
-}
+ // Write memory profile if requested.
+ if (mprof_fd != kInvalidFd)
+ MemoryProfiler(ctx, mprof_fd, i);
-static void MemoryFlushThread(void *arg) {
- ScopedInRtl in_rtl;
- for (int i = 0; ; i++) {
- SleepForMillis(flags()->flush_memory_ms);
- FlushShadowMemory();
+#ifndef TSAN_GO
+ // Flush symbolizer cache if requested.
+ if (flags()->flush_symbolizer_ms > 0) {
+ u64 last = atomic_load(&ctx->last_symbolize_time_ns,
+ memory_order_relaxed);
+ if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
+ Lock l(&ctx->report_mtx);
+ SpinMutexLock l2(&CommonSanitizerReportMutex);
+ SymbolizeFlush();
+ atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
+ }
+ }
+#endif
}
}
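// Usage note: the background thread is driven entirely by runtime flags read
// from the variable named by kTsanOptionsEnv (TSAN_OPTIONS), e.g.
//   TSAN_OPTIONS="profile_memory=/tmp/tsan.mprof flush_memory_ms=1000 flush_symbolizer_ms=5000" ./app
// enables the once-per-second memory profile, periodic shadow flushing and
// periodic symbolizer cache flushing shown above.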
-static void InitializeMemoryFlush() {
- if (flags()->flush_memory_ms == 0)
- return;
- if (flags()->flush_memory_ms < 100)
- flags()->flush_memory_ms = 100;
- internal_start_thread(&MemoryFlushThread, 0);
+void DontNeedShadowFor(uptr addr, uptr size) {
+ uptr shadow_beg = MemToShadow(addr);
+ uptr shadow_end = MemToShadow(addr + size);
+ FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}
void MapShadow(uptr addr, uptr size) {
@@ -182,6 +188,7 @@ void Initialize(ThreadState *thr) {
if (is_initialized)
return;
is_initialized = true;
+ SanitizerToolName = "ThreadSanitizer";
// Install tool-specific callbacks in sanitizer_common.
SetCheckFailedCallback(TsanCheckFailed);
@@ -197,9 +204,6 @@ void Initialize(ThreadState *thr) {
#ifndef TSAN_GO
InitializeShadowMemory();
#endif
- ctx->dead_list_size = 0;
- ctx->dead_list_head = 0;
- ctx->dead_list_tail = 0;
InitializeFlags(&ctx->flags, env);
// Setup correct file descriptor for error reports.
if (internal_strcmp(flags()->log_path, "stdout") == 0)
@@ -220,26 +224,24 @@ void Initialize(ThreadState *thr) {
}
}
#endif
- InitializeMemoryProfile();
- InitializeMemoryFlush();
+ internal_start_thread(&BackgroundThread, 0);
if (ctx->flags.verbosity)
Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
- GetPid());
+ (int)internal_getpid());
// Initialize thread 0.
- ctx->thread_seq = 0;
int tid = ThreadCreate(thr, 0, 0, true);
CHECK_EQ(tid, 0);
- ThreadStart(thr, tid, GetPid());
+ ThreadStart(thr, tid, internal_getpid());
CHECK_EQ(thr->in_rtl, 1);
ctx->initialized = true;
if (flags()->stop_on_start) {
Printf("ThreadSanitizer is suspended at startup (pid %d)."
" Call __tsan_resume().\n",
- GetPid());
- while (__tsan_resumed == 0);
+ (int)internal_getpid());
+ while (__tsan_resumed == 0) {}
}
}
@@ -253,8 +255,15 @@ int Finalize(ThreadState *thr) {
// Wait for pending reports.
ctx->report_mtx.Lock();
+ CommonSanitizerReportMutex.Lock();
+ CommonSanitizerReportMutex.Unlock();
ctx->report_mtx.Unlock();
+#ifndef TSAN_GO
+ if (ctx->flags.verbosity)
+ AllocatorPrintStats();
+#endif
+
ThreadFinalize(thr);
if (ctx->nreported) {
@@ -272,6 +281,15 @@ int Finalize(ThreadState *thr) {
ctx->nmissed_expected);
}
+ if (flags()->print_suppressions)
+ PrintMatchedSuppressions();
+#ifndef TSAN_GO
+ if (flags()->print_benign)
+ PrintMatchedBenignRaces();
+#endif
+
+ failed = OnFinalize(failed);
+
StatAggregate(ctx->stat, thr->stat);
StatOutput(ctx->stat);
return failed ? flags()->exitcode : 0;
@@ -296,15 +314,20 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
void TraceSwitch(ThreadState *thr) {
thr->nomalloc++;
ScopedInRtl in_rtl;
- Lock l(&thr->trace.mtx);
+ Trace *thr_trace = ThreadTrace(thr->tid);
+ Lock l(&thr_trace->mtx);
unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
- TraceHeader *hdr = &thr->trace.headers[trace];
+ TraceHeader *hdr = &thr_trace->headers[trace];
hdr->epoch0 = thr->fast_state.epoch();
hdr->stack0.ObtainCurrent(thr, 0);
hdr->mset0 = thr->mset;
thr->nomalloc--;
}
+Trace *ThreadTrace(int tid) {
+ return (Trace*)GetThreadTraceHeader(tid);
+}
+
uptr TraceTopPC(ThreadState *thr) {
Event *events = (Event*)GetThreadTrace(thr->tid);
uptr pc = events[thr->fast_state.GetTracePos()];
@@ -330,18 +353,18 @@ extern "C" void __tsan_report_race() {
#endif
ALWAYS_INLINE
-static Shadow LoadShadow(u64 *p) {
+Shadow LoadShadow(u64 *p) {
u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
return Shadow(raw);
}
ALWAYS_INLINE
-static void StoreShadow(u64 *sp, u64 s) {
+void StoreShadow(u64 *sp, u64 s) {
atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}
ALWAYS_INLINE
-static void StoreIfNotYetStored(u64 *sp, u64 *s) {
+void StoreIfNotYetStored(u64 *sp, u64 *s) {
StoreShadow(sp, *s);
*s = 0;
}
@@ -358,18 +381,6 @@ static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
#endif
}
-static inline bool BothReads(Shadow s, int kAccessIsWrite) {
- return !kAccessIsWrite && !s.is_write();
-}
-
-static inline bool OldIsRWNotWeaker(Shadow old, int kAccessIsWrite) {
- return old.is_write() || !kAccessIsWrite;
-}
-
-static inline bool OldIsRWWeakerOrEqual(Shadow old, int kAccessIsWrite) {
- return !old.is_write() || kAccessIsWrite;
-}
-
static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
return old.epoch() >= thr->fast_synch_epoch;
}
@@ -378,9 +389,9 @@ static inline bool HappensBefore(Shadow old, ThreadState *thr) {
return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
-ALWAYS_INLINE
+ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
u64 *shadow_mem, Shadow cur) {
StatInc(thr, StatMop);
StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
@@ -452,9 +463,30 @@ void MemoryAccessImpl(ThreadState *thr, uptr addr,
return;
}
-ALWAYS_INLINE
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int size, bool kAccessIsWrite, bool kIsAtomic) {
+ while (size) {
+ int size1 = 1;
+ int kAccessSizeLog = kSizeLog1;
+ if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) {
+ size1 = 8;
+ kAccessSizeLog = kSizeLog8;
+ } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) {
+ size1 = 4;
+ kAccessSizeLog = kSizeLog4;
+ } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) {
+ size1 = 2;
+ kAccessSizeLog = kSizeLog2;
+ }
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
+ addr += size1;
+ size -= size1;
+ }
+}
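// Worked example for the splitting loop above: an unaligned 7-byte access at
// address 0x1003 is reported as three cell-local accesses,
//   4 bytes at 0x1003, 1 byte at 0x1007, 2 bytes at 0x1008,
// so no individual MemoryAccess() call crosses an 8-byte shadow cell.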
+
+ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite) {
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
u64 *shadow_mem = (u64*)MemToShadow(addr);
DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
" is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
@@ -473,6 +505,16 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
}
#endif
+ if (*shadow_mem == kShadowRodata) {
+ // Access to .rodata section, no races here.
+ // Measurements show that it can be 10-20% of all memory accesses.
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopRodata);
+ return;
+ }
+
FastState fast_state = thr->fast_state;
if (fast_state.GetIgnoreBit())
return;
@@ -481,17 +523,20 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
Shadow cur(fast_state);
cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
cur.SetWrite(kAccessIsWrite);
+ cur.SetAtomic(kIsAtomic);
// We must not store to the trace if we do not store to the shadow.
// That is, this call must be moved somewhere below.
TraceAddEvent(thr, fast_state, EventTypeMop, pc);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite,
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
shadow_mem, cur);
}
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
u64 val) {
+ (void)thr;
+ (void)pc;
if (size == 0)
return;
// FIXME: fix me.
@@ -508,23 +553,42 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
// let it just crash as usual.
if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
return;
- (void)thr;
- (void)pc;
- // Some programs mmap like hundreds of GBs but actually used a small part.
- // So, it's better to report a false positive on the memory
- // then to hang here senselessly.
- const uptr kMaxResetSize = 4ull*1024*1024*1024;
- if (size > kMaxResetSize)
- size = kMaxResetSize;
+ // Don't want to touch lots of shadow memory.
+  // If a program maps a 10MB stack, there is no need to reset the whole range.
size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
- u64 *p = (u64*)MemToShadow(addr);
- CHECK(IsShadowMem((uptr)p));
- CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
- // FIXME: may overwrite a part outside the region
- for (uptr i = 0; i < size * kShadowCnt / kShadowCell;) {
- p[i++] = val;
- for (uptr j = 1; j < kShadowCnt; j++)
- p[i++] = 0;
+ if (size < 64*1024) {
+ u64 *p = (u64*)MemToShadow(addr);
+ CHECK(IsShadowMem((uptr)p));
+ CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
+ // FIXME: may overwrite a part outside the region
+ for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
+ p[i++] = val;
+ for (uptr j = 1; j < kShadowCnt; j++)
+ p[i++] = 0;
+ }
+ } else {
+ // The region is big, reset only beginning and end.
+ const uptr kPageSize = 4096;
+ u64 *begin = (u64*)MemToShadow(addr);
+ u64 *end = begin + size / kShadowCell * kShadowCnt;
+ u64 *p = begin;
+    // Set at least the first kPageSize/2 bytes, up to a page boundary.
+ while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
+ *p++ = val;
+ for (uptr j = 1; j < kShadowCnt; j++)
+ *p++ = 0;
+ }
+ // Reset middle part.
+ u64 *p1 = p;
+ p = RoundDown(end, kPageSize);
+ UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
+ MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
+ // Set the ending.
+ while (p < end) {
+ *p++ = val;
+ for (uptr j = 1; j < kShadowCnt; j++)
+ *p++ = 0;
+ }
}
}
@@ -533,7 +597,17 @@ void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
}
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ // Processing more than 1k (4k of shadow) is expensive,
+  // can cause excessive memory consumption (the user does not necessarily touch
+  // the whole range) and is most likely unnecessary.
+ if (size > 1024)
+ size = 1024;
+ CHECK_EQ(thr->is_freeing, false);
+ thr->is_freeing = true;
MemoryAccessRange(thr, pc, addr, size, true);
+ thr->is_freeing = false;
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
Shadow s(thr->fast_state);
s.ClearIgnoreBit();
s.MarkAsFreed();
@@ -543,6 +617,8 @@ void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
}
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
Shadow s(thr->fast_state);
s.ClearIgnoreBit();
s.SetWrite(true);
@@ -550,7 +626,7 @@ void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
MemoryRangeSet(thr, pc, addr, size, s.raw());
}
-ALWAYS_INLINE
+ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
DCHECK_EQ(thr->in_rtl, 0);
StatInc(thr, StatFuncEnter);
@@ -580,7 +656,7 @@ void FuncEntry(ThreadState *thr, uptr pc) {
thr->shadow_stack_pos++;
}
-ALWAYS_INLINE
+ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
DCHECK_EQ(thr->in_rtl, 0);
StatInc(thr, StatFuncExit);
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index 6b0ab0d385ef..f1a73e457331 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -26,8 +26,9 @@
#ifndef TSAN_RTL_H
#define TSAN_RTL_H
-#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
@@ -46,15 +47,73 @@ namespace __tsan {
// Descriptor of user's memory block.
struct MBlock {
- Mutex mtx;
- uptr size;
- u32 alloc_tid;
- u32 alloc_stack_id;
- SyncVar *head;
+ /*
+ u64 mtx : 1; // must be first
+ u64 lst : 44;
+ u64 stk : 31; // on word boundary
+ u64 tid : kTidBits;
+ u64 siz : 128 - 1 - 31 - 44 - kTidBits; // 39
+ */
+ u64 raw[2];
+
+ void Init(uptr siz, u32 tid, u32 stk) {
+ raw[0] = raw[1] = 0;
+ raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
+ raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
+ raw[0] |= (u64)stk << (1 + 44);
+ raw[1] |= (u64)stk >> (64 - 44 - 1);
+ DCHECK_EQ(Size(), siz);
+ DCHECK_EQ(Tid(), tid);
+ DCHECK_EQ(StackId(), stk);
+ }
+
+ u32 Tid() const {
+ return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
+ }
+
+ uptr Size() const {
+ return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
+ }
+
+ u32 StackId() const {
+ return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
+ }
+
+ SyncVar *ListHead() const {
+ return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
+ }
+
+ void ListPush(SyncVar *v) {
+ SyncVar *lst = ListHead();
+ v->next = lst;
+ u64 x = (u64)v ^ (u64)lst;
+ x = (x >> 3) << 1;
+ raw[0] ^= x;
+ DCHECK_EQ(ListHead(), v);
+ }
+
+ SyncVar *ListPop() {
+ SyncVar *lst = ListHead();
+ SyncVar *nxt = lst->next;
+ lst->next = 0;
+ u64 x = (u64)lst ^ (u64)nxt;
+ x = (x >> 3) << 1;
+ raw[0] ^= x;
+ DCHECK_EQ(ListHead(), nxt);
+ return lst;
+ }
- MBlock()
- : mtx(MutexTypeMBlock, StatMtxMBlock) {
+ void ListReset() {
+ SyncVar *lst = ListHead();
+ u64 x = (u64)lst;
+ x = (x >> 3) << 1;
+ raw[0] ^= x;
+ DCHECK_EQ(ListHead(), 0);
}
+
+ void Lock();
+ void Unlock();
+ typedef GenericScopedLock<MBlock> ScopedLock;
};
#ifndef TSAN_GO
@@ -65,22 +124,11 @@ const uptr kAllocatorSpace = 0x7d0000000000ULL;
#endif
const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
-struct TsanMapUnmapCallback {
- void OnMap(uptr p, uptr size) const { }
- void OnUnmap(uptr p, uptr size) const {
- // We are about to unmap a chunk of user memory.
- // Mark the corresponding shadow memory as not needed.
- uptr shadow_beg = MemToShadow(p);
- uptr shadow_end = MemToShadow(p + size);
- CHECK(IsAligned(shadow_end|shadow_beg, GetPageSizeCached()));
- FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
- }
-};
-
+struct MapUnmapCallback;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
- DefaultSizeClassMap> PrimaryAllocator;
+ DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef LargeMmapAllocator<TsanMapUnmapCallback> SecondaryAllocator;
+typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
Allocator *allocator();
@@ -89,6 +137,8 @@ Allocator *allocator();
void TsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2);
+const u64 kShadowRodata = (u64)-1; // .rodata shadow marker
+
// FastState (from most significant bit):
// ignore : 1
// tid : kTidBits
@@ -173,7 +223,8 @@ class FastState {
// freed : 1
// tid : kTidBits
// epoch : kClkBits
-// is_write : 1
+// is_atomic : 1
+// is_read : 1
// size_log : 2
// addr0 : 3
class Shadow : public FastState {
@@ -197,13 +248,26 @@ class Shadow : public FastState {
}
void SetWrite(unsigned kAccessIsWrite) {
- DCHECK_EQ(x_ & 32, 0);
- if (kAccessIsWrite)
- x_ |= 32;
- DCHECK_EQ(kAccessIsWrite, is_write());
+ DCHECK_EQ(x_ & kReadBit, 0);
+ if (!kAccessIsWrite)
+ x_ |= kReadBit;
+ DCHECK_EQ(kAccessIsWrite, IsWrite());
+ }
+
+ void SetAtomic(bool kIsAtomic) {
+ DCHECK(!IsAtomic());
+ if (kIsAtomic)
+ x_ |= kAtomicBit;
+ DCHECK_EQ(IsAtomic(), kIsAtomic);
}
- bool IsZero() const { return x_ == 0; }
+ bool IsAtomic() const {
+ return x_ & kAtomicBit;
+ }
+
+ bool IsZero() const {
+ return x_ == 0;
+ }
static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
@@ -250,7 +314,8 @@ class Shadow : public FastState {
}
u64 addr0() const { return x_ & 7; }
u64 size() const { return 1ull << size_log(); }
- bool is_write() const { return x_ & 32; }
+ bool IsWrite() const { return !IsRead(); }
+ bool IsRead() const { return x_ & kReadBit; }
// The idea behind the freed bit is as follows.
// When the memory is freed (or otherwise unaccessible) we write to the shadow
@@ -265,13 +330,46 @@ class Shadow : public FastState {
x_ |= kFreedBit;
}
+ bool IsFreed() const {
+ return x_ & kFreedBit;
+ }
+
bool GetFreedAndReset() {
bool res = x_ & kFreedBit;
x_ &= ~kFreedBit;
return res;
}
+ bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
+    // analyzes the 5th bit (is_read) and the 6th bit (is_atomic)
+ bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift)
+ | (kIsAtomic << kAtomicShift));
+ DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
+ return v;
+ }
+
+ bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
+ bool v = ((x_ >> kReadShift) & 3)
+ <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+ DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
+ (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
+ return v;
+ }
+
+ bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
+ bool v = ((x_ >> kReadShift) & 3)
+ >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+ DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
+ (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
+ return v;
+ }
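// Strength encoding used by the three predicates above, where
// value = (kIsWrite ^ 1) | (kIsAtomic << 1) and smaller means stronger:
//   0  non-atomic write
//   1  non-atomic read
//   2  atomic write
//   3  atomic read
// e.g. an old atomic write (2) IsRWNotWeaker() than a new atomic read (3),
// and two reads or two atomic accesses satisfy IsBothReadsOrAtomic().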
+
private:
+ static const u64 kReadShift = 5;
+ static const u64 kReadBit = 1ull << kReadShift;
+ static const u64 kAtomicShift = 6;
+ static const u64 kAtomicBit = 1ull << kAtomicShift;
+
u64 size_log() const { return (x_ >> 3) & 3; }
static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
@@ -286,6 +384,12 @@ class Shadow : public FastState {
struct SignalContext;
+struct JmpBuf {
+ uptr sp;
+ uptr mangled_sp;
+ uptr *shadow_stack_pos;
+};
+
// This struct is stored in TLS.
struct ThreadState {
FastState fast_state;
@@ -308,7 +412,6 @@ struct ThreadState {
uptr *shadow_stack_pos;
u64 *racy_shadow_addr;
u64 racy_state[2];
- Trace trace;
#ifndef TSAN_GO
   // C/C++ uses an embedded shadow stack of fixed size.
uptr shadow_stack[kShadowStackSize];
@@ -321,12 +424,16 @@ struct ThreadState {
ThreadClock clock;
#ifndef TSAN_GO
AllocatorCache alloc_cache;
+ Vector<JmpBuf> jmp_bufs;
#endif
u64 stat[StatCnt];
const int tid;
const int unique_id;
int in_rtl;
+ bool in_symbolizer;
bool is_alive;
+ bool is_freeing;
+ bool is_vptr_access;
const uptr stk_addr;
const uptr stk_size;
const uptr tls_addr;
@@ -360,41 +467,30 @@ INLINE ThreadState *cur_thread() {
}
#endif
-enum ThreadStatus {
- ThreadStatusInvalid, // Non-existent thread, data is invalid.
- ThreadStatusCreated, // Created but not yet running.
- ThreadStatusRunning, // The thread is currently running.
- ThreadStatusFinished, // Joinable thread is finished but not yet joined.
- ThreadStatusDead // Joined, but some info (trace) is still alive.
-};
-
-// An info about a thread that is hold for some time after its termination.
-struct ThreadDeadInfo {
- Trace trace;
-};
-
-struct ThreadContext {
- const int tid;
- int unique_id; // Non-rolling thread id.
- uptr os_id; // pid
- uptr user_id; // Some opaque user thread id (e.g. pthread_t).
+class ThreadContext : public ThreadContextBase {
+ public:
+ explicit ThreadContext(int tid);
+ ~ThreadContext();
ThreadState *thr;
- ThreadStatus status;
- bool detached;
- int reuse_count;
+#ifdef TSAN_GO
+ StackTrace creation_stack;
+#else
+ u32 creation_stack_id;
+#endif
SyncClock sync;
// Epoch at which the thread had started.
// If we see an event from the thread stamped by an older epoch,
// the event is from a dead thread that shared tid with this thread.
u64 epoch0;
u64 epoch1;
- StackTrace creation_stack;
- int creation_tid;
- ThreadDeadInfo *dead_info;
- ThreadContext *dead_next; // In dead thread list.
- char *name; // As annotated by user.
- explicit ThreadContext(int tid);
+ // Override superclass callbacks.
+ void OnDead();
+ void OnJoined(void *arg);
+ void OnFinished();
+ void OnStarted(void *arg);
+ void OnCreated(void *arg);
+ void OnReset();
};
struct RacyStacks {
@@ -416,6 +512,7 @@ struct RacyAddress {
struct FiredSuppression {
ReportType type;
uptr pc;
+ Suppression *supp;
};
struct Context {
@@ -428,16 +525,9 @@ struct Context {
Mutex report_mtx;
int nreported;
int nmissed_expected;
+ atomic_uint64_t last_symbolize_time_ns;
- Mutex thread_mtx;
- unsigned thread_seq;
- unsigned unique_thread_seq;
- int alive_threads;
- int max_alive_threads;
- ThreadContext *threads[kMaxTid];
- int dead_list_size;
- ThreadContext* dead_list_head;
- ThreadContext* dead_list_tail;
+ ThreadRegistry *thread_registry;
Vector<RacyStacks> racy_stacks;
Vector<RacyAddress> racy_addresses;
@@ -472,6 +562,7 @@ class ScopedReport {
void AddMutex(const SyncVar *s);
void AddLocation(uptr addr, uptr size);
void AddSleep(u32 stack_id);
+ void SetCount(int count);
const ReportDesc *GetReport() const;
@@ -489,13 +580,18 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
-void ALWAYS_INLINE INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
+void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
if (kCollectStats)
thr->stat[typ] += n;
}
+void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
+ if (kCollectStats)
+ thr->stat[typ] = n;
+}
void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size);
+void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeDynamicAnnotations();
@@ -503,11 +599,15 @@ void InitializeDynamicAnnotations();
void ReportRace(ThreadState *thr);
bool OutputReport(Context *ctx,
const ScopedReport &srep,
- const ReportStack *suppress_stack = 0);
+ const ReportStack *suppress_stack1 = 0,
+ const ReportStack *suppress_stack2 = 0);
bool IsFiredSuppression(Context *ctx,
const ScopedReport &srep,
const StackTrace &trace);
bool IsExpectedReport(uptr addr, uptr size);
+void PrintMatchedBenignRaces();
+bool FrameIsInternal(const ReportStack *frame);
+ReportStack *SkipTsanInternalFrames(ReportStack *ent);
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
@@ -523,6 +623,7 @@ bool IsExpectedReport(uptr addr, uptr size);
u32 CurrentStackId(ThreadState *thr, uptr pc);
void PrintCurrentStack(ThreadState *thr, uptr pc);
+void PrintCurrentStackSlow(); // uses libunwind
void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);
@@ -532,16 +633,42 @@ SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite);
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
u64 *shadow_mem, Shadow cur);
-void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr);
-void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr);
-void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr);
-void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
- uptr size, bool is_write);
+ uptr size, bool is_write);
+void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
+ uptr size, uptr step, bool is_write);
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int size, bool kAccessIsWrite, bool kIsAtomic);
+
+const int kSizeLog1 = 0;
+const int kSizeLog2 = 1;
+const int kSizeLog4 = 2;
+const int kSizeLog8 = 3;
+
+void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
+}
+
+void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
+}
+
+void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
+}
+
+void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
+}
+
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
@@ -564,8 +691,8 @@ void ProcessPendingSignals(ThreadState *thr);
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
-void MutexLock(ThreadState *thr, uptr pc, uptr addr);
-void MutexUnlock(ThreadState *thr, uptr pc, uptr addr);
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1);
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
@@ -601,9 +728,10 @@ void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
+Trace *ThreadTrace(int tid);
extern "C" void __tsan_trace_switch();
-void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, FastState fs,
+void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
EventType typ, u64 addr) {
DCHECK_GE((int)typ, 0);
DCHECK_LE((int)typ, 7);
diff --git a/lib/tsan/rtl/tsan_rtl_amd64.S b/lib/tsan/rtl/tsan_rtl_amd64.S
index af878563573e..11c75c72dbe5 100644
--- a/lib/tsan/rtl/tsan_rtl_amd64.S
+++ b/lib/tsan/rtl/tsan_rtl_amd64.S
@@ -160,6 +160,143 @@ __tsan_report_race_thunk:
ret
.cfi_endproc
+.hidden __tsan_setjmp
+.comm _ZN14__interception11real_setjmpE,8,8
+.globl setjmp
+.type setjmp, @function
+setjmp:
+ .cfi_startproc
+ // save env parameter
+ push %rdi
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset %rdi, 0
+ // obtain %rsp
+ lea 16(%rsp), %rdi
+ mov %rdi, %rsi
+ xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
+ rol $0x11, %rsi
+ // call tsan interceptor
+ call __tsan_setjmp
+ // restore env parameter
+ pop %rdi
+ .cfi_adjust_cfa_offset -8
+ .cfi_restore %rdi
+ // tail jump to libc setjmp
+ movl $0, %eax
+ movq _ZN14__interception11real_setjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+ .cfi_endproc
+.size setjmp, .-setjmp
+
+.comm _ZN14__interception12real__setjmpE,8,8
+.globl _setjmp
+.type _setjmp, @function
+_setjmp:
+ .cfi_startproc
+ // save env parameter
+ push %rdi
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset %rdi, 0
+ // obtain %rsp
+ lea 16(%rsp), %rdi
+ mov %rdi, %rsi
+ xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
+ rol $0x11, %rsi
+ // call tsan interceptor
+ call __tsan_setjmp
+ // restore env parameter
+ pop %rdi
+ .cfi_adjust_cfa_offset -8
+ .cfi_restore %rdi
+ // tail jump to libc setjmp
+ movl $0, %eax
+ movq _ZN14__interception12real__setjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+ .cfi_endproc
+.size _setjmp, .-_setjmp
+
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.globl sigsetjmp
+.type sigsetjmp, @function
+sigsetjmp:
+ .cfi_startproc
+ // save env parameter
+ push %rdi
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset %rdi, 0
+ // save savesigs parameter
+ push %rsi
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset %rsi, 0
+ // align stack frame
+ sub $8, %rsp
+ .cfi_adjust_cfa_offset 8
+ // obtain %rsp
+ lea 32(%rsp), %rdi
+ mov %rdi, %rsi
+ xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
+ rol $0x11, %rsi
+ // call tsan interceptor
+ call __tsan_setjmp
+ // unalign stack frame
+ add $8, %rsp
+ .cfi_adjust_cfa_offset -8
+ // restore savesigs parameter
+ pop %rsi
+ .cfi_adjust_cfa_offset -8
+ .cfi_restore %rsi
+ // restore env parameter
+ pop %rdi
+ .cfi_adjust_cfa_offset -8
+ .cfi_restore %rdi
+ // tail jump to libc sigsetjmp
+ movl $0, %eax
+ movq _ZN14__interception14real_sigsetjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+ .cfi_endproc
+.size sigsetjmp, .-sigsetjmp
+
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl __sigsetjmp
+.type __sigsetjmp, @function
+__sigsetjmp:
+ .cfi_startproc
+ // save env parameter
+ push %rdi
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset %rdi, 0
+ // save savesigs parameter
+ push %rsi
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset %rsi, 0
+ // align stack frame
+ sub $8, %rsp
+ .cfi_adjust_cfa_offset 8
+ // obtain %rsp
+ lea 32(%rsp), %rdi
+ mov %rdi, %rsi
+ xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
+ rol $0x11, %rsi
+ // call tsan interceptor
+ call __tsan_setjmp
+ // unalign stack frame
+ add $8, %rsp
+ .cfi_adjust_cfa_offset -8
+ // restore savesigs parameter
+ pop %rsi
+ .cfi_adjust_cfa_offset -8
+ .cfi_restore %rsi
+ // restore env parameter
+ pop %rdi
+ .cfi_adjust_cfa_offset -8
+ .cfi_restore %rdi
+ // tail jump to libc sigsetjmp
+ movl $0, %eax
+ movq _ZN14__interception16real___sigsetjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+ .cfi_endproc
+.size __sigsetjmp, .-__sigsetjmp
+
#ifdef __linux__
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index d812f12be560..cf2e44dd09ee 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -26,8 +26,12 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
StatInc(thr, StatMutexCreate);
- if (!linker_init && IsAppMem(addr))
- MemoryWrite1Byte(thr, pc, addr);
+ if (!linker_init && IsAppMem(addr)) {
+ CHECK(!thr->is_freeing);
+ thr->is_freeing = true;
+ MemoryWrite(thr, pc, addr, kSizeLog1);
+ thr->is_freeing = false;
+ }
SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
s->is_rw = rw;
s->is_recursive = recursive;
@@ -49,13 +53,17 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
if (s == 0)
return;
- if (IsAppMem(addr))
- MemoryWrite1Byte(thr, pc, addr);
+ if (IsAppMem(addr)) {
+ CHECK(!thr->is_freeing);
+ thr->is_freeing = true;
+ MemoryWrite(thr, pc, addr, kSizeLog1);
+ thr->is_freeing = false;
+ }
if (flags()->report_destroy_locked
&& s->owner_tid != SyncVar::kInvalidTid
&& !s->is_broken) {
s->is_broken = true;
- Lock l(&ctx->thread_mtx);
+ ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeMutexDestroyLocked);
rep.AddMutex(s);
StackTrace trace;
@@ -71,11 +79,12 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
DestroyAndFree(s);
}
-void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
CHECK_GT(thr->in_rtl, 0);
- DPrintf("#%d: MutexLock %zx\n", thr->tid, addr);
+ DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
+ CHECK_GT(rec, 0);
if (IsAppMem(addr))
- MemoryRead1Byte(thr, pc, addr);
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
@@ -99,19 +108,20 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
} else if (!s->is_recursive) {
StatInc(thr, StatMutexRecLock);
}
- s->recursion++;
+ s->recursion += rec;
thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
s->mtx.Unlock();
}
-void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
CHECK_GT(thr->in_rtl, 0);
- DPrintf("#%d: MutexUnlock %zx\n", thr->tid, addr);
+ DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
if (IsAppMem(addr))
- MemoryRead1Byte(thr, pc, addr);
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
+ int rec = 0;
if (s->recursion == 0) {
if (!s->is_broken) {
s->is_broken = true;
@@ -125,7 +135,8 @@ void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
PrintCurrentStack(thr, pc);
}
} else {
- s->recursion--;
+ rec = all ? s->recursion : 1;
+ s->recursion -= rec;
if (s->recursion == 0) {
StatInc(thr, StatMutexUnlock);
s->owner_tid = SyncVar::kInvalidTid;
@@ -139,6 +150,7 @@ void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
}
thr->mset.Del(s->GetId(), true);
s->mtx.Unlock();
+ return rec;
}
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
@@ -146,7 +158,7 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
StatInc(thr, StatMutexReadLock);
if (IsAppMem(addr))
- MemoryRead1Byte(thr, pc, addr);
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
@@ -167,7 +179,7 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
StatInc(thr, StatMutexReadUnlock);
if (IsAppMem(addr))
- MemoryRead1Byte(thr, pc, addr);
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
@@ -188,7 +200,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
if (IsAppMem(addr))
- MemoryRead1Byte(thr, pc, addr);
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
bool write = true;
if (s->owner_tid == SyncVar::kInvalidTid) {
@@ -240,18 +252,19 @@ void Acquire(ThreadState *thr, uptr pc, uptr addr) {
s->mtx.ReadUnlock();
}
+static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
+ ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ if (tctx->status == ThreadStatusRunning)
+ thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
+ else
+ thr->clock.set(tctx->tid, tctx->epoch1);
+}
+
void AcquireGlobal(ThreadState *thr, uptr pc) {
- Context *ctx = CTX();
- Lock l(&ctx->thread_mtx);
- for (unsigned i = 0; i < kMaxTid; i++) {
- ThreadContext *tctx = ctx->threads[i];
- if (tctx == 0)
- continue;
- if (tctx->status == ThreadStatusRunning)
- thr->clock.set(i, tctx->thr->fast_state.epoch());
- else
- thr->clock.set(i, tctx->epoch1);
- }
+ ThreadRegistryLock l(CTX()->thread_registry);
+ CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ UpdateClockCallback, thr);
}
void Release(ThreadState *thr, uptr pc, uptr addr) {
@@ -275,19 +288,20 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
}
#ifndef TSAN_GO
+static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
+ ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ if (tctx->status == ThreadStatusRunning)
+ thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
+ else
+ thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
+}
+
void AfterSleep(ThreadState *thr, uptr pc) {
- Context *ctx = CTX();
thr->last_sleep_stack_id = CurrentStackId(thr, pc);
- Lock l(&ctx->thread_mtx);
- for (unsigned i = 0; i < kMaxTid; i++) {
- ThreadContext *tctx = ctx->threads[i];
- if (tctx == 0)
- continue;
- if (tctx->status == ThreadStatusRunning)
- thr->last_sleep_clock.set(i, tctx->thr->fast_state.epoch());
- else
- thr->last_sleep_clock.set(i, tctx->epoch1);
- }
+ ThreadRegistryLock l(CTX()->thread_registry);
+ CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ UpdateSleepClockCallback, thr);
}
#endif
diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc
index 1a780e4b8070..f77a7a2efa96 100644
--- a/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/lib/tsan/rtl/tsan_rtl_report.cc
@@ -15,6 +15,7 @@
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
@@ -29,12 +30,15 @@ namespace __tsan {
using namespace __sanitizer; // NOLINT
+static ReportStack *SymbolizeStack(const StackTrace& trace);
+
void TsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
ScopedInRtl in_rtl;
Printf("FATAL: ThreadSanitizer CHECK failed: "
"%s:%d \"%s\" (0x%zx, 0x%zx)\n",
file, line, cond, (uptr)v1, (uptr)v2);
+ PrintCurrentStackSlow();
Die();
}
@@ -121,14 +125,16 @@ static ReportStack *SymbolizeStack(const StackTrace& trace) {
ScopedReport::ScopedReport(ReportType typ) {
ctx_ = CTX();
- ctx_->thread_mtx.CheckLocked();
+ ctx_->thread_registry->CheckLocked();
void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
rep_ = new(mem) ReportDesc;
rep_->typ = typ;
ctx_->report_mtx.Lock();
+ CommonSanitizerReportMutex.Lock();
}
ScopedReport::~ScopedReport() {
+ CommonSanitizerReportMutex.Unlock();
ctx_->report_mtx.Unlock();
DestroyAndFree(rep_);
}
@@ -146,7 +152,8 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
mop->tid = s.tid();
mop->addr = addr + s.addr0();
mop->size = s.size();
- mop->write = s.is_write();
+ mop->write = s.IsWrite();
+ mop->atomic = s.IsAtomic();
mop->stack = SymbolizeStack(*stack);
for (uptr i = 0; i < mset->Size(); i++) {
MutexSet::Desc d = mset->Get(i);
@@ -172,7 +179,7 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
void ScopedReport::AddThread(const ThreadContext *tctx) {
for (uptr i = 0; i < rep_->threads.Size(); i++) {
- if (rep_->threads[i]->id == tctx->tid)
+ if ((u32)rep_->threads[i]->id == tctx->tid)
return;
}
void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
@@ -182,42 +189,65 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
rt->pid = tctx->os_id;
rt->running = (tctx->status == ThreadStatusRunning);
rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
- rt->parent_tid = tctx->creation_tid;
+ rt->parent_tid = tctx->parent_tid;
+ rt->stack = 0;
+#ifdef TSAN_GO
rt->stack = SymbolizeStack(tctx->creation_stack);
+#else
+ uptr ssz = 0;
+ const uptr *stack = StackDepotGet(tctx->creation_stack_id, &ssz);
+ if (stack) {
+ StackTrace trace;
+ trace.Init(stack, ssz);
+ rt->stack = SymbolizeStack(trace);
+ }
+#endif
}
#ifndef TSAN_GO
-static ThreadContext *FindThread(int unique_id) {
+static ThreadContext *FindThreadByUidLocked(int unique_id) {
Context *ctx = CTX();
- ctx->thread_mtx.CheckLocked();
+ ctx->thread_registry->CheckLocked();
for (unsigned i = 0; i < kMaxTid; i++) {
- ThreadContext *tctx = ctx->threads[i];
- if (tctx && tctx->unique_id == unique_id) {
+ ThreadContext *tctx = static_cast<ThreadContext*>(
+ ctx->thread_registry->GetThreadLocked(i));
+ if (tctx && tctx->unique_id == (u32)unique_id) {
return tctx;
}
}
return 0;
}
+static ThreadContext *FindThreadByTidLocked(int tid) {
+ Context *ctx = CTX();
+ ctx->thread_registry->CheckLocked();
+ return static_cast<ThreadContext*>(
+ ctx->thread_registry->GetThreadLocked(tid));
+}
+
+static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
+ uptr addr = (uptr)arg;
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ if (tctx->status != ThreadStatusRunning)
+ return false;
+ ThreadState *thr = tctx->thr;
+ CHECK(thr);
+ return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
+ (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
+}
+
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
Context *ctx = CTX();
- ctx->thread_mtx.CheckLocked();
- for (unsigned i = 0; i < kMaxTid; i++) {
- ThreadContext *tctx = ctx->threads[i];
- if (tctx == 0 || tctx->status != ThreadStatusRunning)
- continue;
- ThreadState *thr = tctx->thr;
- CHECK(thr);
- if (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) {
- *is_stack = true;
- return tctx;
- }
- if (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size) {
- *is_stack = false;
- return tctx;
- }
- }
- return 0;
+ ctx->thread_registry->CheckLocked();
+ ThreadContext *tctx = static_cast<ThreadContext*>(
+ ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
+ (void*)addr));
+ if (!tctx)
+ return 0;
+ ThreadState *thr = tctx->thr;
+ CHECK(thr);
+ *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
+ return tctx;
}
#endif
@@ -231,7 +261,16 @@ void ScopedReport::AddMutex(const SyncVar *s) {
rep_->mutexes.PushBack(rm);
rm->id = s->uid;
rm->destroyed = false;
- rm->stack = SymbolizeStack(s->creation_stack);
+ rm->stack = 0;
+#ifndef TSAN_GO
+ uptr ssz = 0;
+ const uptr *stack = StackDepotGet(s->creation_stack_id, &ssz);
+ if (stack) {
+ StackTrace trace;
+ trace.Init(stack, ssz);
+ rm->stack = SymbolizeStack(trace);
+ }
+#endif
}
void ScopedReport::AddMutex(u64 id) {
@@ -269,27 +308,28 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
trace.Init(stack, ssz);
loc->stack = SymbolizeStack(trace);
}
- ThreadContext *tctx = FindThread(creat_tid);
+ ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
if (tctx)
AddThread(tctx);
return;
}
- if (allocator()->PointerIsMine((void*)addr)) {
- MBlock *b = user_mblock(0, (void*)addr);
- ThreadContext *tctx = FindThread(b->alloc_tid);
+ MBlock *b = 0;
+ if (allocator()->PointerIsMine((void*)addr)
+ && (b = user_mblock(0, (void*)addr))) {
+ ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
ReportLocation *loc = new(mem) ReportLocation();
rep_->locs.PushBack(loc);
loc->type = ReportLocationHeap;
loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
- loc->size = b->size;
- loc->tid = tctx ? tctx->tid : b->alloc_tid;
+ loc->size = b->Size();
+ loc->tid = tctx ? tctx->tid : b->Tid();
loc->name = 0;
loc->file = 0;
loc->line = 0;
loc->stack = 0;
uptr ssz = 0;
- const uptr *stack = StackDepotGet(b->alloc_stack_id, &ssz);
+ const uptr *stack = StackDepotGet(b->StackId(), &ssz);
if (stack) {
StackTrace trace;
trace.Init(stack, ssz);
@@ -328,6 +368,10 @@ void ScopedReport::AddSleep(u32 stack_id) {
}
#endif
+void ScopedReport::SetCount(int count) {
+ rep_->count = count;
+}
+
const ReportDesc *ScopedReport::GetReport() const {
return rep_;
}
@@ -336,21 +380,17 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
// This function restores stack trace and mutex set for the thread/epoch.
// It does so by getting stack trace and mutex set at the beginning of
// trace part, and then replaying the trace till the given epoch.
- ThreadContext *tctx = CTX()->threads[tid];
+ Context *ctx = CTX();
+ ctx->thread_registry->CheckLocked();
+ ThreadContext *tctx = static_cast<ThreadContext*>(
+ ctx->thread_registry->GetThreadLocked(tid));
if (tctx == 0)
return;
- Trace* trace = 0;
- if (tctx->status == ThreadStatusRunning) {
- CHECK(tctx->thr);
- trace = &tctx->thr->trace;
- } else if (tctx->status == ThreadStatusFinished
- || tctx->status == ThreadStatusDead) {
- if (tctx->dead_info == 0)
- return;
- trace = &tctx->dead_info->trace;
- } else {
+ if (tctx->status != ThreadStatusRunning
+ && tctx->status != ThreadStatusFinished
+ && tctx->status != ThreadStatusDead)
return;
- }
+ Trace* trace = ThreadTrace(tctx->tid);
Lock l(&trace->mtx);
const int partidx = (epoch / kTracePartSize) % TraceParts();
TraceHeader* hdr = &trace->headers[partidx];
@@ -460,12 +500,17 @@ static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
bool OutputReport(Context *ctx,
const ScopedReport &srep,
- const ReportStack *suppress_stack) {
+ const ReportStack *suppress_stack1,
+ const ReportStack *suppress_stack2) {
+ atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
const ReportDesc *rep = srep.GetReport();
- const uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack);
+ Suppression *supp = 0;
+ uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1, &supp);
+ if (suppress_pc == 0)
+ suppress_pc = IsSuppressed(rep->typ, suppress_stack2, &supp);
if (suppress_pc != 0) {
- FiredSuppression supp = {srep.GetReport()->typ, suppress_pc};
- ctx->fired_suppressions.PushBack(supp);
+ FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
+ ctx->fired_suppressions.PushBack(s);
}
if (OnReport(rep, suppress_pc != 0))
return false;
@@ -481,13 +526,24 @@ bool IsFiredSuppression(Context *ctx,
if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
continue;
for (uptr j = 0; j < trace.Size(); j++) {
- if (trace.Get(j) == ctx->fired_suppressions[k].pc)
+ FiredSuppression *s = &ctx->fired_suppressions[k];
+ if (trace.Get(j) == s->pc) {
+ if (s->supp)
+ s->supp->hit_count++;
return true;
+ }
}
}
return false;
}
+bool FrameIsInternal(const ReportStack *frame) {
+ return frame != 0 && frame->file != 0
+ && (internal_strstr(frame->file, "tsan_interceptors.cc") ||
+ internal_strstr(frame->file, "sanitizer_common_interceptors.inc") ||
+ internal_strstr(frame->file, "tsan_interface_"));
+}
+
// On programs that use Java we see weird reports like:
// WARNING: ThreadSanitizer: data race (pid=22512)
// Read of size 8 at 0x7d2b00084318 by thread 100:
@@ -497,30 +553,42 @@ bool IsFiredSuppression(Context *ctx,
// #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
// #1 <null> <null>:0 (0x7f7ad9b42707)
static bool IsJavaNonsense(const ReportDesc *rep) {
+#ifndef TSAN_GO
for (uptr i = 0; i < rep->mops.Size(); i++) {
ReportMop *mop = rep->mops[i];
ReportStack *frame = mop->stack;
- if (frame != 0 && frame->func != 0
- && (internal_strcmp(frame->func, "memset") == 0
- || internal_strcmp(frame->func, "memcpy") == 0
- || internal_strcmp(frame->func, "memmove") == 0
- || internal_strcmp(frame->func, "strcmp") == 0
- || internal_strcmp(frame->func, "strncpy") == 0
- || internal_strcmp(frame->func, "strlen") == 0
- || internal_strcmp(frame->func, "free") == 0
- || internal_strcmp(frame->func, "pthread_mutex_lock") == 0)) {
+ if (frame == 0
+ || (frame->func == 0 && frame->file == 0 && frame->line == 0
+ && frame->module == 0)) {
+ return true;
+ }
+ if (FrameIsInternal(frame)) {
frame = frame->next;
if (frame == 0
|| (frame->func == 0 && frame->file == 0 && frame->line == 0
- && frame->module == 0)) {
+ && frame->module == 0)) {
if (frame) {
- FiredSuppression supp = {rep->typ, frame->pc};
+ FiredSuppression supp = {rep->typ, frame->pc, 0};
CTX()->fired_suppressions.PushBack(supp);
}
return true;
}
}
}
+#endif
+ return false;
+}
+
+static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
+ Shadow s0(thr->racy_state[0]);
+ Shadow s1(thr->racy_state[1]);
+ CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
+ if (!s0.IsAtomic() && !s1.IsAtomic())
+ return true;
+ if (s0.IsAtomic() && s1.IsFreed())
+ return true;
+ if (s1.IsAtomic() && thr->is_freeing)
+ return true;
return false;
}
@@ -529,9 +597,8 @@ void ReportRace(ThreadState *thr) {
return;
ScopedInRtl in_rtl;
- if (thr->in_signal_handler)
- Printf("ThreadSanitizer: printing report from signal handler."
- " Can crash or hang.\n");
+ if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
+ return;
bool freed = false;
{
@@ -555,9 +622,14 @@ void ReportRace(ThreadState *thr) {
}
Context *ctx = CTX();
- Lock l0(&ctx->thread_mtx);
-
- ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace);
+ ThreadRegistryLock l0(ctx->thread_registry);
+
+ ReportType typ = ReportTypeRace;
+ if (thr->is_vptr_access)
+ typ = ReportTypeVptrRace;
+ else if (freed)
+ typ = ReportTypeUseAfterFree;
+ ScopedReport rep(typ);
const uptr kMop = 2;
StackTrace traces[kMop];
const uptr toppc = TraceTopPC(thr);
@@ -583,7 +655,8 @@ void ReportRace(ThreadState *thr) {
for (uptr i = 0; i < kMop; i++) {
FastState s(thr->racy_state[i]);
- ThreadContext *tctx = ctx->threads[s.tid()];
+ ThreadContext *tctx = static_cast<ThreadContext*>(
+ ctx->thread_registry->GetThreadLocked(s.tid()));
if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
continue;
rep.AddThread(tctx);
@@ -599,7 +672,8 @@ void ReportRace(ThreadState *thr) {
}
#endif
- if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack))
+ if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack,
+ rep.GetReport()->mops[1]->stack))
return;
AddRacyStacks(thr, traces, addr_min, addr_max);
@@ -611,4 +685,21 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) {
PrintStack(SymbolizeStack(trace));
}
+void PrintCurrentStackSlow() {
+#ifndef TSAN_GO
+ __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
+ sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
+ ptrace->SlowUnwindStack(__sanitizer::StackTrace::GetCurrentPc(),
+ kStackTraceMax);
+ for (uptr i = 0; i < ptrace->size / 2; i++) {
+ uptr tmp = ptrace->trace[i];
+ ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1];
+ ptrace->trace[ptrace->size - i - 1] = tmp;
+ }
+ StackTrace trace;
+ trace.Init(ptrace->trace, ptrace->size);
+ PrintStack(SymbolizeStack(trace));
+#endif
+}
+
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
index 359775927834..ee13fa18db3f 100644
--- a/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -20,144 +20,193 @@
namespace __tsan {
+// ThreadContext implementation.
+
+ThreadContext::ThreadContext(int tid)
+ : ThreadContextBase(tid)
+ , thr()
+ , sync()
+ , epoch0()
+ , epoch1() {
+}
+
#ifndef TSAN_GO
-const int kThreadQuarantineSize = 16;
-#else
-const int kThreadQuarantineSize = 64;
+ThreadContext::~ThreadContext() {
+}
#endif
-static void MaybeReportThreadLeak(ThreadContext *tctx) {
- if (tctx->detached)
+void ThreadContext::OnDead() {
+ sync.Reset();
+}
+
+void ThreadContext::OnJoined(void *arg) {
+ ThreadState *caller_thr = static_cast<ThreadState *>(arg);
+ caller_thr->clock.acquire(&sync);
+ StatInc(caller_thr, StatSyncAcquire);
+ sync.Reset();
+}
+
+struct OnCreatedArgs {
+ ThreadState *thr;
+ uptr pc;
+};
+
+void ThreadContext::OnCreated(void *arg) {
+ thr = 0;
+ if (tid == 0)
return;
- if (tctx->status != ThreadStatusCreated
- && tctx->status != ThreadStatusRunning
- && tctx->status != ThreadStatusFinished)
+ OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
+ args->thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
+ args->thr->clock.set(args->thr->tid, args->thr->fast_state.epoch());
+ args->thr->fast_synch_epoch = args->thr->fast_state.epoch();
+ args->thr->clock.release(&sync);
+ StatInc(args->thr, StatSyncRelease);
+#ifdef TSAN_GO
+ creation_stack.ObtainCurrent(args->thr, args->pc);
+#else
+ creation_stack_id = CurrentStackId(args->thr, args->pc);
+#endif
+ if (reuse_count == 0)
+ StatInc(args->thr, StatThreadMaxTid);
+}
+
+void ThreadContext::OnReset() {
+ sync.Reset();
+ FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
+ //!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
+}
+
+struct OnStartedArgs {
+ ThreadState *thr;
+ uptr stk_addr;
+ uptr stk_size;
+ uptr tls_addr;
+ uptr tls_size;
+};
+
+void ThreadContext::OnStarted(void *arg) {
+ OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
+ thr = args->thr;
+ // RoundUp so that one trace part does not contain events
+ // from different threads.
+ epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
+ epoch1 = (u64)-1;
+ new(thr) ThreadState(CTX(), tid, unique_id,
+ epoch0, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
+#ifdef TSAN_GO
+ // Setup dynamic shadow stack.
+ const int kInitStackSize = 8;
+ args->thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
+ kInitStackSize * sizeof(uptr));
+ args->thr->shadow_stack_pos = thr->shadow_stack;
+ args->thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
+#endif
+#ifndef TSAN_GO
+ AllocatorThreadStart(args->thr);
+#endif
+ thr = args->thr;
+ thr->fast_synch_epoch = epoch0;
+ thr->clock.set(tid, epoch0);
+ thr->clock.acquire(&sync);
+ thr->fast_state.SetHistorySize(flags()->history_size);
+ const uptr trace = (epoch0 / kTracePartSize) % TraceParts();
+ Trace *thr_trace = ThreadTrace(thr->tid);
+ thr_trace->headers[trace].epoch0 = epoch0;
+ StatInc(thr, StatSyncAcquire);
+ sync.Reset();
+ DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
+ "tls_addr=%zx tls_size=%zx\n",
+ tid, (uptr)epoch0, args->stk_addr, args->stk_size,
+ args->tls_addr, args->tls_size);
+ thr->is_alive = true;
+}
+
+void ThreadContext::OnFinished() {
+ if (!detached) {
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.release(&sync);
+ StatInc(thr, StatSyncRelease);
+ }
+ epoch1 = thr->fast_state.epoch();
+
+#ifndef TSAN_GO
+ AllocatorThreadFinish(thr);
+#endif
+ thr->~ThreadState();
+ StatAggregate(CTX()->stat, thr->stat);
+ thr = 0;
+}
+
+#ifndef TSAN_GO
+struct ThreadLeak {
+ ThreadContext *tctx;
+ int count;
+};
+
+static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
+ Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ if (tctx->detached || tctx->status != ThreadStatusFinished)
return;
- ScopedReport rep(ReportTypeThreadLeak);
- rep.AddThread(tctx);
- OutputReport(CTX(), rep);
+ for (uptr i = 0; i < leaks.Size(); i++) {
+ if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
+ leaks[i].count++;
+ return;
+ }
+ }
+ ThreadLeak leak = {tctx, 1};
+ leaks.PushBack(leak);
+}
+#endif
+
+static void ThreadCheckIgnore(ThreadState *thr) {
+ if (thr->ignore_reads_and_writes) {
+ Printf("ThreadSanitizer: thread T%d finished with ignores enabled.\n",
+ thr->tid);
+ }
}
void ThreadFinalize(ThreadState *thr) {
CHECK_GT(thr->in_rtl, 0);
+ ThreadCheckIgnore(thr);
+#ifndef TSAN_GO
if (!flags()->report_thread_leaks)
return;
- Context *ctx = CTX();
- Lock l(&ctx->thread_mtx);
- for (unsigned i = 0; i < kMaxTid; i++) {
- ThreadContext *tctx = ctx->threads[i];
- if (tctx == 0)
- continue;
- MaybeReportThreadLeak(tctx);
+ ThreadRegistryLock l(CTX()->thread_registry);
+ Vector<ThreadLeak> leaks(MBlockScopedBuf);
+ CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ MaybeReportThreadLeak, &leaks);
+ for (uptr i = 0; i < leaks.Size(); i++) {
+ ScopedReport rep(ReportTypeThreadLeak);
+ rep.AddThread(leaks[i].tctx);
+ rep.SetCount(leaks[i].count);
+ OutputReport(CTX(), rep);
}
+#endif
}
int ThreadCount(ThreadState *thr) {
CHECK_GT(thr->in_rtl, 0);
Context *ctx = CTX();
- Lock l(&ctx->thread_mtx);
- int cnt = 0;
- for (unsigned i = 0; i < kMaxTid; i++) {
- ThreadContext *tctx = ctx->threads[i];
- if (tctx == 0)
- continue;
- if (tctx->status != ThreadStatusCreated
- && tctx->status != ThreadStatusRunning)
- continue;
- cnt++;
- }
- return cnt;
-}
-
-static void ThreadDead(ThreadState *thr, ThreadContext *tctx) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
- CHECK(tctx->status == ThreadStatusRunning
- || tctx->status == ThreadStatusFinished);
- DPrintf("#%d: ThreadDead uid=%zu\n", thr->tid, tctx->user_id);
- tctx->status = ThreadStatusDead;
- tctx->user_id = 0;
- tctx->sync.Reset();
-
- // Put to dead list.
- tctx->dead_next = 0;
- if (ctx->dead_list_size == 0)
- ctx->dead_list_head = tctx;
- else
- ctx->dead_list_tail->dead_next = tctx;
- ctx->dead_list_tail = tctx;
- ctx->dead_list_size++;
+ uptr result;
+ ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
+ return (int)result;
}
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
CHECK_GT(thr->in_rtl, 0);
- Context *ctx = CTX();
- Lock l(&ctx->thread_mtx);
StatInc(thr, StatThreadCreate);
- int tid = -1;
- ThreadContext *tctx = 0;
- if (ctx->dead_list_size > kThreadQuarantineSize
- || ctx->thread_seq >= kMaxTid) {
- // Reusing old thread descriptor and tid.
- if (ctx->dead_list_size == 0) {
- Printf("ThreadSanitizer: %d thread limit exceeded. Dying.\n",
- kMaxTid);
- Die();
- }
- StatInc(thr, StatThreadReuse);
- tctx = ctx->dead_list_head;
- ctx->dead_list_head = tctx->dead_next;
- ctx->dead_list_size--;
- if (ctx->dead_list_size == 0) {
- CHECK_EQ(tctx->dead_next, 0);
- ctx->dead_list_head = 0;
- }
- CHECK_EQ(tctx->status, ThreadStatusDead);
- tctx->status = ThreadStatusInvalid;
- tctx->reuse_count++;
- tctx->sync.Reset();
- tid = tctx->tid;
- DestroyAndFree(tctx->dead_info);
- if (tctx->name) {
- internal_free(tctx->name);
- tctx->name = 0;
- }
- } else {
- // Allocating new thread descriptor and tid.
- StatInc(thr, StatThreadMaxTid);
- tid = ctx->thread_seq++;
- void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
- tctx = new(mem) ThreadContext(tid);
- ctx->threads[tid] = tctx;
- MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
- }
- CHECK_NE(tctx, 0);
- CHECK_GE(tid, 0);
- CHECK_LT(tid, kMaxTid);
+ Context *ctx = CTX();
+ OnCreatedArgs args = { thr, pc };
+ int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args);
DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
- CHECK_EQ(tctx->status, ThreadStatusInvalid);
- ctx->alive_threads++;
- if (ctx->max_alive_threads < ctx->alive_threads) {
- ctx->max_alive_threads++;
- CHECK_EQ(ctx->max_alive_threads, ctx->alive_threads);
- StatInc(thr, StatThreadMaxAlive);
- }
- tctx->status = ThreadStatusCreated;
- tctx->thr = 0;
- tctx->user_id = uid;
- tctx->unique_id = ctx->unique_thread_seq++;
- tctx->detached = detached;
- if (tid) {
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- thr->clock.set(thr->tid, thr->fast_state.epoch());
- thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.release(&tctx->sync);
- StatInc(thr, StatSyncRelease);
- tctx->creation_stack.ObtainCurrent(thr, pc);
- tctx->creation_tid = thr->tid;
- }
+ StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
return tid;
}
@@ -170,9 +219,8 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);
if (tid) {
- if (stk_addr && stk_size) {
- MemoryResetRange(thr, /*pc=*/ 1, stk_addr, stk_size);
- }
+ if (stk_addr && stk_size)
+ MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);
if (tls_addr && tls_size) {
// Check that the thr object is in tls;
@@ -183,113 +231,42 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
CHECK_GE(thr_end, tls_addr);
CHECK_LE(thr_end, tls_addr + tls_size);
// Since the thr object is huge, skip it.
- MemoryResetRange(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
- MemoryResetRange(thr, /*pc=*/ 2, thr_end, tls_addr + tls_size - thr_end);
+ MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
+ MemoryRangeImitateWrite(thr, /*pc=*/ 2,
+ thr_end, tls_addr + tls_size - thr_end);
}
}
- Lock l(&CTX()->thread_mtx);
- ThreadContext *tctx = CTX()->threads[tid];
- CHECK_NE(tctx, 0);
- CHECK_EQ(tctx->status, ThreadStatusCreated);
- tctx->status = ThreadStatusRunning;
- tctx->os_id = os_id;
- // RoundUp so that one trace part does not contain events
- // from different threads.
- tctx->epoch0 = RoundUp(tctx->epoch1 + 1, kTracePartSize);
- tctx->epoch1 = (u64)-1;
- new(thr) ThreadState(CTX(), tid, tctx->unique_id,
- tctx->epoch0, stk_addr, stk_size,
- tls_addr, tls_size);
-#ifdef TSAN_GO
- // Setup dynamic shadow stack.
- const int kInitStackSize = 8;
- thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
- kInitStackSize * sizeof(uptr));
- thr->shadow_stack_pos = thr->shadow_stack;
- thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
-#endif
- tctx->thr = thr;
- thr->fast_synch_epoch = tctx->epoch0;
- thr->clock.set(tid, tctx->epoch0);
- thr->clock.acquire(&tctx->sync);
- thr->fast_state.SetHistorySize(flags()->history_size);
- const uptr trace = (tctx->epoch0 / kTracePartSize) % TraceParts();
- thr->trace.headers[trace].epoch0 = tctx->epoch0;
- StatInc(thr, StatSyncAcquire);
- DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
- "tls_addr=%zx tls_size=%zx\n",
- tid, (uptr)tctx->epoch0, stk_addr, stk_size, tls_addr, tls_size);
- thr->is_alive = true;
+ OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
+ CTX()->thread_registry->StartThread(tid, os_id, &args);
}
void ThreadFinish(ThreadState *thr) {
CHECK_GT(thr->in_rtl, 0);
+ ThreadCheckIgnore(thr);
StatInc(thr, StatThreadFinish);
- // FIXME: Treat it as write.
if (thr->stk_addr && thr->stk_size)
- MemoryResetRange(thr, /*pc=*/ 3, thr->stk_addr, thr->stk_size);
- if (thr->tls_addr && thr->tls_size) {
- const uptr thr_beg = (uptr)thr;
- const uptr thr_end = (uptr)thr + sizeof(*thr);
- // Since the thr object is huge, skip it.
- MemoryResetRange(thr, /*pc=*/ 4, thr->tls_addr, thr_beg - thr->tls_addr);
- MemoryResetRange(thr, /*pc=*/ 5,
- thr_end, thr->tls_addr + thr->tls_size - thr_end);
- }
+ DontNeedShadowFor(thr->stk_addr, thr->stk_size);
+ if (thr->tls_addr && thr->tls_size)
+ DontNeedShadowFor(thr->tls_addr, thr->tls_size);
thr->is_alive = false;
Context *ctx = CTX();
- Lock l(&ctx->thread_mtx);
- ThreadContext *tctx = ctx->threads[thr->tid];
- CHECK_NE(tctx, 0);
- CHECK_EQ(tctx->status, ThreadStatusRunning);
- CHECK_GT(ctx->alive_threads, 0);
- ctx->alive_threads--;
- if (tctx->detached) {
- ThreadDead(thr, tctx);
- } else {
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- thr->clock.set(thr->tid, thr->fast_state.epoch());
- thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.release(&tctx->sync);
- StatInc(thr, StatSyncRelease);
- tctx->status = ThreadStatusFinished;
- }
+ ctx->thread_registry->FinishThread(thr->tid);
+}
- // Save from info about the thread.
- tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo)))
- ThreadDeadInfo();
- for (uptr i = 0; i < TraceParts(); i++) {
- tctx->dead_info->trace.headers[i].epoch0 = thr->trace.headers[i].epoch0;
- tctx->dead_info->trace.headers[i].stack0.CopyFrom(
- thr->trace.headers[i].stack0);
+static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
+ uptr uid = (uptr)arg;
+ if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
+ tctx->user_id = 0;
+ return true;
}
- tctx->epoch1 = thr->fast_state.epoch();
-
-#ifndef TSAN_GO
- AlloctorThreadFinish(thr);
-#endif
- thr->~ThreadState();
- StatAggregate(ctx->stat, thr->stat);
- tctx->thr = 0;
+ return false;
}
int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
CHECK_GT(thr->in_rtl, 0);
Context *ctx = CTX();
- Lock l(&ctx->thread_mtx);
- int res = -1;
- for (unsigned tid = 0; tid < kMaxTid; tid++) {
- ThreadContext *tctx = ctx->threads[tid];
- if (tctx != 0 && tctx->user_id == uid
- && tctx->status != ThreadStatusInvalid) {
- tctx->user_id = 0;
- res = tid;
- break;
- }
- }
+ int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
return res;
}
@@ -300,18 +277,7 @@ void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
CHECK_LT(tid, kMaxTid);
DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
Context *ctx = CTX();
- Lock l(&ctx->thread_mtx);
- ThreadContext *tctx = ctx->threads[tid];
- if (tctx->status == ThreadStatusInvalid) {
- Printf("ThreadSanitizer: join of non-existent thread\n");
- return;
- }
- // FIXME(dvyukov): print message and continue (it's user error).
- CHECK_EQ(tctx->detached, false);
- CHECK_EQ(tctx->status, ThreadStatusFinished);
- thr->clock.acquire(&tctx->sync);
- StatInc(thr, StatSyncAcquire);
- ThreadDead(thr, tctx);
+ ctx->thread_registry->JoinThread(tid, thr);
}
void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
@@ -319,31 +285,12 @@ void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
CHECK_GT(tid, 0);
CHECK_LT(tid, kMaxTid);
Context *ctx = CTX();
- Lock l(&ctx->thread_mtx);
- ThreadContext *tctx = ctx->threads[tid];
- if (tctx->status == ThreadStatusInvalid) {
- Printf("ThreadSanitizer: detach of non-existent thread\n");
- return;
- }
- if (tctx->status == ThreadStatusFinished) {
- ThreadDead(thr, tctx);
- } else {
- tctx->detached = true;
- }
+ ctx->thread_registry->DetachThread(tid);
}
void ThreadSetName(ThreadState *thr, const char *name) {
- Context *ctx = CTX();
- Lock l(&ctx->thread_mtx);
- ThreadContext *tctx = ctx->threads[thr->tid];
- CHECK_NE(tctx, 0);
- CHECK_EQ(tctx->status, ThreadStatusRunning);
- if (tctx->name) {
- internal_free(tctx->name);
- tctx->name = 0;
- }
- if (name)
- tctx->name = internal_strdup(name);
+ CHECK_GT(thr->in_rtl, 0);
+ CTX()->thread_registry->SetThreadName(thr->tid, name);
}
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
@@ -378,6 +325,13 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
StatInc(thr, StatMopRange);
+ if (*shadow_mem == kShadowRodata) {
+ // Access to .rodata section, no races here.
+ // Measurements show that it can be 10-20% of all memory accesses.
+ StatInc(thr, StatMopRangeRodata);
+ return;
+ }
+
FastState fast_state = thr->fast_state;
if (fast_state.GetIgnoreBit())
return;
@@ -394,7 +348,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
Shadow cur(fast_state);
cur.SetWrite(is_write);
cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write,
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
shadow_mem, cur);
}
if (unaligned)
@@ -405,7 +359,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
Shadow cur(fast_state);
cur.SetWrite(is_write);
cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write,
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
shadow_mem, cur);
shadow_mem += kShadowCnt;
}
@@ -415,24 +369,30 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
Shadow cur(fast_state);
cur.SetWrite(is_write);
cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write,
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
shadow_mem, cur);
}
}
-void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr) {
- MemoryAccess(thr, pc, addr, 0, 0);
-}
-
-void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr) {
- MemoryAccess(thr, pc, addr, 0, 1);
-}
-
-void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr) {
- MemoryAccess(thr, pc, addr, 3, 0);
-}
+void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
+ uptr size, uptr step, bool is_write) {
+ if (size == 0)
+ return;
+ FastState fast_state = thr->fast_state;
+ if (fast_state.GetIgnoreBit())
+ return;
+ StatInc(thr, StatMopRange);
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
-void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr) {
- MemoryAccess(thr, pc, addr, 3, 1);
+ for (uptr addr_end = addr + size; addr < addr_end; addr += step) {
+ u64 *shadow_mem = (u64*)MemToShadow(addr);
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kSizeLog1);
+ MemoryAccessImpl(thr, addr, kSizeLog1, is_write, false,
+ shadow_mem, cur);
+ }
}
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_stat.cc b/lib/tsan/rtl/tsan_stat.cc
index 82f1d6b5620f..9676e0872e08 100644
--- a/lib/tsan/rtl/tsan_stat.cc
+++ b/lib/tsan/rtl/tsan_stat.cc
@@ -38,6 +38,8 @@ void StatOutput(u64 *stat) {
name[StatMop8] = " size 8 ";
name[StatMopSame] = " Including same ";
name[StatMopRange] = " Including range ";
+ name[StatMopRodata] = " Including .rodata ";
+ name[StatMopRangeRodata] = " Including .rodata range ";
name[StatShadowProcessed] = "Shadow processed ";
name[StatShadowZero] = " Including empty ";
name[StatShadowNonZero] = " Including non empty ";
@@ -105,6 +107,7 @@ void StatOutput(u64 *stat) {
name[StatInt_realloc] = " realloc ";
name[StatInt_free] = " free ";
name[StatInt_cfree] = " cfree ";
+ name[StatInt_malloc_usable_size] = " malloc_usable_size ";
name[StatInt_mmap] = " mmap ";
name[StatInt_mmap64] = " mmap64 ";
name[StatInt_munmap] = " munmap ";
@@ -135,6 +138,8 @@ void StatOutput(u64 *stat) {
name[StatInt_strcpy] = " strcpy ";
name[StatInt_strncpy] = " strncpy ";
name[StatInt_strstr] = " strstr ";
+ name[StatInt_strcasecmp] = " strcasecmp ";
+ name[StatInt_strncasecmp] = " strncasecmp ";
name[StatInt_atexit] = " atexit ";
name[StatInt___cxa_guard_acquire] = " __cxa_guard_acquire ";
name[StatInt___cxa_guard_release] = " __cxa_guard_release ";
@@ -174,6 +179,7 @@ void StatOutput(u64 *stat) {
name[StatInt_pthread_barrier_destroy] = " pthread_barrier_destroy ";
name[StatInt_pthread_barrier_wait] = " pthread_barrier_wait ";
name[StatInt_pthread_once] = " pthread_once ";
+ name[StatInt_pthread_getschedparam] = " pthread_getschedparam ";
name[StatInt_sem_init] = " sem_init ";
name[StatInt_sem_destroy] = " sem_destroy ";
name[StatInt_sem_wait] = " sem_wait ";
@@ -181,6 +187,18 @@ void StatOutput(u64 *stat) {
name[StatInt_sem_timedwait] = " sem_timedwait ";
name[StatInt_sem_post] = " sem_post ";
name[StatInt_sem_getvalue] = " sem_getvalue ";
+ name[StatInt_stat] = " stat ";
+ name[StatInt___xstat] = " __xstat ";
+ name[StatInt_stat64] = " stat64 ";
+ name[StatInt___xstat64] = " __xstat64 ";
+ name[StatInt_lstat] = " lstat ";
+ name[StatInt___lxstat] = " __lxstat ";
+ name[StatInt_lstat64] = " lstat64 ";
+ name[StatInt___lxstat64] = " __lxstat64 ";
+ name[StatInt_fstat] = " fstat ";
+ name[StatInt___fxstat] = " __fxstat ";
+ name[StatInt_fstat64] = " fstat64 ";
+ name[StatInt___fxstat64] = " __fxstat64 ";
name[StatInt_open] = " open ";
name[StatInt_open64] = " open64 ";
name[StatInt_creat] = " creat ";
@@ -195,12 +213,15 @@ void StatOutput(u64 *stat) {
name[StatInt_socket] = " socket ";
name[StatInt_socketpair] = " socketpair ";
name[StatInt_connect] = " connect ";
+ name[StatInt_bind] = " bind ";
+ name[StatInt_listen] = " listen ";
name[StatInt_accept] = " accept ";
name[StatInt_accept4] = " accept4 ";
name[StatInt_epoll_create] = " epoll_create ";
name[StatInt_epoll_create1] = " epoll_create1 ";
name[StatInt_close] = " close ";
name[StatInt___close] = " __close ";
+ name[StatInt___res_iclose] = " __res_iclose ";
name[StatInt_pipe] = " pipe ";
name[StatInt_pipe2] = " pipe2 ";
name[StatInt_read] = " read ";
@@ -224,6 +245,8 @@ void StatOutput(u64 *stat) {
name[StatInt_fclose] = " fclose ";
name[StatInt_fread] = " fread ";
name[StatInt_fwrite] = " fwrite ";
+ name[StatInt_fflush] = " fflush ";
+ name[StatInt_abort] = " abort ";
name[StatInt_puts] = " puts ";
name[StatInt_rmdir] = " rmdir ";
name[StatInt_opendir] = " opendir ";
@@ -231,6 +254,10 @@ void StatOutput(u64 *stat) {
name[StatInt_epoll_wait] = " epoll_wait ";
name[StatInt_poll] = " poll ";
name[StatInt_sigaction] = " sigaction ";
+ name[StatInt_signal] = " signal ";
+ name[StatInt_raise] = " raise ";
+ name[StatInt_kill] = " kill ";
+ name[StatInt_pthread_kill] = " pthread_kill ";
name[StatInt_sleep] = " sleep ";
name[StatInt_usleep] = " usleep ";
name[StatInt_nanosleep] = " nanosleep ";
@@ -242,6 +269,59 @@ void StatOutput(u64 *stat) {
name[StatInt_scanf] = " scanf ";
name[StatInt_sscanf] = " sscanf ";
name[StatInt_fscanf] = " fscanf ";
+ name[StatInt___isoc99_vscanf] = " vscanf ";
+ name[StatInt___isoc99_vsscanf] = " vsscanf ";
+ name[StatInt___isoc99_vfscanf] = " vfscanf ";
+ name[StatInt___isoc99_scanf] = " scanf ";
+ name[StatInt___isoc99_sscanf] = " sscanf ";
+ name[StatInt___isoc99_fscanf] = " fscanf ";
+ name[StatInt_on_exit] = " on_exit ";
+ name[StatInt___cxa_atexit] = " __cxa_atexit ";
+ name[StatInt_localtime] = " localtime ";
+ name[StatInt_localtime_r] = " localtime_r ";
+ name[StatInt_gmtime] = " gmtime ";
+ name[StatInt_gmtime_r] = " gmtime_r ";
+ name[StatInt_ctime] = " ctime ";
+ name[StatInt_ctime_r] = " ctime_r ";
+ name[StatInt_asctime] = " asctime ";
+ name[StatInt_asctime_r] = " asctime_r ";
+ name[StatInt_frexp] = " frexp ";
+ name[StatInt_frexpf] = " frexpf ";
+ name[StatInt_frexpl] = " frexpl ";
+ name[StatInt_getpwnam] = " getpwnam ";
+ name[StatInt_getpwuid] = " getpwuid ";
+ name[StatInt_getgrnam] = " getgrnam ";
+ name[StatInt_getgrgid] = " getgrgid ";
+ name[StatInt_getpwnam_r] = " getpwnam_r ";
+ name[StatInt_getpwuid_r] = " getpwuid_r ";
+ name[StatInt_getgrnam_r] = " getgrnam_r ";
+ name[StatInt_getgrgid_r] = " getgrgid_r ";
+ name[StatInt_clock_getres] = " clock_getres ";
+ name[StatInt_clock_gettime] = " clock_gettime ";
+ name[StatInt_clock_settime] = " clock_settime ";
+ name[StatInt_getitimer] = " getitimer ";
+ name[StatInt_setitimer] = " setitimer ";
+ name[StatInt_time] = " time ";
+ name[StatInt_glob] = " glob ";
+ name[StatInt_glob64] = " glob64 ";
+ name[StatInt_wait] = " wait ";
+ name[StatInt_waitid] = " waitid ";
+ name[StatInt_waitpid] = " waitpid ";
+ name[StatInt_wait3] = " wait3 ";
+ name[StatInt_wait4] = " wait4 ";
+ name[StatInt_inet_ntop] = " inet_ntop ";
+ name[StatInt_inet_pton] = " inet_pton ";
+ name[StatInt_getaddrinfo] = " getaddrinfo ";
+ name[StatInt_getsockname] = " getsockname ";
+ name[StatInt_gethostent] = " gethostent ";
+ name[StatInt_gethostbyname] = " gethostbyname ";
+ name[StatInt_gethostbyname2] = " gethostbyname2 ";
+ name[StatInt_gethostbyaddr] = " gethostbyaddr ";
+ name[StatInt_gethostent_r] = " gethostent_r ";
+ name[StatInt_gethostbyname_r] = " gethostbyname_r ";
+ name[StatInt_gethostbyname2_r] = " gethostbyname2_r ";
+ name[StatInt_gethostbyaddr_r] = " gethostbyaddr_r ";
+ name[StatInt_getsockopt] = " getsockopt ";
name[StatAnnotation] = "Dynamic annotations ";
name[StatAnnotateHappensBefore] = " HappensBefore ";
@@ -251,6 +331,7 @@ void StatOutput(u64 *stat) {
name[StatAnnotateMutexIsNotPHB] = " MutexIsNotPHB ";
name[StatAnnotateCondVarWait] = " CondVarWait ";
name[StatAnnotateRWLockCreate] = " RWLockCreate ";
+ name[StatAnnotateRWLockCreateStatic] = " StatAnnotateRWLockCreateStatic ";
name[StatAnnotateRWLockDestroy] = " RWLockDestroy ";
name[StatAnnotateRWLockAcquired] = " RWLockAcquired ";
name[StatAnnotateRWLockReleased] = " RWLockReleased ";
@@ -287,6 +368,7 @@ void StatOutput(u64 *stat) {
name[StatMtxAnnotations] = " Annotations ";
name[StatMtxMBlock] = " MBlock ";
name[StatMtxJavaMBlock] = " JavaMBlock ";
+ name[StatMtxFD] = " FD ";
Printf("Statistics:\n");
for (int i = 0; i < StatCnt; i++)
diff --git a/lib/tsan/rtl/tsan_stat.h b/lib/tsan/rtl/tsan_stat.h
index 58c5f23af40b..d5c8b4389394 100644
--- a/lib/tsan/rtl/tsan_stat.h
+++ b/lib/tsan/rtl/tsan_stat.h
@@ -27,6 +27,8 @@ enum StatType {
StatMop8,
StatMopSame,
StatMopRange,
+ StatMopRodata,
+ StatMopRangeRodata,
StatShadowProcessed,
StatShadowZero,
StatShadowNonZero, // Derived.
@@ -102,6 +104,7 @@ enum StatType {
StatInt_realloc,
StatInt_free,
StatInt_cfree,
+ StatInt_malloc_usable_size,
StatInt_mmap,
StatInt_mmap64,
StatInt_munmap,
@@ -131,6 +134,8 @@ enum StatType {
StatInt_strncmp,
StatInt_strcpy,
StatInt_strncpy,
+ StatInt_strcasecmp,
+ StatInt_strncasecmp,
StatInt_strstr,
StatInt_atexit,
StatInt___cxa_guard_acquire,
@@ -169,6 +174,7 @@ enum StatType {
StatInt_pthread_barrier_destroy,
StatInt_pthread_barrier_wait,
StatInt_pthread_once,
+ StatInt_pthread_getschedparam,
StatInt_sem_init,
StatInt_sem_destroy,
StatInt_sem_wait,
@@ -176,6 +182,18 @@ enum StatType {
StatInt_sem_timedwait,
StatInt_sem_post,
StatInt_sem_getvalue,
+ StatInt_stat,
+ StatInt___xstat,
+ StatInt_stat64,
+ StatInt___xstat64,
+ StatInt_lstat,
+ StatInt___lxstat,
+ StatInt_lstat64,
+ StatInt___lxstat64,
+ StatInt_fstat,
+ StatInt___fxstat,
+ StatInt_fstat64,
+ StatInt___fxstat64,
StatInt_open,
StatInt_open64,
StatInt_creat,
@@ -190,12 +208,15 @@ enum StatType {
StatInt_socket,
StatInt_socketpair,
StatInt_connect,
+ StatInt_bind,
+ StatInt_listen,
StatInt_accept,
StatInt_accept4,
StatInt_epoll_create,
StatInt_epoll_create1,
StatInt_close,
StatInt___close,
+ StatInt___res_iclose,
StatInt_pipe,
StatInt_pipe2,
StatInt_read,
@@ -219,6 +240,8 @@ enum StatType {
StatInt_fclose,
StatInt_fread,
StatInt_fwrite,
+ StatInt_fflush,
+ StatInt_abort,
StatInt_puts,
StatInt_rmdir,
StatInt_opendir,
@@ -241,6 +264,59 @@ enum StatType {
StatInt_scanf,
StatInt_sscanf,
StatInt_fscanf,
+ StatInt___isoc99_vscanf,
+ StatInt___isoc99_vsscanf,
+ StatInt___isoc99_vfscanf,
+ StatInt___isoc99_scanf,
+ StatInt___isoc99_sscanf,
+ StatInt___isoc99_fscanf,
+ StatInt_on_exit,
+ StatInt___cxa_atexit,
+ StatInt_localtime,
+ StatInt_localtime_r,
+ StatInt_gmtime,
+ StatInt_gmtime_r,
+ StatInt_ctime,
+ StatInt_ctime_r,
+ StatInt_asctime,
+ StatInt_asctime_r,
+ StatInt_frexp,
+ StatInt_frexpf,
+ StatInt_frexpl,
+ StatInt_getpwnam,
+ StatInt_getpwuid,
+ StatInt_getgrnam,
+ StatInt_getgrgid,
+ StatInt_getpwnam_r,
+ StatInt_getpwuid_r,
+ StatInt_getgrnam_r,
+ StatInt_getgrgid_r,
+ StatInt_clock_getres,
+ StatInt_clock_gettime,
+ StatInt_clock_settime,
+ StatInt_getitimer,
+ StatInt_setitimer,
+ StatInt_time,
+ StatInt_glob,
+ StatInt_glob64,
+ StatInt_wait,
+ StatInt_waitid,
+ StatInt_waitpid,
+ StatInt_wait3,
+ StatInt_wait4,
+ StatInt_inet_ntop,
+ StatInt_inet_pton,
+ StatInt_getaddrinfo,
+ StatInt_getsockname,
+ StatInt_gethostent,
+ StatInt_gethostbyname,
+ StatInt_gethostbyname2,
+ StatInt_gethostbyaddr,
+ StatInt_gethostent_r,
+ StatInt_gethostbyname_r,
+ StatInt_gethostbyname2_r,
+ StatInt_gethostbyaddr_r,
+ StatInt_getsockopt,
// Dynamic annotations.
StatAnnotation,
@@ -289,6 +365,7 @@ enum StatType {
StatMtxAtExit,
StatMtxMBlock,
StatMtxJavaMBlock,
+ StatMtxFD,
// This must be the last.
StatCnt
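Everything added to this enum has to stay above StatCnt: the trailing StatCnt value sizes the counter and name arrays and bounds the print loop in StatOutput() above. A minimal standalone sketch of that "enum with a trailing count" idiom, with invented counter names:

#include <cstdio>

enum Counter {
  CounterAlloc,
  CounterFree,
  CounterCnt  // must stay last: doubles as array size and loop bound
};

static unsigned long long counters[CounterCnt];
static const char *counter_names[CounterCnt] = { "alloc", "free" };

int main() {
  counters[CounterAlloc] += 3;
  counters[CounterFree] += 1;
  for (int i = 0; i < CounterCnt; i++)
    std::printf("%-8s %llu\n", counter_names[i], counters[i]);
  return 0;
}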
diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc
index 5316f6db6a0a..6c49355bed88 100644
--- a/lib/tsan/rtl/tsan_suppressions.cc
+++ b/lib/tsan/rtl/tsan_suppressions.cc
@@ -19,6 +19,13 @@
#include "tsan_mman.h"
#include "tsan_platform.h"
+// Can be overridden in frontend.
+#ifndef TSAN_GO
+extern "C" const char *WEAK __tsan_default_suppressions() {
+ return 0;
+}
+#endif
+
namespace __tsan {
static Suppression *g_suppressions;
@@ -31,12 +38,13 @@ static char *ReadFile(const char *filename) {
internal_snprintf(tmp.data(), tmp.size(), "%s", filename);
else
internal_snprintf(tmp.data(), tmp.size(), "%s/%s", GetPwd(), filename);
- fd_t fd = internal_open(tmp.data(), false);
- if (fd == kInvalidFd) {
+ uptr openrv = OpenFile(tmp.data(), false);
+ if (internal_iserror(openrv)) {
Printf("ThreadSanitizer: failed to open suppressions file '%s'\n",
tmp.data());
Die();
}
+ fd_t fd = openrv;
const uptr fsize = internal_filesize(fd);
if (fsize == (uptr)-1) {
Printf("ThreadSanitizer: failed to stat suppressions file '%s'\n",
@@ -80,8 +88,7 @@ bool SuppressionMatch(char *templ, const char *str) {
return true;
}
-Suppression *SuppressionParse(const char* supp) {
- Suppression *head = 0;
+Suppression *SuppressionParse(Suppression *head, const char* supp) {
const char *line = supp;
while (line) {
while (line[0] == ' ' || line[0] == '\t')
@@ -121,6 +128,7 @@ Suppression *SuppressionParse(const char* supp) {
s->templ = (char*)internal_alloc(MBlockSuppression, end2 - line + 1);
internal_memcpy(s->templ, line, end2 - line);
s->templ[end2 - line] = 0;
+ s->hit_count = 0;
}
if (end[0] == 0)
break;
@@ -130,11 +138,15 @@ Suppression *SuppressionParse(const char* supp) {
}
void InitializeSuppressions() {
- char *supp = ReadFile(flags()->suppressions);
- g_suppressions = SuppressionParse(supp);
+ const char *supp = ReadFile(flags()->suppressions);
+ g_suppressions = SuppressionParse(0, supp);
+#ifndef TSAN_GO
+ supp = __tsan_default_suppressions();
+ g_suppressions = SuppressionParse(g_suppressions, supp);
+#endif
}
-uptr IsSuppressed(ReportType typ, const ReportStack *stack) {
+uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) {
if (g_suppressions == 0 || stack == 0)
return 0;
SuppressionType stype;
@@ -152,12 +164,41 @@ uptr IsSuppressed(ReportType typ, const ReportStack *stack) {
for (Suppression *supp = g_suppressions; supp; supp = supp->next) {
if (stype == supp->type &&
(SuppressionMatch(supp->templ, frame->func) ||
- SuppressionMatch(supp->templ, frame->file))) {
+ SuppressionMatch(supp->templ, frame->file) ||
+ SuppressionMatch(supp->templ, frame->module))) {
DPrintf("ThreadSanitizer: matched suppression '%s'\n", supp->templ);
+ supp->hit_count++;
+ *sp = supp;
return frame->pc;
}
}
}
return 0;
}
+
+static const char *SuppTypeStr(SuppressionType t) {
+ switch (t) {
+ case SuppressionRace: return "race";
+ case SuppressionMutex: return "mutex";
+ case SuppressionThread: return "thread";
+ case SuppressionSignal: return "signal";
+ }
+ CHECK(0);
+ return "unknown";
+}
+
+void PrintMatchedSuppressions() {
+ int hit_count = 0;
+ for (Suppression *supp = g_suppressions; supp; supp = supp->next)
+ hit_count += supp->hit_count;
+ if (hit_count == 0)
+ return;
+ Printf("ThreadSanitizer: Matched %d suppressions (pid=%d):\n",
+ hit_count, (int)internal_getpid());
+ for (Suppression *supp = g_suppressions; supp; supp = supp->next) {
+ if (supp->hit_count == 0)
+ continue;
+ Printf("%d %s:%s\n", supp->hit_count, SuppTypeStr(supp->type), supp->templ);
+ }
+}
} // namespace __tsan
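The weak __tsan_default_suppressions() hook introduced above lets a frontend compile its suppressions in instead of pointing the runtime at a file. A hedged sketch of what an override might look like; the entries are invented, but the string uses the same one-entry-per-line "type:pattern" format the parser expects:

// In any object file linked into the instrumented program: a strong
// definition overrides the weak default that returns 0.
extern "C" const char *__tsan_default_suppressions() {
  return "race:HypotheticalNoisyLogger\n"
         "thread:HypotheticalDetachedWorker\n";
}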
diff --git a/lib/tsan/rtl/tsan_suppressions.h b/lib/tsan/rtl/tsan_suppressions.h
index 61a4cca9d17a..1c98363383dc 100644
--- a/lib/tsan/rtl/tsan_suppressions.h
+++ b/lib/tsan/rtl/tsan_suppressions.h
@@ -17,10 +17,6 @@
namespace __tsan {
-void InitializeSuppressions();
-void FinalizeSuppressions();
-uptr IsSuppressed(ReportType typ, const ReportStack *stack);
-
// Exposed for testing.
enum SuppressionType {
SuppressionRace,
@@ -33,9 +29,14 @@ struct Suppression {
Suppression *next;
SuppressionType type;
char *templ;
+ int hit_count;
};
-Suppression *SuppressionParse(const char* supp);
+void InitializeSuppressions();
+void FinalizeSuppressions();
+void PrintMatchedSuppressions();
+uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp);
+Suppression *SuppressionParse(Suppression *head, const char* supp);
bool SuppressionMatch(char *templ, const char *str);
} // namespace __tsan
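With these signatures, suppressions from several sources end up on a single chained list and every match is counted. A non-compilable fragment sketching the flow, assuming the declarations above (file_contents, typ and report_stack are placeholders, not real variables):

// Chain compiled-in defaults behind the file-based list, exactly as
// InitializeSuppressions() now does:
Suppression *list = SuppressionParse(0, file_contents);
list = SuppressionParse(list, __tsan_default_suppressions());

// Later, a match bumps hit_count and is handed back via the out-parameter:
Suppression *matched = 0;
if (IsSuppressed(typ, report_stack, &matched) != 0 && matched) {
  // matched->hit_count was incremented, so PrintMatchedSuppressions() will
  // list this entry at exit as "<hit_count> <type>:<pattern>".
}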
diff --git a/lib/tsan/rtl/tsan_symbolize.cc b/lib/tsan/rtl/tsan_symbolize.cc
index 29dfe237ffd9..12226064f5a4 100644
--- a/lib/tsan/rtl/tsan_symbolize.cc
+++ b/lib/tsan/rtl/tsan_symbolize.cc
@@ -18,9 +18,24 @@
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_flags.h"
#include "tsan_report.h"
+#include "tsan_rtl.h"
namespace __tsan {
+struct ScopedInSymbolizer {
+ ScopedInSymbolizer() {
+ ThreadState *thr = cur_thread();
+ CHECK(!thr->in_symbolizer);
+ thr->in_symbolizer = true;
+ }
+
+ ~ScopedInSymbolizer() {
+ ThreadState *thr = cur_thread();
+ CHECK(thr->in_symbolizer);
+ thr->in_symbolizer = false;
+ }
+};
+
ReportStack *NewReportStackEntry(uptr addr) {
ReportStack *ent = (ReportStack*)internal_alloc(MBlockReportStack,
sizeof(ReportStack));
@@ -55,35 +70,36 @@ static ReportStack *NewReportStackEntry(const AddressInfo &info) {
}
ReportStack *SymbolizeCode(uptr addr) {
- if (flags()->external_symbolizer_path[0]) {
- static const uptr kMaxAddrFrames = 16;
- InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
- for (uptr i = 0; i < kMaxAddrFrames; i++)
- new(&addr_frames[i]) AddressInfo();
- uptr addr_frames_num = __sanitizer::SymbolizeCode(addr, addr_frames.data(),
- kMaxAddrFrames);
- if (addr_frames_num == 0)
- return NewReportStackEntry(addr);
- ReportStack *top = 0;
- ReportStack *bottom = 0;
- for (uptr i = 0; i < addr_frames_num; i++) {
- ReportStack *cur_entry = NewReportStackEntry(addr_frames[i]);
- CHECK(cur_entry);
- addr_frames[i].Clear();
- if (i == 0)
- top = cur_entry;
- else
- bottom->next = cur_entry;
- bottom = cur_entry;
- }
- return top;
+ if (!IsSymbolizerAvailable())
+ return SymbolizeCodeAddr2Line(addr);
+ ScopedInSymbolizer in_symbolizer;
+ static const uptr kMaxAddrFrames = 16;
+ InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
+ for (uptr i = 0; i < kMaxAddrFrames; i++)
+ new(&addr_frames[i]) AddressInfo();
+ uptr addr_frames_num = __sanitizer::SymbolizeCode(addr, addr_frames.data(),
+ kMaxAddrFrames);
+ if (addr_frames_num == 0)
+ return NewReportStackEntry(addr);
+ ReportStack *top = 0;
+ ReportStack *bottom = 0;
+ for (uptr i = 0; i < addr_frames_num; i++) {
+ ReportStack *cur_entry = NewReportStackEntry(addr_frames[i]);
+ CHECK(cur_entry);
+ addr_frames[i].Clear();
+ if (i == 0)
+ top = cur_entry;
+ else
+ bottom->next = cur_entry;
+ bottom = cur_entry;
}
- return SymbolizeCodeAddr2Line(addr);
+ return top;
}
ReportLocation *SymbolizeData(uptr addr) {
- if (flags()->external_symbolizer_path[0] == 0)
+ if (!IsSymbolizerAvailable())
return 0;
+ ScopedInSymbolizer in_symbolizer;
DataInfo info;
if (!__sanitizer::SymbolizeData(addr, &info))
return 0;
@@ -100,4 +116,11 @@ ReportLocation *SymbolizeData(uptr addr) {
return ent;
}
+void SymbolizeFlush() {
+ if (!IsSymbolizerAvailable())
+ return;
+ ScopedInSymbolizer in_symbolizer;
+ __sanitizer::FlushSymbolizer();
+}
+
} // namespace __tsan
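ScopedInSymbolizer above is the usual RAII scoped-flag guard: the thread's in_symbolizer bit is set for exactly the lifetime of a stack object, so code reached from inside the symbolizer can detect that fact, and the bit is cleared on every exit path. The same pattern as a standalone sketch, with the thread state reduced to a plain thread_local for illustration:

#include <cassert>

thread_local bool in_symbolizer = false;

struct ScopedInFlag {
  ScopedInFlag()  { assert(!in_symbolizer); in_symbolizer = true; }
  ~ScopedInFlag() { assert(in_symbolizer); in_symbolizer = false; }
};

void SymbolizeSomething() {
  ScopedInFlag guard;  // set on entry...
  // ... call out to the symbolizer here ...
}                      // ... cleared on every return path

int main() {
  SymbolizeSomething();
  assert(!in_symbolizer);
  return 0;
}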
diff --git a/lib/tsan/rtl/tsan_symbolize.h b/lib/tsan/rtl/tsan_symbolize.h
index 29193043cd70..7bc6123df57d 100644
--- a/lib/tsan/rtl/tsan_symbolize.h
+++ b/lib/tsan/rtl/tsan_symbolize.h
@@ -20,6 +20,7 @@ namespace __tsan {
ReportStack *SymbolizeCode(uptr addr);
ReportLocation *SymbolizeData(uptr addr);
+void SymbolizeFlush();
ReportStack *SymbolizeCodeAddr2Line(uptr addr);
diff --git a/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc b/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc
index 76926e2b5aaf..47f9e1fbf418 100644
--- a/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc
+++ b/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc
@@ -87,7 +87,8 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
DlIteratePhdrCtx *ctx = (DlIteratePhdrCtx*)arg;
InternalScopedBuffer<char> tmp(128);
if (ctx->is_first) {
- internal_snprintf(tmp.data(), tmp.size(), "/proc/%d/exe", GetPid());
+ internal_snprintf(tmp.data(), tmp.size(), "/proc/%d/exe",
+ (int)internal_getpid());
info->dlpi_name = tmp.data();
}
ctx->is_first = false;
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
index b25346ef344f..c6ddcdb37426 100644
--- a/lib/tsan/rtl/tsan_sync.cc
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -63,7 +63,7 @@ SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
SyncVar *res = new(mem) SyncVar(addr, uid);
#ifndef TSAN_GO
- res->creation_stack.ObtainCurrent(thr, pc);
+ res->creation_stack_id = CurrentStackId(thr, pc);
#endif
return res;
}
@@ -82,9 +82,10 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
// the hashmap anyway.
if (PrimaryAllocator::PointerIsMine((void*)addr)) {
MBlock *b = user_mblock(thr, (void*)addr);
- Lock l(&b->mtx);
+ CHECK_NE(b, 0);
+ MBlock::ScopedLock l(b);
SyncVar *res = 0;
- for (res = b->head; res; res = res->next) {
+ for (res = b->ListHead(); res; res = res->next) {
if (res->addr == addr)
break;
}
@@ -92,8 +93,7 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
if (!create)
return 0;
res = Create(thr, pc, addr);
- res->next = b->head;
- b->head = res;
+ b->ListPush(res);
}
if (write_lock)
res->mtx.Lock();
@@ -147,27 +147,37 @@ SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
}
if (PrimaryAllocator::PointerIsMine((void*)addr)) {
MBlock *b = user_mblock(thr, (void*)addr);
+ CHECK_NE(b, 0);
SyncVar *res = 0;
{
- Lock l(&b->mtx);
- SyncVar **prev = &b->head;
- res = *prev;
- while (res) {
+ MBlock::ScopedLock l(b);
+ res = b->ListHead();
+ if (res) {
if (res->addr == addr) {
if (res->is_linker_init)
return 0;
- *prev = res->next;
- break;
+ b->ListPop();
+ } else {
+ SyncVar **prev = &res->next;
+ res = *prev;
+ while (res) {
+ if (res->addr == addr) {
+ if (res->is_linker_init)
+ return 0;
+ *prev = res->next;
+ break;
+ }
+ prev = &res->next;
+ res = *prev;
+ }
+ }
+ if (res) {
+ StatInc(thr, StatSyncDestroyed);
+ res->mtx.Lock();
+ res->mtx.Unlock();
}
- prev = &res->next;
- res = *prev;
}
}
- if (res) {
- StatInc(thr, StatSyncDestroyed);
- res->mtx.Lock();
- res->mtx.Unlock();
- }
return res;
}
#endif
@@ -197,26 +207,6 @@ SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
return res;
}
-uptr SyncVar::GetMemoryConsumption() {
- return sizeof(*this)
- + clock.size() * sizeof(u64)
- + read_clock.size() * sizeof(u64)
- + creation_stack.Size() * sizeof(uptr);
-}
-
-uptr SyncTab::GetMemoryConsumption(uptr *nsync) {
- uptr mem = 0;
- for (int i = 0; i < kPartCount; i++) {
- Part *p = &tab_[i];
- Lock l(&p->mtx);
- for (SyncVar *s = p->val; s; s = s->next) {
- *nsync += 1;
- mem += s->GetMemoryConsumption();
- }
- }
- return mem;
-}
-
int SyncTab::PartIdx(uptr addr) {
return (addr >> 3) % kPartCount;
}
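The rewritten GetAndRemove() above pops a matching list head via MBlock::ListPop() and splices interior matches out by walking a SyncVar** to each next field. The same unlink logic on a toy node type, as a standalone sketch (type and function names invented):

#include <cassert>

struct Node { long addr; Node *next; };

// Removes and returns the node with the given addr, or 0 if absent.
Node *Remove(Node **head, long addr) {
  if (*head == 0)
    return 0;
  if ((*head)->addr == addr) {  // head case: pop it directly
    Node *res = *head;
    *head = res->next;
    return res;
  }
  for (Node **prev = &(*head)->next; *prev; prev = &(*prev)->next) {
    if ((*prev)->addr == addr) {  // interior case: splice it out
      Node *res = *prev;
      *prev = res->next;
      return res;
    }
  }
  return 0;
}

int main() {
  Node c = {3, 0}, b = {2, &c}, a = {1, &b};
  Node *head = &a;
  assert(Remove(&head, 2) == &b && a.next == &c);  // interior removal
  assert(Remove(&head, 1) == &a && head == &c);    // head removal
  assert(Remove(&head, 9) == 0);                   // absent address
  return 0;
}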
diff --git a/lib/tsan/rtl/tsan_sync.h b/lib/tsan/rtl/tsan_sync.h
index 77749e22ffc2..823af543f590 100644
--- a/lib/tsan/rtl/tsan_sync.h
+++ b/lib/tsan/rtl/tsan_sync.h
@@ -59,7 +59,7 @@ struct SyncVar {
const u64 uid; // Globally unique id.
SyncClock clock;
SyncClock read_clock; // Used for rw mutexes only.
- StackTrace creation_stack;
+ u32 creation_stack_id;
int owner_tid; // Set only by exclusive owners.
u64 last_lock;
int recursion;
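Replacing the embedded StackTrace with a u32 creation_stack_id (filled from CurrentStackId() in Create() above) means the creation stack is stored once in a shared depot and each SyncVar carries only a small handle. A rough standalone sketch of that interning idea; STL containers are used purely for illustration and the real depot is nothing like this:

#include <cstdint>
#include <map>
#include <vector>

typedef std::vector<uintptr_t> Frames;

static std::map<Frames, uint32_t> depot_index;  // frames -> id
static std::vector<Frames> depot_storage;       // id -> frames

// Returns a stable 32-bit id for the given call stack, storing it only once.
uint32_t InternStack(const Frames &frames) {
  std::map<Frames, uint32_t>::iterator it = depot_index.find(frames);
  if (it != depot_index.end())
    return it->second;  // seen before: reuse the id
  uint32_t id = (uint32_t)depot_storage.size();
  depot_index[frames] = id;
  depot_storage.push_back(frames);
  return id;
}

const Frames &StackById(uint32_t id) { return depot_storage[id]; }

int main() {
  Frames f;
  f.push_back(0x400123);
  f.push_back(0x400456);
  return InternStack(f) == InternStack(Frames(f)) ? 0 : 1;  // same stack, same id
}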
diff --git a/lib/tsan/rtl/tsan_update_shadow_word_inl.h b/lib/tsan/rtl/tsan_update_shadow_word_inl.h
index 2c435556abb2..e7c036c5dea8 100644
--- a/lib/tsan/rtl/tsan_update_shadow_word_inl.h
+++ b/lib/tsan/rtl/tsan_update_shadow_word_inl.h
@@ -34,7 +34,7 @@ do {
if (Shadow::TidsAreEqual(old, cur)) {
StatInc(thr, StatShadowSameThread);
if (OldIsInSameSynchEpoch(old, thr)) {
- if (OldIsRWNotWeaker(old, kAccessIsWrite)) {
+ if (old.IsRWNotWeaker(kAccessIsWrite, kIsAtomic)) {
// found a slot that holds effectively the same info
// (that is, same tid, same sync epoch and same size)
StatInc(thr, StatMopSame);
@@ -43,7 +43,7 @@ do {
StoreIfNotYetStored(sp, &store_word);
break;
}
- if (OldIsRWWeakerOrEqual(old, kAccessIsWrite))
+ if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))
StoreIfNotYetStored(sp, &store_word);
break;
}
@@ -52,25 +52,23 @@ do {
StoreIfNotYetStored(sp, &store_word);
break;
}
- if (BothReads(old, kAccessIsWrite))
+ if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
break;
goto RACE;
}
-
// Do the memory access intersect?
- if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
+ // In Go all memory accesses are 1 byte, so there can be no intersections.
+ if (kCppMode && Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
StatInc(thr, StatShadowIntersect);
if (Shadow::TidsAreEqual(old, cur)) {
StatInc(thr, StatShadowSameThread);
break;
}
StatInc(thr, StatShadowAnotherThread);
- if (HappensBefore(old, thr))
+ if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
break;
-
- if (BothReads(old, kAccessIsWrite))
+ if (HappensBefore(old, thr))
break;
-
goto RACE;
}
// The accesses do not intersect.
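The kCppMode guard added above skips the intersection test in Go mode, where (per the new comment) every access is 1 byte wide and partial overlaps cannot happen. For the C++ path, Shadow::TwoRangesIntersect() is, at heart, the standard half-open interval overlap test; its general form as a standalone helper, leaving out the shadow encoding:

#include <cassert>

// True iff the byte ranges [a, a + sa) and [b, b + sb) overlap.
static bool RangesIntersect(unsigned long a, unsigned long sa,
                            unsigned long b, unsigned long sb) {
  return a < b + sb && b < a + sa;
}

int main() {
  assert(RangesIntersect(0, 4, 2, 2));   // [0,4) overlaps [2,4)
  assert(!RangesIntersect(0, 4, 4, 4));  // adjacent ranges do not overlap
  assert(RangesIntersect(6, 2, 0, 8));   // contained range overlaps
  return 0;
}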
diff --git a/lib/tsan/rtl/tsan_vector.h b/lib/tsan/rtl/tsan_vector.h
index d41063df3de5..fa236b1f1e44 100644
--- a/lib/tsan/rtl/tsan_vector.h
+++ b/lib/tsan/rtl/tsan_vector.h
@@ -64,6 +64,11 @@ class Vector {
return &end_[-1];
}
+ void PopBack() {
+ DCHECK_GT(end_, begin_);
+ end_--;
+ }
+
void Resize(uptr size) {
uptr old_size = Size();
EnsureSize(size);
@@ -105,6 +110,6 @@ class Vector {
Vector(const Vector&);
void operator=(const Vector&);
};
-}
+} // namespace __tsan
#endif // #ifndef TSAN_VECTOR_H
diff --git a/lib/tsan/tests/CMakeLists.txt b/lib/tsan/tests/CMakeLists.txt
index 0fcc6b2b1c8f..7cc079f3d27a 100644
--- a/lib/tsan/tests/CMakeLists.txt
+++ b/lib/tsan/tests/CMakeLists.txt
@@ -12,9 +12,9 @@ function(add_tsan_unittest testname)
add_unittest(TsanUnitTests ${testname} ${ARGN})
# Link with TSan runtime.
target_link_libraries(${testname} clang_rt.tsan-x86_64)
- # Build tests with PIE and debug info.
- set_property(TARGET ${testname} APPEND_STRING
- PROPERTY COMPILE_FLAGS " -fPIE -g")
+ # Compile tests with the same flags as TSan runtime.
+ set_target_compile_flags(${testname} ${TSAN_CFLAGS})
+ # Link tests with -pie.
set_property(TARGET ${testname} APPEND_STRING
PROPERTY LINK_FLAGS " -pie")
endif()
diff --git a/lib/tsan/tests/rtl/tsan_test_util_linux.cc b/lib/tsan/tests/rtl/tsan_test_util_linux.cc
index dce8db90de70..a2601486a2e1 100644
--- a/lib/tsan/tests/rtl/tsan_test_util_linux.cc
+++ b/lib/tsan/tests/rtl/tsan_test_util_linux.cc
@@ -73,7 +73,7 @@ bool OnReport(const ReportDesc *rep, bool suppressed) {
expect_report_reported = true;
return true;
}
-}
+} // namespace __tsan
static void* allocate_addr(int size, int offset_from_aligned = 0) {
static uintptr_t foo;
diff --git a/lib/tsan/tests/unit/CMakeLists.txt b/lib/tsan/tests/unit/CMakeLists.txt
index 52ebdb826939..b25a56d8d55c 100644
--- a/lib/tsan/tests/unit/CMakeLists.txt
+++ b/lib/tsan/tests/unit/CMakeLists.txt
@@ -3,7 +3,6 @@ set(TSAN_UNIT_TESTS
tsan_flags_test.cc
tsan_mman_test.cc
tsan_mutex_test.cc
- tsan_platform_test.cc
tsan_shadow_test.cc
tsan_stack_test.cc
tsan_suppressions_test.cc
diff --git a/lib/tsan/tests/unit/tsan_mman_test.cc b/lib/tsan/tests/unit/tsan_mman_test.cc
index 1a9a88f606fc..0961d2b75d11 100644
--- a/lib/tsan/tests/unit/tsan_mman_test.cc
+++ b/lib/tsan/tests/unit/tsan_mman_test.cc
@@ -10,10 +10,21 @@
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
+#include <limits>
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
+extern "C" {
+uptr __tsan_get_current_allocated_bytes();
+uptr __tsan_get_heap_size();
+uptr __tsan_get_free_bytes();
+uptr __tsan_get_unmapped_bytes();
+uptr __tsan_get_estimated_allocated_size(uptr size);
+bool __tsan_get_ownership(void *p);
+uptr __tsan_get_allocated_size(void *p);
+}
+
namespace __tsan {
TEST(Mman, Internal) {
@@ -44,10 +55,10 @@ TEST(Mman, User) {
EXPECT_NE(p2, p);
MBlock *b = user_mblock(thr, p);
EXPECT_NE(b, (MBlock*)0);
- EXPECT_EQ(b->size, (uptr)10);
+ EXPECT_EQ(b->Size(), (uptr)10);
MBlock *b2 = user_mblock(thr, p2);
EXPECT_NE(b2, (MBlock*)0);
- EXPECT_EQ(b2->size, (uptr)20);
+ EXPECT_EQ(b2->Size(), (uptr)20);
for (int i = 0; i < 10; i++) {
p[i] = 42;
EXPECT_EQ(b, user_mblock(thr, p + i));
@@ -106,4 +117,55 @@ TEST(Mman, UserRealloc) {
}
}
+TEST(Mman, UsableSize) {
+ ScopedInRtl in_rtl;
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+ char *p = (char*)user_alloc(thr, pc, 10);
+ char *p2 = (char*)user_alloc(thr, pc, 20);
+ EXPECT_EQ(0U, user_alloc_usable_size(thr, pc, NULL));
+ EXPECT_EQ(10U, user_alloc_usable_size(thr, pc, p));
+ EXPECT_EQ(20U, user_alloc_usable_size(thr, pc, p2));
+ user_free(thr, pc, p);
+ user_free(thr, pc, p2);
+}
+
+TEST(Mman, Stats) {
+ ScopedInRtl in_rtl;
+ ThreadState *thr = cur_thread();
+
+ uptr alloc0 = __tsan_get_current_allocated_bytes();
+ uptr heap0 = __tsan_get_heap_size();
+ uptr free0 = __tsan_get_free_bytes();
+ uptr unmapped0 = __tsan_get_unmapped_bytes();
+
+ EXPECT_EQ(__tsan_get_estimated_allocated_size(10), (uptr)10);
+ EXPECT_EQ(__tsan_get_estimated_allocated_size(20), (uptr)20);
+ EXPECT_EQ(__tsan_get_estimated_allocated_size(100), (uptr)100);
+
+ char *p = (char*)user_alloc(thr, 0, 10);
+ EXPECT_EQ(__tsan_get_ownership(p), true);
+ EXPECT_EQ(__tsan_get_allocated_size(p), (uptr)10);
+
+ EXPECT_EQ(__tsan_get_current_allocated_bytes(), alloc0 + 16);
+ EXPECT_GE(__tsan_get_heap_size(), heap0);
+ EXPECT_EQ(__tsan_get_free_bytes(), free0);
+ EXPECT_EQ(__tsan_get_unmapped_bytes(), unmapped0);
+
+ user_free(thr, 0, p);
+
+ EXPECT_EQ(__tsan_get_current_allocated_bytes(), alloc0);
+ EXPECT_GE(__tsan_get_heap_size(), heap0);
+ EXPECT_EQ(__tsan_get_free_bytes(), free0);
+ EXPECT_EQ(__tsan_get_unmapped_bytes(), unmapped0);
+}
+
+TEST(Mman, CallocOverflow) {
+ size_t kArraySize = 4096;
+ volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max();
+ volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
+ volatile void *p = calloc(kArraySize, kArraySize2); // Should return 0.
+ EXPECT_EQ(0L, p);
+}
+
} // namespace __tsan
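The CallocOverflow test above expects calloc to return 0 rather than quietly under-allocate when nmemb * size does not fit in size_t. The guard it relies on is the standard one; a tiny standalone sketch of the idiom (not the runtime's exact code):

#include <cstdint>
#include <cstdlib>
#include <cstring>

void *CheckedCalloc(size_t nmemb, size_t size) {
  if (size != 0 && nmemb > SIZE_MAX / size)
    return 0;                        // nmemb * size would overflow size_t
  void *p = std::malloc(nmemb * size);
  if (p)
    std::memset(p, 0, nmemb * size);
  return p;
}

int main() {
  void *p = CheckedCalloc(4096, SIZE_MAX / 4096 + 10);
  return p == 0 ? 0 : 1;             // the overflowing request must fail
}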
diff --git a/lib/tsan/tests/unit/tsan_mutexset_test.cc b/lib/tsan/tests/unit/tsan_mutexset_test.cc
index da1ae2e49e0c..335a7748cc1a 100644
--- a/lib/tsan/tests/unit/tsan_mutexset_test.cc
+++ b/lib/tsan/tests/unit/tsan_mutexset_test.cc
@@ -115,7 +115,8 @@ TEST(MutexSet, Overflow) {
EXPECT_EQ(mset.Size(), MutexSet::kMaxSize);
for (uptr i = 0; i < MutexSet::kMaxSize; i++) {
if (i == 0)
- Expect(mset, i, 63, true, 64, 2);
+ Expect(mset, i, MutexSet::kMaxSize - 1,
+ true, MutexSet::kMaxSize, 2);
else if (i == MutexSet::kMaxSize - 1)
Expect(mset, i, 100, true, 200, 1);
else
diff --git a/lib/tsan/tests/unit/tsan_platform_test.cc b/lib/tsan/tests/unit/tsan_platform_test.cc
deleted file mode 100644
index b43dbb4e4ff3..000000000000
--- a/lib/tsan/tests/unit/tsan_platform_test.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-//===-- tsan_platform_test.cc ---------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_libc.h"
-#include "tsan_platform.h"
-#include "tsan_rtl.h"
-#include "gtest/gtest.h"
-
-namespace __tsan {
-
-static void TestThreadInfo(bool main) {
- ScopedInRtl in_rtl;
- uptr stk_addr = 0;
- uptr stk_size = 0;
- uptr tls_addr = 0;
- uptr tls_size = 0;
- GetThreadStackAndTls(main, &stk_addr, &stk_size, &tls_addr, &tls_size);
- // Printf("stk=%zx-%zx(%zu)\n", stk_addr, stk_addr + stk_size, stk_size);
- // Printf("tls=%zx-%zx(%zu)\n", tls_addr, tls_addr + tls_size, tls_size);
-
- int stack_var;
- EXPECT_NE(stk_addr, (uptr)0);
- EXPECT_NE(stk_size, (uptr)0);
- EXPECT_GT((uptr)&stack_var, stk_addr);
- EXPECT_LT((uptr)&stack_var, stk_addr + stk_size);
-
- static __thread int thread_var;
- EXPECT_NE(tls_addr, (uptr)0);
- EXPECT_NE(tls_size, (uptr)0);
- EXPECT_GT((uptr)&thread_var, tls_addr);
- EXPECT_LT((uptr)&thread_var, tls_addr + tls_size);
-
- // Ensure that tls and stack do not intersect.
- uptr tls_end = tls_addr + tls_size;
- EXPECT_TRUE(tls_addr < stk_addr || tls_addr >= stk_addr + stk_size);
- EXPECT_TRUE(tls_end < stk_addr || tls_end >= stk_addr + stk_size);
- EXPECT_TRUE((tls_addr < stk_addr) == (tls_end < stk_addr));
-}
-
-static void *WorkerThread(void *arg) {
- TestThreadInfo(false);
- return 0;
-}
-
-TEST(Platform, ThreadInfoMain) {
- TestThreadInfo(true);
-}
-
-TEST(Platform, ThreadInfoWorker) {
- pthread_t t;
- pthread_create(&t, 0, WorkerThread, 0);
- pthread_join(t, 0);
-}
-
-TEST(Platform, FileOps) {
- const char *str1 = "qwerty";
- uptr len1 = internal_strlen(str1);
- const char *str2 = "zxcv";
- uptr len2 = internal_strlen(str2);
-
- fd_t fd = internal_open("./tsan_test.tmp", true);
- EXPECT_NE(fd, kInvalidFd);
- EXPECT_EQ(len1, internal_write(fd, str1, len1));
- EXPECT_EQ(len2, internal_write(fd, str2, len2));
- internal_close(fd);
-
- fd = internal_open("./tsan_test.tmp", false);
- EXPECT_NE(fd, kInvalidFd);
- EXPECT_EQ(len1 + len2, internal_filesize(fd));
- char buf[64] = {};
- EXPECT_EQ(len1, internal_read(fd, buf, len1));
- EXPECT_EQ(0, internal_memcmp(buf, str1, len1));
- EXPECT_EQ((char)0, buf[len1 + 1]);
- internal_memset(buf, 0, len1);
- EXPECT_EQ(len2, internal_read(fd, buf, len2));
- EXPECT_EQ(0, internal_memcmp(buf, str2, len2));
- internal_close(fd);
-}
-
-} // namespace __tsan
diff --git a/lib/tsan/tests/unit/tsan_shadow_test.cc b/lib/tsan/tests/unit/tsan_shadow_test.cc
index fa9c982c0f6d..17b17977bf86 100644
--- a/lib/tsan/tests/unit/tsan_shadow_test.cc
+++ b/lib/tsan/tests/unit/tsan_shadow_test.cc
@@ -25,7 +25,7 @@ TEST(Shadow, FastState) {
EXPECT_EQ(s.GetHistorySize(), 0);
EXPECT_EQ(s.addr0(), (u64)0);
EXPECT_EQ(s.size(), (u64)1);
- EXPECT_EQ(s.is_write(), false);
+ EXPECT_EQ(s.IsWrite(), true);
s.IncrementEpoch();
EXPECT_EQ(s.epoch(), (u64)23);
diff --git a/lib/tsan/tests/unit/tsan_suppressions_test.cc b/lib/tsan/tests/unit/tsan_suppressions_test.cc
index e1e0c12c004c..decfa3214d23 100644
--- a/lib/tsan/tests/unit/tsan_suppressions_test.cc
+++ b/lib/tsan/tests/unit/tsan_suppressions_test.cc
@@ -20,7 +20,7 @@ namespace __tsan {
TEST(Suppressions, Parse) {
ScopedInRtl in_rtl;
- Suppression *supp0 = SuppressionParse(
+ Suppression *supp0 = SuppressionParse(0,
"race:foo\n"
" race:bar\n" // NOLINT
"race:baz \n" // NOLINT
@@ -45,7 +45,7 @@ TEST(Suppressions, Parse) {
TEST(Suppressions, Parse2) {
ScopedInRtl in_rtl;
- Suppression *supp0 = SuppressionParse(
+ Suppression *supp0 = SuppressionParse(0,
" # first line comment\n" // NOLINT
" race:bar \n" // NOLINT
"race:baz* *baz\n"
@@ -64,7 +64,7 @@ TEST(Suppressions, Parse2) {
TEST(Suppressions, Parse3) {
ScopedInRtl in_rtl;
- Suppression *supp0 = SuppressionParse(
+ Suppression *supp0 = SuppressionParse(0,
"# last suppression w/o line-feed\n"
"race:foo\n"
"race:bar"
@@ -81,7 +81,7 @@ TEST(Suppressions, Parse3) {
TEST(Suppressions, ParseType) {
ScopedInRtl in_rtl;
- Suppression *supp0 = SuppressionParse(
+ Suppression *supp0 = SuppressionParse(0,
"race:foo\n"
"thread:bar\n"
"mutex:baz\n"