
Commit 4d53d6d

Author: Simon Dardis (committed)
Reland "[mips][compiler-rt] Provide 64bit atomic add and sub"
r318733 introduced a build failure for native MIPS32 systems for xray, due to the lack of __sync_fetch_and_add / __sync_fetch_and_sub support. This patch extends the existing atomics support so that xray can be successfully built. The initial patch was reverted in r321292, as I suspected it might have caused the buildbot failure; in fact, a different patch among the updates the bot had fetched caused the test failures, and that patch has since been reverted.

Reviewers: atanasyan, dberris

Differential Revision: https://reviews.llvm.org/D40385

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@321383 91177308-0d34-0410-b5e6-96231b3b80d8
1 parent 2e1adb0 commit 4d53d6d
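
Editor's note: as the commit message says, the xray build broke on native MIPS32 because the 8-byte __sync_fetch_and_add / __sync_fetch_and_sub builtins are not supported there. The patch works around this by emulating the 64-bit read-modify-write under a spin lock. Below is a minimal, standalone sketch of that approach; the names (g_lock, spin_lock, fetch_add_u64) are illustrative and are not compiler-rt's API.

// Standalone sketch (not compiler-rt code): emulate a 64-bit fetch-and-add
// with a one-word spin lock, the approach this patch takes for MIPS32.
#include <cstdint>
#include <cstdio>

static volatile int g_lock = 0;

static void spin_lock(volatile int *lock) {
  // __sync_lock_test_and_set returns the previous value; keep spinning while
  // another thread holds the lock.
  while (__sync_lock_test_and_set(lock, 1))
    while (*lock) {
    }
}

static void spin_unlock(volatile int *lock) { __sync_lock_release(lock); }

// Returns the previous value, mirroring __sync_fetch_and_add semantics.
static uint64_t fetch_add_u64(volatile uint64_t *ptr, uint64_t val) {
  spin_lock(&g_lock);
  uint64_t old = *ptr;  // read-modify-write happens entirely under the lock
  *ptr = old + val;
  spin_unlock(&g_lock);
  return old;
}

int main() {
  volatile uint64_t counter = 0;
  uint64_t prev = fetch_add_u64(&counter, 5);
  std::printf("prev=%llu now=%llu\n", (unsigned long long)prev,
              (unsigned long long)counter);
  return 0;
}

Subtraction then falls out for free: fetch-add of the two's-complement negation, which is how the patch implements atomic_fetch_sub.
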

File tree

3 files changed (+125, -73 lines)


lib/sanitizer_common/sanitizer_atomic_clang.h

Lines changed: 7 additions & 10 deletions

@@ -78,17 +78,7 @@ INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
   typedef typename T::Type Type;
   Type cmpv = *cmp;
   Type prev;
-#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
-  if (sizeof(*a) == 8) {
-    Type volatile *val_ptr = const_cast<Type volatile *>(&a->val_dont_use);
-    prev = __mips_sync_val_compare_and_swap<u64>(
-        reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmpv, (u64)xchg);
-  } else {
-    prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
-  }
-#else
   prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
-#endif
   if (prev == cmpv) return true;
   *cmp = prev;
   return false;
@@ -104,6 +94,13 @@ INLINE bool atomic_compare_exchange_weak(volatile T *a,

 } // namespace __sanitizer

+// This include provides explicit template instantiations for atomic_uint64_t
+// on MIPS32, which does not directly support 8 byte atomics. It has to
+// proceed the template definitions above.
+#if defined(_MIPS_SIM) && defined(_ABIO32)
+#include "sanitizer_atomic_clang_mips.h"
+#endif
+
 #undef ATOMIC_ORDER

 #endif // SANITIZER_ATOMIC_CLANG_H
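
Editor's note: the new #include is placed after the generic templates because the MIPS32 header supplies explicit specializations of them, and an explicit specialization has to follow the primary template it specializes and precede its first use. A compressed sketch of that pattern, with made-up names (cas_strong, lock_acquire) and a trivial spin lock, assuming a host where the generic 4-byte __sync CAS is available:

// Illustrative pattern only; these names are hypothetical, not compiler-rt's.
#include <cstdint>
#include <cstdio>

static volatile int g_lock = 0;
static void lock_acquire() {
  while (__sync_lock_test_and_set(&g_lock, 1))
    while (g_lock) {
    }
}
static void lock_release() { __sync_lock_release(&g_lock); }

// Primary template: rely on the compiler's native CAS builtin.
template <typename T>
bool cas_strong(volatile T *p, T *expected, T desired) {
  T prev = __sync_val_compare_and_swap(p, *expected, desired);
  if (prev == *expected) return true;
  *expected = prev;
  return false;
}

// Explicit specialization for the 8-byte case: fall back to a lock-based
// compare-and-swap. It must appear after the primary template and before any
// use of cas_strong<uint64_t> -- the ordering the #include comment describes.
template <>
bool cas_strong<uint64_t>(volatile uint64_t *p, uint64_t *expected,
                          uint64_t desired) {
  bool ok = false;
  lock_acquire();
  uint64_t prev = *p;
  if (prev == *expected) {
    *p = desired;
    ok = true;
  } else {
    *expected = prev;
  }
  lock_release();
  return ok;
}

int main() {
  volatile uint64_t v = 1;
  uint64_t expected = 1;
  bool ok = cas_strong<uint64_t>(&v, &expected, 2);
  std::printf("swapped=%d value=%llu\n", (int)ok, (unsigned long long)v);
  return 0;
}
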
lib/sanitizer_common/sanitizer_atomic_clang_mips.h

Lines changed: 118 additions & 0 deletions

@@ -0,0 +1,118 @@
+//===-- sanitizer_atomic_clang_mips.h ---------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_MIPS_H
+#define SANITIZER_ATOMIC_CLANG_MIPS_H
+
+namespace __sanitizer {
+
+// MIPS32 does not support atomics > 4 bytes. To address this lack of
+// functionality, the sanitizer library provides helper methods which use an
+// internal spin lock mechanism to emulate atomic oprations when the size is
+// 8 bytes.
+static void __spin_lock(volatile int *lock) {
+  while (__sync_lock_test_and_set(lock, 1))
+    while (*lock) {
+    }
+}
+
+static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
+
+// Make sure the lock is on its own cache line to prevent false sharing.
+// Put it inside a struct that is aligned and padded to the typical MIPS
+// cacheline which is 32 bytes.
+static struct {
+  int lock;
+  char pad[32 - sizeof(int)];
+} __attribute__((aligned(32))) lock = {0, {0}};
+
+template <>
+INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
+                                              atomic_uint64_t::Type val,
+                                              memory_order mo) {
+  DCHECK(mo &
+         (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+  DCHECK(!((uptr)ptr % sizeof(*ptr)));
+
+  atomic_uint64_t::Type ret;
+
+  __spin_lock(&lock.lock);
+  ret = *(const_cast<atomic_uint64_t::Type volatile *>(&ptr->val_dont_use));
+  ptr->val_dont_use = ret + val;
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+template <>
+INLINE atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
+                                              atomic_uint64_t::Type val,
+                                              memory_order mo) {
+  return atomic_fetch_add(ptr, -val, mo);
+}
+
+template <>
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
+                                           atomic_uint64_t::Type *cmp,
+                                           atomic_uint64_t::Type xchg,
+                                           memory_order mo) {
+  DCHECK(mo &
+         (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+  DCHECK(!((uptr)ptr % sizeof(*ptr)));
+
+  typedef atomic_uint64_t::Type Type;
+  Type cmpv = *cmp;
+  Type prev;
+  bool ret = false;
+
+  __spin_lock(&lock.lock);
+  prev = *(const_cast<Type volatile *>(&ptr->val_dont_use));
+  if (prev == cmpv) {
+    ret = true;
+    ptr->val_dont_use = xchg;
+  }
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+template <>
+INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
+                                         memory_order mo) {
+  DCHECK(mo &
+         (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+  DCHECK(!((uptr)ptr % sizeof(*ptr)));
+
+  atomic_uint64_t::Type zero = 0;
+  volatile atomic_uint64_t *Newptr =
+      const_cast<volatile atomic_uint64_t *>(ptr);
+  return atomic_fetch_add(Newptr, zero, mo);
+}
+
+template <>
+INLINE void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
+                         memory_order mo) {
+  DCHECK(mo &
+         (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+  DCHECK(!((uptr)ptr % sizeof(*ptr)));
+
+  __spin_lock(&lock.lock);
+  ptr->val_dont_use = v;
+  __spin_unlock(&lock.lock);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ATOMIC_CLANG_MIPS_H
+
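
Editor's note: one detail worth calling out in the new header is the lock's layout: the struct is padded and aligned so the lock word occupies a cache line by itself, avoiding false sharing with neighbouring globals. The standalone sketch below expresses the same idea with C++11 alignas instead of the GNU __attribute__((aligned(32))) used in the patch; the 32-byte figure follows the comment above, and real line sizes vary by core.

// Standalone sketch, not compiler-rt code.
#include <cstddef>

struct alignas(32) PaddedLock {
  int lock;
  char pad[32 - sizeof(int)];  // fill the rest of the assumed 32-byte line
};

// The object spans exactly one 32-byte line and starts on a 32-byte boundary,
// so nothing else can share its cache line.
static_assert(sizeof(PaddedLock) == 32, "lock fills one 32-byte line");
static_assert(alignof(PaddedLock) == 32, "lock starts on a line boundary");

static PaddedLock g_lock = {0, {0}};

int main() { return g_lock.lock; }
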

lib/sanitizer_common/sanitizer_atomic_clang_other.h

Lines changed: 0 additions & 63 deletions

@@ -17,55 +17,6 @@

 namespace __sanitizer {

-// MIPS32 does not support atomic > 4 bytes. To address this lack of
-// functionality, the sanitizer library provides helper methods which use an
-// internal spin lock mechanism to emulate atomic oprations when the size is
-// 8 bytes.
-#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
-static void __spin_lock(volatile int *lock) {
-  while (__sync_lock_test_and_set(lock, 1))
-    while (*lock) {
-    }
-}
-
-static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
-
-
-// Make sure the lock is on its own cache line to prevent false sharing.
-// Put it inside a struct that is aligned and padded to the typical MIPS
-// cacheline which is 32 bytes.
-static struct {
-  int lock;
-  char pad[32 - sizeof(int)];
-} __attribute__((aligned(32))) lock = {0};
-
-template <class T>
-T __mips_sync_fetch_and_add(volatile T *ptr, T val) {
-  T ret;
-
-  __spin_lock(&lock.lock);
-
-  ret = *ptr;
-  *ptr = ret + val;
-
-  __spin_unlock(&lock.lock);
-
-  return ret;
-}
-
-template <class T>
-T __mips_sync_val_compare_and_swap(volatile T *ptr, T oldval, T newval) {
-  T ret;
-  __spin_lock(&lock.lock);
-
-  ret = *ptr;
-  if (ret == oldval) *ptr = newval;
-
-  __spin_unlock(&lock.lock);
-
-  return ret;
-}
-#endif

 INLINE void proc_yield(int cnt) {
   __asm__ __volatile__("" ::: "memory");
@@ -103,15 +54,8 @@ INLINE typename T::Type atomic_load(
     // 64-bit load on 32-bit platform.
     // Gross, but simple and reliable.
     // Assume that it is not in read-only memory.
-#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
-    typename T::Type volatile *val_ptr =
-        const_cast<typename T::Type volatile *>(&a->val_dont_use);
-    v = __mips_sync_fetch_and_add<u64>(
-        reinterpret_cast<u64 volatile *>(val_ptr), 0);
-#else
     v = __sync_fetch_and_add(
         const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
-#endif
   }
   return v;
 }
@@ -141,14 +85,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
   typename T::Type cmp = a->val_dont_use;
   typename T::Type cur;
   for (;;) {
-#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
-    typename T::Type volatile *val_ptr =
-        const_cast<typename T::Type volatile *>(&a->val_dont_use);
-    cur = __mips_sync_val_compare_and_swap<u64>(
-        reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmp, (u64)v);
-#else
     cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
-#endif
     if (cmp == v)
       break;
     cmp = cur;
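
Editor's note: the generic path retained above still performs a 64-bit store on 32-bit platforms by looping on an 8-byte compare-and-swap until the store takes effect. The sketch below shows the same pattern in standalone form on a target that does provide an 8-byte CAS builtin (which MIPS32 does not, hence the removals above); names are illustrative, and it uses the conventional break-on-success check rather than the exact condition in the file.

// Standalone sketch, not the sanitizer's code.
#include <cstdint>
#include <cstdio>

static void store_via_cas(volatile uint64_t *p, uint64_t v) {
  uint64_t cmp = *p;  // snapshot of the current value
  for (;;) {
    uint64_t cur = __sync_val_compare_and_swap(p, cmp, v);
    if (cur == cmp)  // CAS succeeded: *p now holds v
      break;
    cmp = cur;       // lost the race: retry against the freshly observed value
  }
}

int main() {
  volatile uint64_t x = 7;
  store_via_cas(&x, 42);
  std::printf("%llu\n", (unsigned long long)x);
  return 0;
}
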
