20 | 20 |
21 | 21 | namespace __sanitizer {
22 | 22 |
   | 23 | +template <typename T>
23 | 24 | class PersistentAllocator {
24 | 25 |  public:
25 |    | -  void *alloc(uptr size);
   | 26 | +  T *alloc(uptr count = 1);
26 | 27 |   uptr allocated() const {
27 | 28 |     SpinMutexLock l(&mtx);
28 | 29 |     return atomic_load_relaxed(&mapped_size) +
29 | 30 |            atomic_load_relaxed(&region_pos) - atomic_load_relaxed(&region_end);
30 | 31 |   }
31 | 32 |
32 | 33 |  private:
33 |    | -  void *tryAlloc(uptr size);
34 |    | -  void *refillAndAlloc(uptr size);
   | 34 | +  T *tryAlloc(uptr count);
   | 35 | +  T *refillAndAlloc(uptr count);
35 | 36 |   mutable StaticSpinMutex mtx;  // Protects alloc of new blocks.
36 | 37 |   atomic_uintptr_t region_pos;  // Region allocator for Node's.
37 | 38 |   atomic_uintptr_t region_end;
38 | 39 |   atomic_uintptr_t mapped_size;
39 | 40 | };
40 | 41 |
41 |    | -inline void *PersistentAllocator::tryAlloc(uptr size) {
   | 42 | +template <typename T>
   | 43 | +inline T *PersistentAllocator<T>::tryAlloc(uptr count) {
42 | 44 |   // Optimistic lock-free allocation, essentially try to bump the region ptr.
43 | 45 |   for (;;) {
44 | 46 |     uptr cmp = atomic_load(&region_pos, memory_order_acquire);
45 | 47 |     uptr end = atomic_load(&region_end, memory_order_acquire);
   | 48 | +    uptr size = count * sizeof(T);
46 | 49 |     if (cmp == 0 || cmp + size > end) return nullptr;
47 | 50 |     if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
48 | 51 |                                      memory_order_acquire))
49 |    | -      return (void *)cmp;
   | 52 | +      return reinterpret_cast<T *>(cmp);
50 | 53 |   }
51 | 54 | }
52 | 55 |
53 |    | -inline void *PersistentAllocator::alloc(uptr size) {
   | 56 | +template <typename T>
   | 57 | +inline T *PersistentAllocator<T>::alloc(uptr count) {
54 | 58 |   // First, try to allocate optimistically.
55 |    | -  void *s = tryAlloc(size);
   | 59 | +  T *s = tryAlloc(count);
56 | 60 |   if (LIKELY(s))
57 | 61 |     return s;
58 |    | -  return refillAndAlloc(size);
   | 62 | +  return refillAndAlloc(count);
59 | 63 | }
60 | 64 |
61 |    | -inline void *PersistentAllocator::refillAndAlloc(uptr size) {
   | 65 | +template <typename T>
   | 66 | +inline T *PersistentAllocator<T>::refillAndAlloc(uptr count) {
62 | 67 |   // If failed, lock, retry and alloc new superblock.
63 | 68 |   SpinMutexLock l(&mtx);
64 | 69 |   for (;;) {
65 |    | -    void *s = tryAlloc(size);
   | 70 | +    T *s = tryAlloc(count);
66 | 71 |     if (s)
67 | 72 |       return s;
68 | 73 |     atomic_store(&region_pos, 0, memory_order_relaxed);
   | 74 | +    uptr size = count * sizeof(T);
69 | 75 |     uptr allocsz = 64 * 1024;
70 | 76 |     if (allocsz < size)
71 | 77 |       allocsz = size;
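Note: for readers outside compiler-rt, the following is a minimal standalone sketch of the same optimistic bump-pointer pattern used by tryAlloc/alloc/refillAndAlloc above, written against std::atomic and std::mutex instead of the sanitizer's atomic_uintptr_t and StaticSpinMutex. The name DemoPersistentAllocator, the refill() helper, and the use of std::malloc in place of the runtime's mapping primitives are illustrative assumptions, not part of this commit.

    // Standalone illustration (not compiler-rt code) of the lock-free
    // bump-pointer allocation pattern shown in the diff above.
    #include <atomic>
    #include <cstdint>
    #include <cstdlib>
    #include <mutex>

    template <typename T>
    class DemoPersistentAllocator {
     public:
      T *alloc(std::size_t count = 1) {
        // Fast path: lock-free bump of the region pointer.
        if (T *p = tryAlloc(count)) return p;
        // Slow path (refillAndAlloc in the real code): lock, retry, get a new region.
        std::lock_guard<std::mutex> l(mtx_);
        for (;;) {
          if (T *p = tryAlloc(count)) return p;
          refill(count * sizeof(T));
        }
      }

     private:
      T *tryAlloc(std::size_t count) {
        std::size_t size = count * sizeof(T);
        for (;;) {
          std::uintptr_t cmp = region_pos_.load(std::memory_order_acquire);
          std::uintptr_t end = region_end_.load(std::memory_order_acquire);
          if (cmp == 0 || cmp + size > end) return nullptr;
          // CAS the bump pointer forward; on success, [cmp, cmp + size) is ours.
          if (region_pos_.compare_exchange_weak(cmp, cmp + size,
                                                std::memory_order_acquire))
            return reinterpret_cast<T *>(cmp);
        }
      }

      void refill(std::size_t min_size) {
        // Retire the current region first so concurrent tryAlloc calls fail fast.
        region_pos_.store(0, std::memory_order_relaxed);
        std::size_t map_size = min_size < 64 * 1024 ? 64 * 1024 : min_size;
        // The sanitizer runtime maps pages here; plain malloc stands in for that.
        // Old regions are never freed: the allocator is persistent by design.
        char *base = static_cast<char *>(std::malloc(map_size));
        region_end_.store(reinterpret_cast<std::uintptr_t>(base) + map_size,
                          std::memory_order_release);
        region_pos_.store(reinterpret_cast<std::uintptr_t>(base),
                          std::memory_order_release);
      }

      std::mutex mtx_;  // Protects allocation of new regions.
      std::atomic<std::uintptr_t> region_pos_{0};
      std::atomic<std::uintptr_t> region_end_{0};
    };

    // Example use:
    //   DemoPersistentAllocator<int> a;
    //   int *v = a.alloc(16);  // 16 ints carved out of the current bump region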