 
 #include "platform.h"
 #include "vm-state-inl.h"
-#include "v8threads.h"
 
 
 // It seems there is a bug in some Solaris distributions (experienced in
@@ -84,33 +83,6 @@ namespace internal {
 static const pthread_t kNoThread = (pthread_t) 0;
 
 
-static void* GetRandomMmapAddr() {
-  Isolate* isolate = Isolate::UncheckedCurrent();
-  // Note that the current isolate isn't set up in a call path via
-  // CpuFeatures::Probe. We don't care about randomization in this case because
-  // the code page is immediately freed.
-  if (isolate != NULL) {
-#ifdef V8_TARGET_ARCH_X64
-    uint64_t rnd1 = V8::RandomPrivate(isolate);
-    uint64_t rnd2 = V8::RandomPrivate(isolate);
-    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
-    // Currently available CPUs have 48 bits of virtual addressing. Truncate
-    // the hint address to 46 bits to give the kernel a fighting chance of
-    // fulfilling our placement request.
-    raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
-    uint32_t raw_addr = V8::RandomPrivate(isolate);
-    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
-    // variety of ASLR modes (PAE kernel, NX compat mode, etc).
-    raw_addr &= 0x3ffff000;
-    raw_addr += 0x20000000;
-#endif
-    return reinterpret_cast<void*>(raw_addr);
-  }
-  return NULL;
-}
-
-
 double ceiling(double x) {
   return ceil(x);
 }
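For context, the deleted GetRandomMmapAddr() helper hints mmap toward a randomized, page-aligned address so reserved regions do not land at predictable spots; when no isolate is available it falls back to NULL (no hint). A standalone sketch of the same hinting idea, using std::mt19937_64 in place of V8::RandomPrivate (the helper below is hypothetical, not V8 code):

```cpp
#include <cstdint>
#include <random>

// Hypothetical stand-in for V8::RandomPrivate: produce a page-aligned,
// randomized hint address for mmap. On 64-bit targets the hint is kept
// within 46 bits so the kernel can realistically honor it; on 32-bit
// targets it stays in the relatively unpopulated 0x20000000-0x60000000
// window, mirroring the deleted helper.
static void* RandomMmapHint() {
  static std::mt19937_64 rng{std::random_device{}()};
#if defined(__x86_64__) || defined(_M_X64)
  uint64_t raw_addr = rng();
  raw_addr &= UINT64_C(0x3ffffffff000);  // 46 bits, low 12 bits cleared.
#else
  uint32_t raw_addr = static_cast<uint32_t>(rng());
  raw_addr &= 0x3ffff000;   // Page-aligned offset below 1 GB.
  raw_addr += 0x20000000;   // Shift into the 0x20000000-0x60000000 window.
#endif
  return reinterpret_cast<void*>(raw_addr);
}
```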
@@ -350,126 +322,43 @@ static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
 
 
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = ReserveRegion(size);
+  address_ = mmap(NULL, size, PROT_NONE,
+                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                  kMmapFd, kMmapFdOffset);
   size_ = size;
 }
 
 
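The restored constructor only reserves address space: PROT_NONE leaves the pages inaccessible and MAP_NORESERVE avoids charging swap for them, so the reservation stays cheap until parts of it are committed. The mmap result is stored unchecked; a failed reservation is caught later because IsReserved() compares address_ against MAP_FAILED, as shown further down. A minimal standalone sketch of the reservation call (helper name is illustrative, not part of the patch):

```cpp
#include <sys/mman.h>
#include <cstddef>

// Illustrative helper: reserve a range of address space without committing
// memory. PROT_NONE keeps the pages inaccessible and MAP_NORESERVE avoids
// charging swap until the range is committed.
static void* ReserveUncommitted(size_t size) {
  void* base = mmap(NULL, size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                    -1, 0);
  return base;  // MAP_FAILED on error; callers must test for it explicitly.
}
```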
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
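The deleted aligned constructor works by over-reserving: it asks for size + alignment bytes, rounds the returned base up to the requested alignment, then unmaps the unused prefix and suffix so that exactly the aligned block remains reserved. A standalone sketch of that technique with plain mmap/munmap (illustrative names; assumes a page-aligned size and a power-of-two alignment no smaller than the page size):

```cpp
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

// Sketch of the over-reserve-and-trim technique: reserve size + alignment
// bytes, round the base up to the alignment, then unmap the unused prefix
// and suffix so only the aligned block stays mapped.
static void* ReserveAligned(size_t size, size_t alignment) {
  size_t request_size = size + alignment;
  void* reservation = mmap(NULL, request_size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           -1, 0);
  if (reservation == MAP_FAILED) return NULL;

  char* base = static_cast<char*>(reservation);
  uintptr_t raw = reinterpret_cast<uintptr_t>(base);
  char* aligned_base =
      reinterpret_cast<char*>((raw + alignment - 1) & ~(alignment - 1));

  size_t prefix_size = static_cast<size_t>(aligned_base - base);
  if (prefix_size != 0) munmap(base, prefix_size);                // Trim prefix.

  size_t suffix_size = request_size - prefix_size - size;
  if (suffix_size != 0) munmap(aligned_base + size, suffix_size); // Trim suffix.

  return aligned_base;
}
```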
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
+    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
+  return address_ != MAP_FAILED;
 }
 
 
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address, size, prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd, kMmapFdOffset)) {
     return false;
   }
 
-  UpdateAllocatedSpaceLimits(base, size);
+  UpdateAllocatedSpaceLimits(address, size);
   return true;
 }
 
 
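Taken together, the restored interface follows a reserve-then-commit pattern: construct a VirtualMemory to reserve address space, Commit() the sub-ranges you actually touch, and Uncommit() them (shown just below) when they are no longer needed; the destructor releases the whole reservation. An illustrative usage sketch, assuming the surrounding v8::internal headers and, for brevity, a 4 KB page size where real code would use OS::AllocateAlignment():

```cpp
// Illustrative only (assumes v8::internal headers): reserve, commit, use,
// uncommit, and let the destructor release the reservation.
static void UseVirtualMemoryExample() {
  VirtualMemory vm(16 * 4096);          // Reserve only; nothing is committed.
  if (!vm.IsReserved()) return;         // mmap returned MAP_FAILED.
  char* page = static_cast<char*>(vm.address());
  if (vm.Commit(page, 4096, false)) {   // Back the first page, read/write.
    page[0] = 42;                       // Safe: the page is committed.
    vm.Uncommit(page, 4096);            // Back to reserved-only state.
  }
}                                       // ~VirtualMemory munmaps the reservation.
```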
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return mmap(address, size, PROT_NONE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd, kMmapFdOffset) != MAP_FAILED;
 }
 
 
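The restored Uncommit() relies on mmap's replace-on-overlap semantics: mapping fresh anonymous PROT_NONE | MAP_NORESERVE pages over the committed range with MAP_FIXED discards the old contents and gives up their backing memory, while the address range itself stays reserved for a later Commit(). That differs from munmap(), which the destructor uses to release the addresses entirely. A small standalone sketch of the decommit step (helper name is illustrative, not part of the patch):

```cpp
#include <sys/mman.h>
#include <cstddef>

// Overlay the committed pages with an inaccessible MAP_NORESERVE mapping.
// Their contents are discarded and the backing memory is released, but the
// address range remains reserved and can be committed again later.
static bool DecommitPages(void* address, size_t size) {
  return mmap(address, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              -1, 0) != MAP_FAILED;
}
```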