|
54 | 54 |
|
55 | 55 | #include "platform.h"
|
56 | 56 | #include "vm-state-inl.h"
|
| 57 | +#include "v8threads.h" |
57 | 58 |
|
58 | 59 |
|
59 | 60 | // It seems there is a bug in some Solaris distributions (experienced in
|
@@ -83,6 +84,33 @@ namespace internal {
|
// Sentinel pthread_t value meaning "no thread".
static const pthread_t kNoThread = (pthread_t) 0;
|
84 | 85 |
|
85 | 86 |
|
| 87 | +static void* GetRandomMmapAddr() { |
| 88 | + Isolate* isolate = Isolate::UncheckedCurrent(); |
| 89 | + // Note that the current isolate isn't set up in a call path via |
| 90 | + // CpuFeatures::Probe. We don't care about randomization in this case because |
| 91 | + // the code page is immediately freed. |
| 92 | + if (isolate != NULL) { |
| 93 | +#ifdef V8_TARGET_ARCH_X64 |
| 94 | + uint64_t rnd1 = V8::RandomPrivate(isolate); |
| 95 | + uint64_t rnd2 = V8::RandomPrivate(isolate); |
| 96 | + uint64_t raw_addr = (rnd1 << 32) ^ rnd2; |
| 97 | + // Currently available CPUs have 48 bits of virtual addressing. Truncate |
| 98 | + // the hint address to 46 bits to give the kernel a fighting chance of |
| 99 | + // fulfilling our placement request. |
| 100 | + raw_addr &= V8_UINT64_C(0x3ffffffff000); |
| 101 | +#else |
| 102 | + uint32_t raw_addr = V8::RandomPrivate(isolate); |
| 103 | + // The range 0x20000000 - 0x60000000 is relatively unpopulated across a |
| 104 | + // variety of ASLR modes (PAE kernel, NX compat mode, etc). |
| 105 | + raw_addr &= 0x3ffff000; |
| 106 | + raw_addr += 0x20000000; |
| 107 | +#endif |
| 108 | + return reinterpret_cast<void*>(raw_addr); |
| 109 | + } |
| 110 | + return NULL; |
| 111 | +} |
| 112 | + |
| 113 | + |
// Smallest integral value not less than |x| (thin wrapper over the C
// library's ceil()).
double ceiling(double x) {
  return ceil(x);
}
|
@@ -322,43 +350,126 @@ static const int kMmapFd = -1;
|
static const int kMmapFdOffset = 0;  // Offset argument for the anonymous mmap calls below.
|
323 | 351 |
|
324 | 352 |
|
// Creates an empty object owning no reservation; IsReserved() is false.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
325 | 355 | VirtualMemory::VirtualMemory(size_t size) {
|
326 |
| - address_ = mmap(NULL, size, PROT_NONE, |
327 |
| - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, |
328 |
| - kMmapFd, kMmapFdOffset); |
| 356 | + address_ = ReserveRegion(size); |
329 | 357 | size_ = size;
|
330 | 358 | }
|
331 | 359 |
|
332 | 360 |
|
// Reserves |size| bytes aligned to |alignment|.  The region is over-reserved
// by |alignment| bytes, then the misaligned prefix and the unused suffix are
// unmapped again so that exactly the aligned block remains.  On mmap failure
// the object stays empty (address_ == NULL, size_ == 0).
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  // PROT_NONE + MAP_NORESERVE: claim address space only; pages are committed
  // later through Commit().
  void* reservation = mmap(GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  // request_size tracks what is still mapped after each trim.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  // After both trims exactly the aligned block should remain mapped.
  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
| 399 | + |
| 400 | + |
333 | 401 | VirtualMemory::~VirtualMemory() {
|
334 | 402 | if (IsReserved()) {
|
335 |
| - if (0 == munmap(address(), size())) address_ = MAP_FAILED; |
| 403 | + bool result = ReleaseRegion(address(), size()); |
| 404 | + ASSERT(result); |
| 405 | + USE(result); |
336 | 406 | }
|
337 | 407 | }
|
338 | 408 |
|
339 | 409 |
|
// True when this object currently owns a reserved address range.
// NULL doubles as the "empty" sentinel (set by the default constructor,
// Reset(), and a failed ReserveRegion()).
bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}
| 413 | + |
| 414 | + |
| 415 | +void VirtualMemory::Reset() { |
| 416 | + address_ = NULL; |
| 417 | + size_ = 0; |
| 418 | +} |
| 419 | + |
| 420 | + |
// Commits (makes accessible) |size| bytes at |address| within this
// reservation.  Thin wrapper around the static CommitRegion().
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}
|
343 | 424 |
|
344 | 425 |
|
345 |
| -bool VirtualMemory::Commit(void* address, size_t size, bool executable) { |
346 |
| - int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0); |
347 |
| - if (MAP_FAILED == mmap(address, size, prot, |
348 |
| - MAP_PRIVATE | MAP_ANON | MAP_FIXED, |
349 |
| - kMmapFd, kMmapFdOffset)) { |
// Returns committed pages at [address, address + size) to reserved-only
// state.  Thin wrapper around the static UncommitRegion().
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}
| 429 | + |
| 430 | + |
| 431 | +void* VirtualMemory::ReserveRegion(size_t size) { |
| 432 | + void* result = mmap(GetRandomMmapAddr(), |
| 433 | + size, |
| 434 | + PROT_NONE, |
| 435 | + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, |
| 436 | + kMmapFd, |
| 437 | + kMmapFdOffset); |
| 438 | + |
| 439 | + if (result == MAP_FAILED) return NULL; |
| 440 | + |
| 441 | + return result; |
| 442 | +} |
| 443 | + |
| 444 | + |
| 445 | +bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { |
| 446 | + int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); |
| 447 | + if (MAP_FAILED == mmap(base, |
| 448 | + size, |
| 449 | + prot, |
| 450 | + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, |
| 451 | + kMmapFd, |
| 452 | + kMmapFdOffset)) { |
350 | 453 | return false;
|
351 | 454 | }
|
352 | 455 |
|
353 |
| - UpdateAllocatedSpaceLimits(address, size); |
| 456 | + UpdateAllocatedSpaceLimits(base, size); |
354 | 457 | return true;
|
355 | 458 | }
|
356 | 459 |
|
357 | 460 |
|
358 |
| -bool VirtualMemory::Uncommit(void* address, size_t size) { |
359 |
| - return mmap(address, size, PROT_NONE, |
360 |
| - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, |
361 |
| - kMmapFd, kMmapFdOffset) != MAP_FAILED; |
| 461 | +bool VirtualMemory::UncommitRegion(void* base, size_t size) { |
| 462 | + return mmap(base, |
| 463 | + size, |
| 464 | + PROT_NONE, |
| 465 | + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, |
| 466 | + kMmapFd, |
| 467 | + kMmapFdOffset) != MAP_FAILED; |
| 468 | +} |
| 469 | + |
| 470 | + |
| 471 | +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { |
| 472 | + return munmap(base, size) == 0; |
362 | 473 | }
|
363 | 474 |
|
364 | 475 |
|
|
0 commit comments