diff --git a/libgo/runtime/malloc.goc b/libgo/runtime/malloc.goc
index 8ccaa6b..e5ab1e5 100644
--- a/libgo/runtime/malloc.goc
+++ b/libgo/runtime/malloc.goc
@@ -339,12 +339,14 @@ runtime_mallocinit(void)
 	extern byte _end[];
 	byte *want;
 	uintptr limit;
+	bool is_reserved;
 
 	runtime_sizeof_C_MStats = sizeof(MStats);
 
 	p = nil;
 	arena_size = 0;
 	bitmap_size = 0;
+	is_reserved = false;
 
 	// for 64-bit build
 	USED(p);
@@ -389,7 +391,8 @@ runtime_mallocinit(void)
 		// If this fails we fall back to the 32 bit memory mechanism
 		arena_size = MaxMem;
 		bitmap_size = arena_size / (sizeof(void*)*8/4);
-		p = runtime_SysReserve((void*)(0x00c0ULL<<32), bitmap_size + arena_size);
+		p = runtime_SysReserve((void*)(0x00c0ULL<<32), bitmap_size + arena_size, false);
+		is_reserved = false;
 	}
 	if (p == nil) {
 		// On a 32-bit machine, we can't typically get away
@@ -428,7 +431,8 @@ runtime_mallocinit(void)
 		want = (byte*)(((uintptr)_end + (1<<18) + (1<<20) - 1)&~((1<<20)-1));
 		if(0xffffffff - (uintptr)want <= bitmap_size + arena_size)
 			want = 0;
-		p = runtime_SysReserve(want, bitmap_size + arena_size);
+		p = runtime_SysReserve(want, bitmap_size + arena_size, true);
+		is_reserved = true;
 		if(p == nil)
 			runtime_throw("runtime: cannot reserve arena virtual address space");
 		if((uintptr)p & (((uintptr)1<<PageShift)-1))
@@ -441,6 +445,7 @@ runtime_mallocinit(void)
 	runtime_mheap->arena_start = p + bitmap_size;
 	runtime_mheap->arena_used = runtime_mheap->arena_start;
 	runtime_mheap->arena_end = runtime_mheap->arena_start + arena_size;
+	runtime_mheap->is_reserved = is_reserved;
 
 	// Initialize the rest of the allocator.
 	runtime_MHeap_Init(runtime_mheap, runtime_SysAlloc);
@@ -462,12 +467,15 @@ runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
 		byte *new_end;
 		uintptr needed;
 
+		if (!h->is_reserved) {
+			runtime_throw("runtime: cannot allocate memory");
+		}
 		needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
 		// Round wanted arena size to a multiple of 256MB.
 		needed = (needed + (256<<20) - 1) & ~((256<<20)-1);
 		new_end = h->arena_end + needed;
 		if(new_end <= h->arena_start + MaxArena32) {
-			p = runtime_SysReserve(h->arena_end, new_end - h->arena_end);
+			p = runtime_SysReserve(h->arena_end, new_end - h->arena_end, true);
 			if(p == h->arena_end)
 				h->arena_end = new_end;
 		}
@@ -475,7 +483,7 @@ runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
 	if(n <= (uintptr)(h->arena_end - h->arena_used)) {
 		// Keep taking from our reservation.
 		p = h->arena_used;
-		runtime_SysMap(p, n);
+		runtime_SysMap(p, n, h->is_reserved);
 		h->arena_used += n;
 		runtime_MHeap_MapBits(h);
 		if(raceenabled)
diff --git a/libgo/runtime/malloc.h b/libgo/runtime/malloc.h
index ebea34e..da6845c 100644
--- a/libgo/runtime/malloc.h
+++ b/libgo/runtime/malloc.h
@@ -177,8 +177,8 @@ struct MLink
 void*	runtime_SysAlloc(uintptr nbytes);
 void	runtime_SysFree(void *v, uintptr nbytes);
 void	runtime_SysUnused(void *v, uintptr nbytes);
-void	runtime_SysMap(void *v, uintptr nbytes);
-void*	runtime_SysReserve(void *v, uintptr nbytes);
+void	runtime_SysMap(void *v, uintptr nbytes, bool is_reserved);
+void*	runtime_SysReserve(void *v, uintptr nbytes, bool do_reserve);
 
 // FixAlloc is a simple free-list allocator for fixed size objects.
 // Malloc uses a FixAlloc wrapped around SysAlloc to manages its
@@ -434,6 +434,7 @@ struct MHeap
 
 	FixAlloc spanalloc;	// allocator for Span*
 	FixAlloc cachealloc;	// allocator for MCache*
+	bool	is_reserved;	// is all the address space for this heap actually reserved by us?
 };
 
 extern MHeap *runtime_mheap;
diff --git a/libgo/runtime/mem.c b/libgo/runtime/mem.c
index 8481e95..b45534d 100644
--- a/libgo/runtime/mem.c
+++ b/libgo/runtime/mem.c
@@ -110,7 +110,7 @@ runtime_SysFree(void *v, uintptr n)
 }
 
 void*
-runtime_SysReserve(void *v, uintptr n)
+runtime_SysReserve(void *v, uintptr n, bool do_reserve)
 {
 	int fd = -1;
 	void *p;
@@ -130,7 +130,7 @@ runtime_SysReserve(void *v, uintptr n)
 	// much address space. Instead, assume that the reservation is okay
 	// if we can reserve at least 64K and check the assumption in SysMap.
 	// Only user-mode Linux (UML) rejects these requests.
-	if(sizeof(void*) == 8 && (uintptr)v >= 0xffffffffU) {
+	if(!do_reserve) {
 		p = mmap_fixed(v, 64<<10, PROT_NONE, MAP_ANON|MAP_PRIVATE, fd, 0);
 		if (p != v)
 			return nil;
@@ -149,7 +149,7 @@ runtime_SysReserve(void *v, uintptr n)
 }
 
 void
-runtime_SysMap(void *v, uintptr n)
+runtime_SysMap(void *v, uintptr n, bool is_reserved)
 {
 	void *p;
 	int fd = -1;
@@ -168,7 +168,7 @@ runtime_SysMap(void *v, uintptr n)
 #endif
 
 	// On 64-bit, we don't actually have v reserved, so tread carefully.
-	if(sizeof(void*) == 8 && (uintptr)v >= 0xffffffffU) {
+	if(!is_reserved) {
 		p = mmap_fixed(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, fd, 0);
 		if(p == MAP_FAILED && errno == ENOMEM)
 			runtime_throw("runtime: out of memory");
diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c
index c3b3211..bd2157c 100644
--- a/libgo/runtime/mgc0.c
+++ b/libgo/runtime/mgc0.c
@@ -2483,6 +2483,6 @@ runtime_MHeap_MapBits(MHeap *h)
 	page_size = getpagesize();
 	n = (n+page_size-1) & ~(page_size-1);
 
-	runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped);
+	runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped, h->is_reserved);
 	h->bitmap_mapped = n;
 }
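
Background for reviewers: the patch replaces the old "64-bit implies unreserved" heuristic (sizeof(void*) == 8 && v >= 4GB) with an explicit flag. SysReserve is told whether to actually reserve (do_reserve) and the outcome is recorded in MHeap.is_reserved, which SysMap later consults instead of re-deriving it from the pointer width. The standalone C sketch below illustrates the underlying reserve-then-commit mmap protocol; it is illustrative only. The file name reserve_demo.c, the demo_* helpers, and the 1 MB size are invented for this example; the sketch omits mmap_fixed, USE_DEV_ZERO, and the error paths the real runtime handles.

/* reserve_demo.c: sketch of the reserve/commit protocol, assuming POSIX mmap.
 * demo_reserve and demo_map are invented names, not the runtime's
 * SysReserve/SysMap. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

static void*
demo_reserve(void *hint, size_t n, bool do_reserve)
{
	void *p;

	if(!do_reserve) {
		/* Probe only: check that 64K at the hint is mappable, unmap it,
		 * and optimistically hand back the whole range -- the spirit of
		 * the !do_reserve branch of runtime_SysReserve. */
		p = mmap(hint, 64<<10, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
		if(p == MAP_FAILED)
			return NULL;
		munmap(p, 64<<10);
		return p == hint ? hint : NULL;
	}
	/* True reservation: PROT_NONE pages pin the entire address range. */
	p = mmap(hint, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

static bool
demo_map(void *v, size_t n, bool is_reserved)
{
	int flags = MAP_ANON|MAP_PRIVATE;
	void *p;

	if(is_reserved)
		flags |= MAP_FIXED;	/* safe: we own the PROT_NONE range */
	p = mmap(v, n, PROT_READ|PROT_WRITE, flags, -1, 0);
	if(p == MAP_FAILED)
		return false;
	if(p != v) {
		/* Kernel placed the mapping elsewhere: the address was never
		 * really ours.  The runtime treads just as carefully here. */
		munmap(p, n);
		return false;
	}
	return true;
}

int
main(void)
{
	size_t n = 1<<20;	/* 1 MB arena for the demo */
	void *arena = demo_reserve(NULL, n, true);

	if(arena == NULL || !demo_map(arena, n, true)) {
		fprintf(stderr, "demo: reserve/commit failed\n");
		return 1;
	}
	((char*)arena)[0] = 1;	/* committed memory is now writable */
	printf("arena committed at %p\n", arena);
	munmap(arena, n);
	return 0;
}

Compiled with something like cc -o reserve_demo reserve_demo.c, this should print the arena address. The point is that MAP_FIXED in demo_map is only safe when the PROT_NONE reservation really exists; when the probe-only path was taken, committing must tolerate the kernel choosing a different address. That is exactly the distinction the is_reserved flag threads from mallocinit through MHeap down to SysMap.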