@@ -1925,21 +1925,10 @@ static void vmembk_print_on(outputStream* os) {
 // If <requested_addr> is not NULL, function will attempt to attach the memory at the given
 // address. Failing that, it will attach the memory anywhere.
 // If <requested_addr> is NULL, function will attach the memory anywhere.
-//
-// <alignment_hint> is being ignored by this function. It is very probable however that the
-// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
-// Should this be not enogh, we can put more work into it.
-static char* reserve_shmated_memory (
-  size_t bytes,
-  char* requested_addr,
-  size_t alignment_hint) {
+static char* reserve_shmated_memory (size_t bytes, char* requested_addr) {
 
   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
-             PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
-             bytes, p2i(requested_addr), alignment_hint);
-
-  // Either give me wish address or wish alignment but not both.
-  assert0(!(requested_addr != NULL && alignment_hint != 0));
+             PTR_FORMAT "...", bytes, p2i(requested_addr));
 
   // We must prevent anyone from attaching too close to the
   // BRK because that may cause malloc OOM.
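
For context, reserve_shmated_memory() wraps SysV shared memory, whose attach semantics explain why the removed alignment_hint was essentially free on AIX: shmat() attaches on 256M segment boundaries there. A minimal standalone sketch of the mechanism, under stated assumptions: reserve_via_shm is a made-up name, only portable flags are used, and the AIX-specific 64K-page setup and error logging of the real function are omitted.

```cpp
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/stat.h>
#include <cstddef>

// reserve_via_shm is a hypothetical helper, not the patched function.
static char* reserve_via_shm(size_t bytes, char* requested_addr) {
  // Create an anonymous SysV segment; the kernel rounds the size up internally.
  int shmid = shmget(IPC_PRIVATE, bytes, IPC_CREAT | S_IRUSR | S_IWUSR);
  if (shmid == -1) {
    return nullptr;
  }

  // Attach at the wish address if given (it must be segment-boundary
  // aligned or shmat fails with EINVAL), anywhere otherwise. On AIX the
  // attach address falls on a 256M boundary, which is why an alignment
  // hint was almost always satisfied anyway.
  char* addr = (char*)shmat(shmid, requested_addr, 0);

  // Mark the id for destruction right away; the attached mapping stays
  // valid until shmdt().
  shmctl(shmid, IPC_RMID, nullptr);

  return (addr == (char*)-1) ? nullptr : addr;
}
```
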
@@ -2061,15 +2050,10 @@ static bool uncommit_shmated_memory(char* addr, size_t size) {
 // Reserve memory via mmap.
 // If <requested_addr> is given, an attempt is made to attach at the given address.
 // Failing that, memory is allocated at any address.
-// If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
-// allocate at an address aligned with the given alignment. Failing that, memory
-// is aligned anywhere.
-static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
-  trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
-             "alignment_hint " UINTX_FORMAT "...",
-             bytes, p2i(requested_addr), alignment_hint);
-
-  // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
+static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
+  trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT "...",
+             bytes, p2i(requested_addr));
+
   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
     trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", p2i(requested_addr));
     return NULL;
@@ -2084,26 +2068,21 @@ static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t al
     requested_addr = NULL;
   }
 
-  // Specify one or the other but not both.
-  assert0(!(requested_addr != NULL && alignment_hint > 0));
-
-  // In 64K mode, we claim the global page size (os::vm_page_size())
-  // is 64K. This is one of the few points where that illusion may
-  // break, because mmap() will always return memory aligned to 4K. So
-  // we must ensure we only ever return memory aligned to 64k.
-  if (alignment_hint) {
-    alignment_hint = lcm(alignment_hint, os::vm_page_size());
-  } else {
-    alignment_hint = os::vm_page_size();
-  }
+  // In 64K mode, we lie and claim the global page size (os::vm_page_size()) is 64K
+  // (complicated story). This mostly works just fine since 64K is a multiple of the
+  // actual 4K lowest page size. Only at a few seams light shines thru, e.g. when
+  // calling mmap. mmap will return memory aligned to the lowest pages size - 4K -
+  // so we must make sure - transparently - that the caller only ever sees 64K
+  // aligned mapping start addresses.
+  const size_t alignment = os::vm_page_size();
 
   // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
   const size_t size = align_up(bytes, os::vm_page_size());
 
   // alignment: Allocate memory large enough to include an aligned range of the right size and
   // cut off the leading and trailing waste pages.
-  assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
-  const size_t extra_size = size + alignment_hint;
+  assert0(alignment != 0 && is_aligned_to(alignment, os::vm_page_size())); // see above
+  const size_t extra_size = size + alignment;
 
   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
   // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
@@ -2131,7 +2110,7 @@ static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t al
   }
 
   // Handle alignment.
-  char* const addr_aligned = align_up(addr, alignment_hint);
+  char* const addr_aligned = align_up(addr, alignment);
   const size_t waste_pre = addr_aligned - addr;
   char* const addr_aligned_end = addr_aligned + size;
   const size_t waste_post = extra_size - waste_pre - size;
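
The waste_pre/waste_post logic above is the standard over-allocate-and-trim technique for getting alignment stronger than mmap's native 4K guarantee. A minimal standalone sketch under stated assumptions: mmap_aligned is a made-up name, alignment must be a power of two (64K in the scenario above), and Linux-flavored MAP_PRIVATE | MAP_ANONYMOUS flags are used rather than the MAP_SHARED the patched function needs for msync.

```cpp
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

// mmap_aligned is a hypothetical helper, not the patched function.
static char* mmap_aligned(size_t size, size_t alignment) {
  // Over-allocate so an aligned block of 'size' bytes is guaranteed to fit.
  const size_t extra_size = size + alignment;
  char* addr = (char*)mmap(nullptr, extra_size, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (addr == MAP_FAILED) {
    return nullptr;
  }

  // First alignment boundary inside the mapping.
  char* aligned = (char*)(((uintptr_t)addr + alignment - 1) &
                          ~(uintptr_t)(alignment - 1));
  const size_t waste_pre  = (size_t)(aligned - addr);
  const size_t waste_post = extra_size - waste_pre - size;

  // Return the waste on both sides to the OS; the aligned middle survives.
  if (waste_pre > 0)  munmap(addr, waste_pre);
  if (waste_post > 0) munmap(aligned + size, waste_post);
  return aligned;
}
```
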
@@ -2347,21 +2326,19 @@ char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info
 }
 
 // Reserves and attaches a shared memory segment.
-char* os::pd_reserve_memory(size_t bytes, size_t alignment_hint) {
+char* os::pd_reserve_memory(size_t bytes) {
   // Always round to os::vm_page_size(), which may be larger than 4K.
   bytes = align_up(bytes, os::vm_page_size());
-  const size_t alignment_hint0 =
-    alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;
 
   // In 4K mode always use mmap.
   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
   if (os::vm_page_size() == 4*K) {
-    return reserve_mmaped_memory(bytes, NULL /* requested_addr */, alignment_hint);
+    return reserve_mmaped_memory(bytes, NULL /* requested_addr */);
   } else {
     if (bytes >= Use64KPagesThreshold) {
-      return reserve_shmated_memory(bytes, NULL /* requested_addr */, alignment_hint);
+      return reserve_shmated_memory(bytes, NULL /* requested_addr */);
     } else {
-      return reserve_mmaped_memory(bytes, NULL /* requested_addr */, alignment_hint);
+      return reserve_mmaped_memory(bytes, NULL /* requested_addr */);
     }
   }
 }
@@ -2538,7 +2515,7 @@ char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, int f
 
   // Always round to os::vm_page_size(), which may be larger than 4K.
   bytes = align_up(bytes, os::vm_page_size());
-  result = reserve_mmaped_memory(bytes, requested_addr, 0);
+  result = reserve_mmaped_memory(bytes, requested_addr);
 
   if (result != NULL) {
     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
@@ -2559,12 +2536,12 @@ char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes) {
   // In 4K mode always use mmap.
   // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
   if (os::vm_page_size() == 4*K) {
-    return reserve_mmaped_memory(bytes, requested_addr, 0);
+    return reserve_mmaped_memory(bytes, requested_addr);
   } else {
     if (bytes >= Use64KPagesThreshold) {
-      return reserve_shmated_memory(bytes, requested_addr, 0);
+      return reserve_shmated_memory(bytes, requested_addr);
     } else {
-      return reserve_mmaped_memory(bytes, requested_addr, 0);
+      return reserve_mmaped_memory(bytes, requested_addr);
     }
   }
 