Skip to content

Commit f665e07

Browse files
author
Afshin Zafari
committed
8331540: [BACKOUT] NMT: add/make a mandatory MEMFLAGS argument to family of os::reserve/commit/uncommit memory API
Reviewed-by: jwilhelm
1 parent a10845b commit f665e07

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

62 files changed: +420 −398 lines changed

src/hotspot/cpu/aarch64/compressedKlass_aarch64.cpp

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -69,7 +69,7 @@ static char* reserve_at_eor_compatible_address(size_t size, bool aslr) {
6969
const uint64_t immediate = ((uint64_t)immediates[index]) << 32;
7070
assert(immediate > 0 && Assembler::operand_valid_for_logical_immediate(/*is32*/false, immediate),
7171
"Invalid immediate %d " UINT64_FORMAT, index, immediate);
72-
result = os::attempt_reserve_memory_at((char*)immediate, size, !ExecMem, mtClass);
72+
result = os::attempt_reserve_memory_at((char*)immediate, size, false);
7373
if (result == nullptr) {
7474
log_trace(metaspace, map)("Failed to attach at " UINT64_FORMAT_X, immediate);
7575
}
@@ -112,7 +112,7 @@ char* CompressedKlassPointers::reserve_address_space_for_compressed_classes(size
112112
if (result == nullptr) {
113113
constexpr size_t alignment = nth_bit(32);
114114
log_debug(metaspace, map)("Trying to reserve at a 32-bit-aligned address");
115-
result = os::reserve_memory_aligned(size, alignment, !ExecMem, mtClass);
115+
result = os::reserve_memory_aligned(size, alignment, false);
116116
}
117117

118118
return result;

src/hotspot/os/aix/os_aix.cpp

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -1805,7 +1805,7 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
18051805
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
18061806
}
18071807

1808-
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint, MEMFLAGS flag) {
1808+
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
18091809
}
18101810

18111811
size_t os::pd_pretouch_memory(void* first, void* last, size_t page_size) {
@@ -1847,7 +1847,7 @@ bool os::numa_get_group_ids_for_range(const void** addresses, int* lgrp_ids, siz
18471847
}
18481848

18491849
// Reserves and attaches a shared memory segment.
1850-
char* os::pd_reserve_memory(size_t bytes, bool exec, MEMFLAGS flag) {
1850+
char* os::pd_reserve_memory(size_t bytes, bool exec) {
18511851
// Always round to os::vm_page_size(), which may be larger than 4K.
18521852
bytes = align_up(bytes, os::vm_page_size());
18531853

@@ -1996,7 +1996,7 @@ void os::large_page_init() {
19961996
return; // Nothing to do. See query_multipage_support and friends.
19971997
}
19981998

1999-
char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec, MEMFLAGS flag) {
1999+
char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec) {
20002000
fatal("os::reserve_memory_special should not be called on AIX.");
20012001
return nullptr;
20022002
}
@@ -2015,7 +2015,7 @@ bool os::can_commit_large_page_memory() {
20152015
return false;
20162016
}
20172017

2018-
char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc, MEMFLAGS flag) {
2018+
char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
20192019
assert(file_desc >= 0, "file_desc is not valid");
20202020
char* result = nullptr;
20212021

@@ -2033,7 +2033,7 @@ char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, i
20332033

20342034
// Reserve memory at an arbitrary address, only if that area is
20352035
// available (and not reserved for something else).
2036-
char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec, MEMFLAGS flag) {
2036+
char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
20372037
char* addr = nullptr;
20382038

20392039
// Always round to os::vm_page_size(), which may be larger than 4K.

src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.cpp

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -78,7 +78,7 @@ XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) :
7878
_initialized(false) {
7979

8080
// Reserve address space for backing memory
81-
_base = (uintptr_t)os::reserve_memory(max_capacity, !ExecMem, mtJavaHeap);
81+
_base = (uintptr_t)os::reserve_memory(max_capacity);
8282
if (_base == 0) {
8383
// Failed
8484
log_error_pd(gc)("Failed to reserve address space for backing memory");

src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@ ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity)
7979
_initialized(false) {
8080

8181
// Reserve address space for backing memory
82-
_base = (uintptr_t)os::reserve_memory(max_capacity, !ExecMem, mtJavaHeap);
82+
_base = (uintptr_t)os::reserve_memory(max_capacity);
8383
if (_base == 0) {
8484
// Failed
8585
log_error_pd(gc)("Failed to reserve address space for backing memory");

src/hotspot/os/bsd/os_bsd.cpp

Lines changed: 8 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -1668,7 +1668,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size,
16681668
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
16691669
}
16701670

1671-
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint, MEMFLAGS flag) {
1671+
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
16721672
::madvise(addr, bytes, MADV_DONTNEED);
16731673
}
16741674

@@ -1766,13 +1766,13 @@ bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
17661766
}
17671767

17681768
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
1769-
return os::commit_memory(addr, size, !ExecMem, mtThreadStack);
1769+
return os::commit_memory(addr, size, !ExecMem);
17701770
}
17711771

17721772
// If this is a growable mapping, remove the guard pages entirely by
17731773
// munmap()ping them. If not, just call uncommit_memory().
17741774
bool os::remove_stack_guard_pages(char* addr, size_t size) {
1775-
return os::uncommit_memory(addr, size, !ExecMem, mtThreadStack);
1775+
return os::uncommit_memory(addr, size);
17761776
}
17771777

17781778
// 'requested_addr' is only treated as a hint, the return value may or
@@ -1809,7 +1809,7 @@ static int anon_munmap(char * addr, size_t size) {
18091809
}
18101810
}
18111811

1812-
char* os::pd_reserve_memory(size_t bytes, bool exec, MEMFLAGS flag) {
1812+
char* os::pd_reserve_memory(size_t bytes, bool exec) {
18131813
return anon_mmap(nullptr /* addr */, bytes, exec);
18141814
}
18151815

@@ -1869,7 +1869,7 @@ void os::large_page_init() {
18691869
}
18701870

18711871

1872-
char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec, MEMFLAGS flag) {
1872+
char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec) {
18731873
fatal("os::reserve_memory_special should not be called on BSD.");
18741874
return nullptr;
18751875
}
@@ -1888,9 +1888,9 @@ bool os::can_commit_large_page_memory() {
18881888
return false;
18891889
}
18901890

1891-
char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc, MEMFLAGS flag) {
1891+
char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
18921892
assert(file_desc >= 0, "file_desc is not valid");
1893-
char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem, flag);
1893+
char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem);
18941894
if (result != nullptr) {
18951895
if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == nullptr) {
18961896
vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
@@ -1902,7 +1902,7 @@ char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, i
19021902
// Reserve memory at an arbitrary address, only if that area is
19031903
// available (and not reserved for something else).
19041904

1905-
char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec, MEMFLAGS flag) {
1905+
char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
19061906
// Assert only that the size is a multiple of the page size, since
19071907
// that's all that mmap requires, and since that's all we really know
19081908
// about at this low abstraction level. If we need higher alignment,

src/hotspot/os/linux/os_linux.cpp

Lines changed: 14 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -3023,14 +3023,14 @@ void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
30233023
}
30243024
}
30253025

3026-
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint, MEMFLAGS flag) {
3026+
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
30273027
// This method works by doing an mmap over an existing mmaping and effectively discarding
30283028
// the existing pages. However it won't work for SHM-based large pages that cannot be
30293029
// uncommitted at all. We don't do anything in this case to avoid creating a segment with
30303030
// small pages on top of the SHM segment. This method always works for small pages, so we
30313031
// allow that in any case.
30323032
if (alignment_hint <= os::vm_page_size() || can_commit_large_page_memory()) {
3033-
commit_memory(addr, bytes, alignment_hint, !ExecMem, flag);
3033+
commit_memory(addr, bytes, alignment_hint, !ExecMem);
30343034
}
30353035
}
30363036

@@ -3637,7 +3637,7 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
36373637
}
36383638
}
36393639

3640-
return os::commit_memory(addr, size, !ExecMem, mtThreadStack);
3640+
return os::commit_memory(addr, size, !ExecMem);
36413641
}
36423642

36433643
// If this is a growable mapping, remove the guard pages entirely by
@@ -3653,7 +3653,7 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
36533653
return ::munmap(addr, size) == 0;
36543654
}
36553655

3656-
return os::uncommit_memory(addr, size, !ExecMem, mtThreadStack);
3656+
return os::uncommit_memory(addr, size);
36573657
}
36583658

36593659
// 'requested_addr' is only treated as a hint, the return value may or
@@ -3757,7 +3757,7 @@ static int anon_munmap(char * addr, size_t size) {
37573757
return 1;
37583758
}
37593759

3760-
char* os::pd_reserve_memory(size_t bytes, bool exec, MEMFLAGS flag) {
3760+
char* os::pd_reserve_memory(size_t bytes, bool exec) {
37613761
return anon_mmap(nullptr, bytes);
37623762
}
37633763

@@ -4214,7 +4214,7 @@ static char* reserve_memory_special_huge_tlbfs(size_t bytes,
42144214
}
42154215

42164216
char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size,
4217-
char* req_addr, bool exec, MEMFLAGS flag) {
4217+
char* req_addr, bool exec) {
42184218
assert(UseLargePages, "only for large pages");
42194219

42204220
char* const addr = reserve_memory_special_huge_tlbfs(bytes, alignment, page_size, req_addr, exec);
@@ -4249,9 +4249,9 @@ bool os::can_commit_large_page_memory() {
42494249
return UseTransparentHugePages;
42504250
}
42514251

4252-
char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc, MEMFLAGS flag) {
4252+
char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
42534253
assert(file_desc >= 0, "file_desc is not valid");
4254-
char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem, flag);
4254+
char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem);
42554255
if (result != nullptr) {
42564256
if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == nullptr) {
42574257
vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
@@ -4263,7 +4263,7 @@ char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, i
42634263
// Reserve memory at an arbitrary address, only if that area is
42644264
// available (and not reserved for something else).
42654265

4266-
char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec, MEMFLAGS flag) {
4266+
char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
42674267
// Assert only that the size is a multiple of the page size, since
42684268
// that's all that mmap requires, and since that's all we really know
42694269
// about at this low abstraction level. If we need higher alignment,
@@ -4655,21 +4655,23 @@ static void workaround_expand_exec_shield_cs_limit() {
46554655
*/
46564656
char* hint = (char*)(os::Linux::initial_thread_stack_bottom() -
46574657
(StackOverflow::stack_guard_zone_size() + page_size));
4658-
char* codebuf = os::attempt_reserve_memory_at(hint, page_size, !ExecMem, mtInternal);
4658+
char* codebuf = os::attempt_reserve_memory_at(hint, page_size);
46594659

46604660
if (codebuf == nullptr) {
46614661
// JDK-8197429: There may be a stack gap of one megabyte between
46624662
// the limit of the stack and the nearest memory region: this is a
46634663
// Linux kernel workaround for CVE-2017-1000364. If we failed to
46644664
// map our codebuf, try again at an address one megabyte lower.
46654665
hint -= 1 * M;
4666-
codebuf = os::attempt_reserve_memory_at(hint, page_size, !ExecMem, mtInternal);
4666+
codebuf = os::attempt_reserve_memory_at(hint, page_size);
46674667
}
46684668

4669-
if ((codebuf == nullptr) || (!os::commit_memory(codebuf, page_size, ExecMem, mtInternal))) {
4669+
if ((codebuf == nullptr) || (!os::commit_memory(codebuf, page_size, true))) {
46704670
return; // No matter, we tried, best effort.
46714671
}
46724672

4673+
MemTracker::record_virtual_memory_type((address)codebuf, mtInternal);
4674+
46734675
log_info(os)("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
46744676

46754677
// Some code to exec: the 'ret' instruction

src/hotspot/os/posix/os_posix.cpp

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -395,9 +395,9 @@ static char* chop_extra_memory(size_t size, size_t alignment, char* extra_base,
395395
// Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
396396
// so on posix, unmap the section at the start and at the end of the chunk that we mapped
397397
// rather than unmapping and remapping the whole chunk to get requested alignment.
398-
char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec, MEMFLAGS flag) {
398+
char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
399399
size_t extra_size = calculate_aligned_extra_size(size, alignment);
400-
char* extra_base = os::reserve_memory(extra_size, exec, flag);
400+
char* extra_base = os::reserve_memory(extra_size, exec);
401401
if (extra_base == nullptr) {
402402
return nullptr;
403403
}
@@ -421,7 +421,7 @@ char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_des
421421
if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == nullptr) {
422422
vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
423423
}
424-
MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC, flag);
424+
MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC);
425425
return aligned_base;
426426
}
427427

src/hotspot/os/posix/perfMemory_posix.cpp

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
33
* Copyright (c) 2012, 2021 SAP SE. All rights reserved.
44
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
55
*
@@ -65,14 +65,14 @@ static char* backing_store_file_name = nullptr; // name of the backing store
6565
static char* create_standard_memory(size_t size) {
6666

6767
// allocate an aligned chuck of memory
68-
char* mapAddress = os::reserve_memory(size, !ExecMem, mtInternal);
68+
char* mapAddress = os::reserve_memory(size);
6969

7070
if (mapAddress == nullptr) {
7171
return nullptr;
7272
}
7373

7474
// commit memory
75-
if (!os::commit_memory(mapAddress, size, !ExecMem, mtInternal)) {
75+
if (!os::commit_memory(mapAddress, size, !ExecMem)) {
7676
if (PrintMiscellaneous && Verbose) {
7777
warning("Could not commit PerfData memory\n");
7878
}

0 commit comments

Comments (0)