diff --git a/src/hotspot/share/classfile/vmClassMacros.hpp b/src/hotspot/share/classfile/vmClassMacros.hpp index 9f86d39c432..bb2babff25a 100644 --- a/src/hotspot/share/classfile/vmClassMacros.hpp +++ b/src/hotspot/share/classfile/vmClassMacros.hpp @@ -31,7 +31,7 @@ #define VM_CLASS_ID(kname) vmClassID::_VM_CLASS_ENUM(kname) // VM_CLASSES_DO iterates the classes that are directly referenced -// by the VM, suhch as java.lang.Object and java.lang.String. These +// by the VM, such as java.lang.Object and java.lang.String. These // classes are resolved at VM bootstrap, before any Java code is executed, // so no class loader is able to provide a different definition. // diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp index b344d5ed932..1677597e72b 100644 --- a/src/hotspot/share/classfile/vmSymbols.hpp +++ b/src/hotspot/share/classfile/vmSymbols.hpp @@ -760,7 +760,7 @@ template(decodeAndThrowThrowable_name, "decodeAndThrowThrowable") \ template(encodeAnnotations_name, "encodeAnnotations") \ template(encodeAnnotations_signature, "([BLjava/lang/Class;Ljdk/internal/reflect/ConstantPool;Z[Ljava/lang/Class;)[B")\ - template(decodeAndThrowThrowable_signature, "(JZ)V") \ + template(decodeAndThrowThrowable_signature, "(IJZ)V") \ template(classRedefinedCount_name, "classRedefinedCount") \ template(classLoader_name, "classLoader") \ template(componentType_name, "componentType") \ diff --git a/src/hotspot/share/gc/z/zGlobals.hpp b/src/hotspot/share/gc/z/zGlobals.hpp index 0ca08942080..f7c0a3eaa26 100644 --- a/src/hotspot/share/gc/z/zGlobals.hpp +++ b/src/hotspot/share/gc/z/zGlobals.hpp @@ -38,6 +38,9 @@ const size_t ZGranuleSize = (size_t)1 << ZGranuleSizeShift // Virtual memory to physical memory ratio const size_t ZVirtualToPhysicalRatio = 16; // 16:1 +// Max virtual memory ranges +const size_t ZMaxVirtualReservations = 100; // Each reservation at least 1% of total + // Page size shifts const size_t ZPageSizeSmallShift = ZGranuleSizeShift; extern size_t ZPageSizeMediumShift; diff --git a/src/hotspot/share/gc/z/zMemory.cpp b/src/hotspot/share/gc/z/zMemory.cpp index d665d1181b0..7bc6b7379c5 100644 --- a/src/hotspot/share/gc/z/zMemory.cpp +++ b/src/hotspot/share/gc/z/zMemory.cpp @@ -81,6 +81,10 @@ ZMemoryManager::ZMemoryManager() : _freelist(), _callbacks() {} +bool ZMemoryManager::free_is_contiguous() const { + return _freelist.size() == 1; +} + void ZMemoryManager::register_callbacks(const Callbacks& callbacks) { _callbacks = callbacks; } diff --git a/src/hotspot/share/gc/z/zMemory.hpp b/src/hotspot/share/gc/z/zMemory.hpp index e75ac071d1d..ce9100f8026 100644 --- a/src/hotspot/share/gc/z/zMemory.hpp +++ b/src/hotspot/share/gc/z/zMemory.hpp @@ -81,6 +81,8 @@ class ZMemoryManager { public: ZMemoryManager(); + bool free_is_contiguous() const; + void register_callbacks(const Callbacks& callbacks); zoffset peek_low_address() const; diff --git a/src/hotspot/share/gc/z/zNMT.cpp b/src/hotspot/share/gc/z/zNMT.cpp new file mode 100644 index 00000000000..d8753dc2283 --- /dev/null +++ b/src/hotspot/share/gc/z/zNMT.cpp @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zNMT.hpp"
+#include "gc/z/zVirtualMemory.hpp"
+#include "memory/allocation.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/nativeCallStack.hpp"
+
+ZNMT::Reservation ZNMT::_reservations[ZMaxVirtualReservations] = {};
+size_t ZNMT::_num_reservations = 0;
+
+size_t ZNMT::reservation_index(zoffset offset, size_t* offset_in_reservation) {
+  assert(_num_reservations > 0, "at least one reservation must exist");
+
+  size_t index = 0;
+  *offset_in_reservation = untype(offset);
+  for (; index < _num_reservations; ++index) {
+    const size_t reservation_size = _reservations[index]._size;
+    if (*offset_in_reservation < reservation_size) {
+      break;
+    }
+    *offset_in_reservation -= reservation_size;
+  }
+
+  assert(index != _num_reservations, "failed to find reservation index");
+  return index;
+}
+
+void ZNMT::process_fake_mapping(zoffset offset, size_t size, bool commit) {
+  // In order to satisfy NMT's requirement of a 1:1 mapping between committed
+  // and reserved addresses, a fake mapping from the offset into the reservation
+  // is used.
+  //
+  // These mappings from
+  // [offset, offset + size) -> {[virtual address range], ...}
+  // are stable after the heap has been reserved. No commits precede any
+  // reservations. Committing and uncommitting the same [offset, offset + size)
+  // range will result in the same virtual memory ranges.
+
+  size_t left_to_process = size;
+  size_t offset_in_reservation;
+  for (size_t i = reservation_index(offset, &offset_in_reservation); i < _num_reservations; ++i) {
+    const zaddress_unsafe reservation_start = _reservations[i]._start;
+    const size_t reservation_size = _reservations[i]._size;
+    const size_t sub_range_size = MIN2(left_to_process, reservation_size - offset_in_reservation);
+    const uintptr_t sub_range_addr = untype(reservation_start) + offset_in_reservation;
+
+    // Commit / uncommit memory
+    if (commit) {
+      MemTracker::record_virtual_memory_commit((void*)sub_range_addr, sub_range_size, CALLER_PC);
+    } else {
+      if (MemTracker::enabled()) {
+        Tracker tracker(Tracker::uncommit);
+        tracker.record((address)sub_range_addr, sub_range_size);
+      }
+    }
+
+    left_to_process -= sub_range_size;
+    if (left_to_process == 0) {
+      // Processed all NMT registrations
+      return;
+    }
+
+    offset_in_reservation = 0;
+  }
+
+  assert(left_to_process == 0, "not all memory was processed");
+}
+
+void ZNMT::reserve(zaddress_unsafe start, size_t size) {
+  assert(_num_reservations < ZMaxVirtualReservations, "too many reservations");
+  // Keep track of the reservations made in order to create fake mappings
+  // between the reserved and committed memory.
+ // See details in ZNMT::process_fake_mapping + _reservations[_num_reservations++] = {start, size}; + + MemTracker::record_virtual_memory_reserve((void*)untype(start), size, CALLER_PC, mtJavaHeap); +} + +void ZNMT::commit(zoffset offset, size_t size) { + // NMT expects a 1-to-1 mapping between virtual and physical memory. + // ZGC can temporarily have multiple virtual addresses pointing to + // the same physical memory. + // + // When this function is called we don't know where in the virtual memory + // this physical memory will be mapped. So we fake the virtual memory + // address by mapping the physical offset into offsets in the reserved + // memory space. + process_fake_mapping(offset, size, true); +} + +void ZNMT::uncommit(zoffset offset, size_t size) { + // We fake the virtual memory address by mapping the physical offset + // into offsets in the reserved memory space. + // See comment in ZNMT::commit + process_fake_mapping(offset, size, false); +} diff --git a/src/hotspot/share/gc/z/zNMT.hpp b/src/hotspot/share/gc/z/zNMT.hpp new file mode 100644 index 00000000000..118f8654ade --- /dev/null +++ b/src/hotspot/share/gc/z/zNMT.hpp @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZNMT_HPP +#define SHARE_GC_Z_ZNMT_HPP + +#include "gc/z/zAddress.hpp" +#include "gc/z/zGlobals.hpp" +#include "gc/z/zMemory.hpp" +#include "gc/z/zVirtualMemory.hpp" +#include "memory/allStatic.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/nativeCallStack.hpp" + +class ZNMT : public AllStatic { +private: + struct Reservation { + zaddress_unsafe _start; + size_t _size; + }; + static Reservation _reservations[ZMaxVirtualReservations]; + static size_t _num_reservations; + + static size_t reservation_index(zoffset offset, size_t* offset_in_reservation); + static void process_fake_mapping(zoffset offset, size_t size, bool commit); + +public: + static void reserve(zaddress_unsafe start, size_t size); + static void commit(zoffset offset, size_t size); + static void uncommit(zoffset offset, size_t size); +}; + +#endif // SHARE_GC_Z_ZNMT_HPP diff --git a/src/hotspot/share/gc/z/zPhysicalMemory.cpp b/src/hotspot/share/gc/z/zPhysicalMemory.cpp index 7631708df86..c0e0e837c9c 100644 --- a/src/hotspot/share/gc/z/zPhysicalMemory.cpp +++ b/src/hotspot/share/gc/z/zPhysicalMemory.cpp @@ -27,6 +27,7 @@ #include "gc/z/zArray.inline.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zLargePages.inline.hpp" +#include "gc/z/zNMT.hpp" #include "gc/z/zNUMA.inline.hpp" #include "gc/z/zPhysicalMemory.inline.hpp" #include "logging/log.hpp" @@ -34,7 +35,6 @@ #include "runtime/globals_extension.hpp" #include "runtime/init.hpp" #include "runtime/os.hpp" -#include "services/memTracker.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" @@ -275,26 +275,6 @@ void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max log_info_p(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay); } -void ZPhysicalMemoryManager::nmt_commit(zoffset offset, size_t size) const { - // NMT expects a 1-to-1 mapping between virtual and physical memory. - // ZGC can temporarily have multiple virtual addresses pointing to - // the same physical memory. - // - // When this function is called we don't know where in the virtual memory - // this physical memory will be mapped. So we fake that the virtual memory - // address is the heap base + the given offset. 
-  const uintptr_t addr = ZAddressHeapBase + untype(offset);
-  MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
-}
-
-void ZPhysicalMemoryManager::nmt_uncommit(zoffset offset, size_t size) const {
-  if (MemTracker::enabled()) {
-    const uintptr_t addr = ZAddressHeapBase + untype(offset);
-    Tracker tracker(Tracker::uncommit);
-    tracker.record((address)addr, size);
-  }
-}
-
 void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {
   assert(is_aligned(size, ZGranuleSize), "Invalid size");
 
@@ -329,7 +309,7 @@ bool ZPhysicalMemoryManager::commit(ZPhysicalMemory& pmem) {
     const size_t committed = _backing.commit(segment.start(), segment.size());
 
     // Register with NMT
-    nmt_commit(segment.start(), committed);
+    ZNMT::commit(segment.start(), committed);
 
     // Register committed segment
     if (!pmem.commit_segment(i, committed)) {
@@ -355,7 +335,7 @@ bool ZPhysicalMemoryManager::uncommit(ZPhysicalMemory& pmem) {
     const size_t uncommitted = _backing.uncommit(segment.start(), segment.size());
 
     // Unregister with NMT
-    nmt_uncommit(segment.start(), uncommitted);
+    ZNMT::uncommit(segment.start(), uncommitted);
 
     // Deregister uncommitted segment
     if (!pmem.uncommit_segment(i, uncommitted)) {
diff --git a/src/hotspot/share/gc/z/zPhysicalMemory.hpp b/src/hotspot/share/gc/z/zPhysicalMemory.hpp
index 0cf77dad739..2244732a146 100644
--- a/src/hotspot/share/gc/z/zPhysicalMemory.hpp
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.hpp
@@ -84,9 +84,6 @@ class ZPhysicalMemoryManager {
   ZPhysicalMemoryBacking _backing;
   ZMemoryManager _manager;
 
-  void nmt_commit(zoffset offset, size_t size) const;
-  void nmt_uncommit(zoffset offset, size_t size) const;
-
   void pretouch_view(zaddress addr, size_t size) const;
   void map_view(zaddress_unsafe addr, const ZPhysicalMemory& pmem) const;
   void unmap_view(zaddress_unsafe addr, size_t size) const;
diff --git a/src/hotspot/share/gc/z/zVirtualMemory.cpp b/src/hotspot/share/gc/z/zVirtualMemory.cpp
index 37eb42ddb12..6b53b2ba7c8 100644
--- a/src/hotspot/share/gc/z/zVirtualMemory.cpp
+++ b/src/hotspot/share/gc/z/zVirtualMemory.cpp
@@ -22,12 +22,13 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/shared/gc_globals.hpp"
 #include "gc/shared/gcLogPrecious.hpp"
 #include "gc/z/zAddress.inline.hpp"
 #include "gc/z/zAddressSpaceLimit.hpp"
 #include "gc/z/zGlobals.hpp"
+#include "gc/z/zNMT.hpp"
 #include "gc/z/zVirtualMemory.inline.hpp"
-#include "services/memTracker.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
 
@@ -54,6 +55,39 @@ ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity)
   _initialized = true;
 }
 
+#ifdef ASSERT
+size_t ZVirtualMemoryManager::force_reserve_discontiguous(size_t size) {
+  const size_t min_range = calculate_min_range(size);
+  const size_t max_range = MAX2(align_down(size / ZForceDiscontiguousHeapReservations, ZGranuleSize), min_range);
+  size_t reserved = 0;
+
+  // Try to reserve ZForceDiscontiguousHeapReservations number of virtual
+  // memory ranges, starting at higher addresses.
+ uintptr_t end = ZAddressOffsetMax; + while (reserved < size && end >= max_range) { + const size_t remaining = size - reserved; + const size_t reserve_size = MIN2(max_range, remaining); + const uintptr_t reserve_start = end - reserve_size; + + if (reserve_contiguous(to_zoffset(reserve_start), reserve_size)) { + reserved += reserve_size; + } + + end -= reserve_size * 2; + } + + // If (reserved < size) attempt to reserve the rest via normal divide and conquer + uintptr_t start = 0; + while (reserved < size && start < ZAddressOffsetMax) { + const size_t remaining = MIN2(size - reserved, ZAddressOffsetMax - start); + reserved += reserve_discontiguous(to_zoffset(start), remaining, min_range); + start += remaining; + } + + return reserved; +} +#endif + size_t ZVirtualMemoryManager::reserve_discontiguous(zoffset start, size_t size, size_t min_range) { if (size < min_range) { // Too small @@ -75,15 +109,20 @@ size_t ZVirtualMemoryManager::reserve_discontiguous(zoffset start, size_t size, // Divide and conquer const size_t first_part = align_down(half, ZGranuleSize); const size_t second_part = size - first_part; - return reserve_discontiguous(start, first_part, min_range) + - reserve_discontiguous(start + first_part, second_part, min_range); + const size_t first_size = reserve_discontiguous(start, first_part, min_range); + const size_t second_size = reserve_discontiguous(start + first_part, second_part, min_range); + return first_size + second_size; } -size_t ZVirtualMemoryManager::reserve_discontiguous(size_t size) { +size_t ZVirtualMemoryManager::calculate_min_range(size_t size) { // Don't try to reserve address ranges smaller than 1% of the requested size. // This avoids an explosion of reservation attempts in case large parts of the // address space is already occupied. 
- const size_t min_range = align_up(size / 100, ZGranuleSize); + return align_up(size / ZMaxVirtualReservations, ZGranuleSize); +} + +size_t ZVirtualMemoryManager::reserve_discontiguous(size_t size) { + const size_t min_range = calculate_min_range(size); uintptr_t start = 0; size_t reserved = 0; @@ -98,7 +137,7 @@ size_t ZVirtualMemoryManager::reserve_discontiguous(size_t size) { } bool ZVirtualMemoryManager::reserve_contiguous(zoffset start, size_t size) { - assert(is_aligned(size, ZGranuleSize), "Must be granule aligned"); + assert(is_aligned(size, ZGranuleSize), "Must be granule aligned " SIZE_FORMAT_X, size); // Reserve address views const zaddress_unsafe addr = ZOffset::address_unsafe(start); @@ -109,7 +148,7 @@ bool ZVirtualMemoryManager::reserve_contiguous(zoffset start, size_t size) { } // Register address views with native memory tracker - nmt_reserve(addr, size); + ZNMT::reserve(addr, size); // Make the address range free _manager.free(start, size); @@ -137,15 +176,25 @@ bool ZVirtualMemoryManager::reserve(size_t max_capacity) { const size_t limit = MIN2(ZAddressOffsetMax, ZAddressSpaceLimit::heap()); const size_t size = MIN2(max_capacity * ZVirtualToPhysicalRatio, limit); - size_t reserved = size; - bool contiguous = true; + auto do_reserve = [&]() { +#ifdef ASSERT + if (ZForceDiscontiguousHeapReservations > 0) { + return force_reserve_discontiguous(size); + } +#endif + + // Prefer a contiguous address space + if (reserve_contiguous(size)) { + return size; + } - // Prefer a contiguous address space - if (!reserve_contiguous(size)) { // Fall back to a discontiguous address space - reserved = reserve_discontiguous(size); - contiguous = false; - } + return reserve_discontiguous(size); + }; + + const size_t reserved = do_reserve(); + + const bool contiguous = _manager.free_is_contiguous(); log_info_p(gc, init)("Address Space Type: %s/%s/%s", (contiguous ? 
"Contiguous" : "Discontiguous"), @@ -159,11 +208,6 @@ bool ZVirtualMemoryManager::reserve(size_t max_capacity) { return reserved >= max_capacity; } -void ZVirtualMemoryManager::nmt_reserve(zaddress_unsafe start, size_t size) { - MemTracker::record_virtual_memory_reserve((void*)untype(start), size, CALLER_PC); - MemTracker::record_virtual_memory_type((void*)untype(start), mtJavaHeap); -} - bool ZVirtualMemoryManager::is_initialized() const { return _initialized; } @@ -179,6 +223,10 @@ ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool force_low_address) start = _manager.alloc_high_address(size); } + if (start == zoffset(UINTPTR_MAX)) { + return ZVirtualMemory(); + } + return ZVirtualMemory(start, size); } diff --git a/src/hotspot/share/gc/z/zVirtualMemory.hpp b/src/hotspot/share/gc/z/zVirtualMemory.hpp index 153cad81be4..faf7cdc6216 100644 --- a/src/hotspot/share/gc/z/zVirtualMemory.hpp +++ b/src/hotspot/share/gc/z/zVirtualMemory.hpp @@ -24,6 +24,7 @@ #ifndef SHARE_GC_Z_ZVIRTUALMEMORY_HPP #define SHARE_GC_Z_ZVIRTUALMEMORY_HPP +#include "gc/z/zAddress.hpp" #include "gc/z/zMemory.hpp" class ZVirtualMemory { @@ -47,6 +48,8 @@ class ZVirtualMemory { class ZVirtualMemoryManager { private: + static size_t calculate_min_range(size_t size); + ZMemoryManager _manager; size_t _reserved; bool _initialized; @@ -63,7 +66,7 @@ class ZVirtualMemoryManager { size_t reserve_discontiguous(size_t size); bool reserve(size_t max_capacity); - void nmt_reserve(zaddress_unsafe start, size_t size); + DEBUG_ONLY(size_t force_reserve_discontiguous(size_t size);) public: ZVirtualMemoryManager(size_t max_capacity); diff --git a/src/hotspot/share/gc/z/z_globals.hpp b/src/hotspot/share/gc/z/z_globals.hpp index 1ff63ca9c6c..01fb0e0a91f 100644 --- a/src/hotspot/share/gc/z/z_globals.hpp +++ b/src/hotspot/share/gc/z/z_globals.hpp @@ -24,7 +24,8 @@ #ifndef SHARE_GC_Z_Z_GLOBALS_HPP #define SHARE_GC_Z_Z_GLOBALS_HPP -#include "zPageAge.hpp" +#include "gc/z/zGlobals.hpp" +#include "gc/z/zPageAge.hpp" #define GC_Z_FLAGS(develop, \ develop_pd, \ @@ -68,7 +69,17 @@ \ product(int, ZTenuringThreshold, -1, DIAGNOSTIC, \ "Young generation tenuring threshold, -1 for dynamic computation")\ - range(-1, static_cast(ZPageAgeMax)) + range(-1, static_cast(ZPageAgeMax)) \ + \ + develop(size_t, ZForceDiscontiguousHeapReservations, 0, \ + "The gc will attempt to split the heap reservation into this " \ + "many reservations, subject to available virtual address space " \ + "and invariant restrictions. 
Higher virtual addresses are " \ + "preferred " \ + "0: Disabled " \ + "1: Attempt contiguous reservation starting at a higher address " \ + "N: Force that many reservations, if possible") \ + range(0, ZMaxVirtualReservations) // end of GC_Z_FLAGS diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp index 247551e1301..9e161892046 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp @@ -582,7 +582,7 @@ C2V_VMENTRY_NULL(jobject, lookupType, (JNIEnv* env, jobject, jstring jname, ARGU TempNewSymbol class_name = SymbolTable::new_symbol(str); if (class_name->utf8_length() <= 1) { - JVMCI_THROW_MSG_0(InternalError, err_msg("Primitive type %s should be handled in Java code", class_name->as_C_string())); + JVMCI_THROW_MSG_0(InternalError, err_msg("Primitive type %s should be handled in Java code", str)); } JVMCIKlassHandle resolved_klass(THREAD); diff --git a/src/hotspot/share/jvmci/jvmciEnv.cpp b/src/hotspot/share/jvmci/jvmciEnv.cpp index 430b7a59262..32759a1889a 100644 --- a/src/hotspot/share/jvmci/jvmciEnv.cpp +++ b/src/hotspot/share/jvmci/jvmciEnv.cpp @@ -446,6 +446,15 @@ bool JVMCIEnv::pending_exception_as_string(const char** to_string, const char** // Shared code for translating an exception from HotSpot to libjvmci or vice versa. class ExceptionTranslation: public StackObj { protected: + enum DecodeFormat { + _encoded_ok = 0, // exception was successfully encoded into buffer + _buffer_alloc_fail = 1, // native memory for buffer could not be allocated + _encode_oome_fail = 2, // OutOfMemoryError thrown during encoding + _encode_fail = 3 // some other problem occured during encoding. If buffer != 0, + // buffer contains a `struct { u4 len; char[len] desc}` + // describing the problem + }; + JVMCIEnv* _from_env; // Source of translation. Can be null. JVMCIEnv* _to_env; // Destination of translation. Never null. @@ -454,49 +463,34 @@ class ExceptionTranslation: public StackObj { // Encodes the exception in `_from_env` into `buffer`. // Where N is the number of bytes needed for the encoding, returns N if N <= `buffer_size` // and the encoding was written to `buffer` otherwise returns -N. - virtual int encode(JavaThread* THREAD, Klass* vmSupport, jlong buffer, int buffer_size) = 0; + virtual int encode(JavaThread* THREAD, jlong buffer, int buffer_size) = 0; // Decodes the exception in `buffer` in `_to_env` and throws it. - virtual void decode(JavaThread* THREAD, Klass* vmSupport, jlong buffer) = 0; + virtual void decode(JavaThread* THREAD, DecodeFormat format, jlong buffer) = 0; public: void doit(JavaThread* THREAD) { - // Resolve VMSupport class explicitly as HotSpotJVMCI::compute_offsets - // may not have been called. 
- Klass* vmSupport = SystemDictionary::resolve_or_fail(vmSymbols::jdk_internal_vm_VMSupport(), true, THREAD); - guarantee(!HAS_PENDING_EXCEPTION, ""); - int buffer_size = 2048; while (true) { ResourceMark rm; jlong buffer = (jlong) NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, jbyte, buffer_size); if (buffer == 0L) { - decode(THREAD, vmSupport, 0L); + JVMCI_event_1("error translating exception: translation buffer allocation failed"); + decode(THREAD, _buffer_alloc_fail, 0L); return; } - int res = encode(THREAD, vmSupport, buffer, buffer_size); - if (_from_env != nullptr && !_from_env->is_hotspot() && _from_env->has_pending_exception()) { - // Cannot get name of exception thrown by `encode` as that involves - // calling into libjvmci which in turn can raise another exception. - _from_env->clear_pending_exception(); - decode(THREAD, vmSupport, -2L); - return; - } else if (HAS_PENDING_EXCEPTION) { - Symbol *ex_name = PENDING_EXCEPTION->klass()->name(); - CLEAR_PENDING_EXCEPTION; - if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { - decode(THREAD, vmSupport, -1L); - } else { - decode(THREAD, vmSupport, -2L); - } + int res = encode(THREAD, buffer, buffer_size); + if (_to_env->has_pending_exception()) { + // Propagate pending exception return; - } else if (res < 0) { + } + if (res < 0) { int required_buffer_size = -res; if (required_buffer_size > buffer_size) { buffer_size = required_buffer_size; } } else { - decode(THREAD, vmSupport, buffer); + decode(THREAD, _encoded_ok, buffer); if (!_to_env->has_pending_exception()) { _to_env->throw_InternalError("decodeAndThrowThrowable should have thrown an exception"); } @@ -511,7 +505,26 @@ class HotSpotToSharedLibraryExceptionTranslation : public ExceptionTranslation { private: const Handle& _throwable; - int encode(JavaThread* THREAD, Klass* vmSupport, jlong buffer, int buffer_size) { + int encode(JavaThread* THREAD, jlong buffer, int buffer_size) { + Klass* vmSupport = SystemDictionary::resolve_or_fail(vmSymbols::jdk_internal_vm_VMSupport(), true, THREAD); + if (HAS_PENDING_EXCEPTION) { + Handle throwable = Handle(THREAD, PENDING_EXCEPTION); + Symbol *ex_name = throwable->klass()->name(); + CLEAR_PENDING_EXCEPTION; + if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { + JVMCI_event_1("error translating exception: OutOfMemoryError"); + decode(THREAD, _encode_oome_fail, 0L); + } else { + char* char_buffer = (char*) buffer + 4; + stringStream st(char_buffer, (size_t) buffer_size - 4); + java_lang_Throwable::print_stack_trace(throwable, &st); + u4 len = (u4) st.size(); + *((u4*) buffer) = len; + JVMCI_event_1("error translating exception: %s", char_buffer); + decode(THREAD, _encode_fail, buffer); + } + return 0; + } JavaCallArguments jargs; jargs.push_oop(_throwable); jargs.push_long(buffer); @@ -524,11 +537,11 @@ class HotSpotToSharedLibraryExceptionTranslation : public ExceptionTranslation { return result.get_jint(); } - void decode(JavaThread* THREAD, Klass* vmSupport, jlong buffer) { + void decode(JavaThread* THREAD, DecodeFormat format, jlong buffer) { JNIAccessMark jni(_to_env, THREAD); jni()->CallStaticVoidMethod(JNIJVMCI::VMSupport::clazz(), JNIJVMCI::VMSupport::decodeAndThrowThrowable_method(), - buffer, false); + format, buffer, false); } public: HotSpotToSharedLibraryExceptionTranslation(JVMCIEnv* hotspot_env, JVMCIEnv* jni_env, const Handle& throwable) : @@ -540,15 +553,25 @@ class SharedLibraryToHotSpotExceptionTranslation : public ExceptionTranslation { private: jthrowable _throwable; - int encode(JavaThread* THREAD, Klass* 
vmSupport, jlong buffer, int buffer_size) { + int encode(JavaThread* THREAD, jlong buffer, int buffer_size) { JNIAccessMark jni(_from_env, THREAD); - return jni()->CallStaticIntMethod(JNIJVMCI::VMSupport::clazz(), + int res = jni()->CallStaticIntMethod(JNIJVMCI::VMSupport::clazz(), JNIJVMCI::VMSupport::encodeThrowable_method(), _throwable, buffer, buffer_size); + if (jni()->ExceptionCheck()) { + // Cannot get name of exception thrown as that can raise another exception. + jni()->ExceptionClear(); + JVMCI_event_1("error translating exception: unknown error"); + decode(THREAD, _encode_fail, 0L); + return 0; + } + return res; } - void decode(JavaThread* THREAD, Klass* vmSupport, jlong buffer) { + void decode(JavaThread* THREAD, DecodeFormat format, jlong buffer) { + Klass* vmSupport = SystemDictionary::resolve_or_fail(vmSymbols::jdk_internal_vm_VMSupport(), true, CHECK); JavaCallArguments jargs; + jargs.push_int(format); jargs.push_long(buffer); jargs.push_int(true); JavaValue result(T_VOID); diff --git a/src/hotspot/share/opto/loopTransform.cpp b/src/hotspot/share/opto/loopTransform.cpp index 775299eb0c2..7c3f5841f3f 100644 --- a/src/hotspot/share/opto/loopTransform.cpp +++ b/src/hotspot/share/opto/loopTransform.cpp @@ -2043,12 +2043,6 @@ bool IdealLoopTree::is_invariant(Node* n) const { // to the new stride. void PhaseIdealLoop::update_main_loop_assertion_predicates(Node* ctrl, CountedLoopNode* loop_head, Node* init, const int stride_con) { - if (init->Opcode() == Op_CastII) { - // skip over the cast added by PhaseIdealLoop::cast_incr_before_loop() when pre/post/main loops are created because - // it can get in the way of type propagation - assert(((CastIINode*)init)->carry_dependency() && loop_head->skip_predicates() == init->in(0), "casted iv phi from pre loop expected"); - init = init->in(1); - } Node* entry = ctrl; Node* prev_proj = ctrl; LoopNode* outer_loop_head = loop_head->skip_strip_mined(); diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp index 505b8cab05f..286f901bd5d 100644 --- a/src/hotspot/share/opto/memnode.cpp +++ b/src/hotspot/share/opto/memnode.cpp @@ -2378,7 +2378,7 @@ const Type* LoadNode::klass_value_common(PhaseGVN* phase) const { // The array's TypeKlassPtr was declared 'precise' or 'not precise' // according to the element type's subclassing. 
- return tkls->is_aryklassptr()->elem(); + return tkls->is_aryklassptr()->elem()->isa_klassptr()->cast_to_exactness(tkls->klass_is_exact()); } if (tkls->isa_instklassptr() != nullptr && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::super_offset())) { diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp index cc9df0a3276..e82e4de0f66 100644 --- a/src/hotspot/share/prims/jvmtiEnv.cpp +++ b/src/hotspot/share/prims/jvmtiEnv.cpp @@ -575,7 +575,7 @@ JvmtiEnv::SetEventNotificationMode(jvmtiEventMode mode, jvmtiEvent event_type, j if (event_type == JVMTI_EVENT_CLASS_FILE_LOAD_HOOK && enabled) { record_class_file_load_hook_enabled(); } - JvmtiVTMSTransitionDisabler disabler(event_thread); + JvmtiVTMSTransitionDisabler disabler; if (event_thread == nullptr) { // Can be called at Agent_OnLoad() time with event_thread == nullptr diff --git a/src/java.base/macosx/native/libosxsecurity/KeystoreImpl.m b/src/java.base/macosx/native/libosxsecurity/KeystoreImpl.m index b4e19a27995..b4f13a80d96 100644 --- a/src/java.base/macosx/native/libosxsecurity/KeystoreImpl.m +++ b/src/java.base/macosx/native/libosxsecurity/KeystoreImpl.m @@ -363,7 +363,9 @@ static void addIdentitiesToKeystore(JNIEnv *env, jobject keyStore) // Call back to the Java object to create Java objects corresponding to this security object. jlong nativeKeyRef = ptr_to_jlong(privateKeyRef); (*env)->CallVoidMethod(env, keyStore, jm_createKeyEntry, alias, creationDate, nativeKeyRef, certRefArray, javaCertArray); - JNU_CHECK_EXCEPTION(env); + if ((*env)->ExceptionCheck(env)) { + goto errOut; + } } } while (searchResult == noErr); @@ -505,7 +507,9 @@ static void addCertificatesToKeystore(JNIEnv *env, jobject keyStore) // Call back to the Java object to create Java objects corresponding to this security object. jlong nativeRef = ptr_to_jlong(certRef); (*env)->CallVoidMethod(env, keyStore, jm_createTrustedCertEntry, alias, inputTrust, nativeRef, creationDate, certData); - JNU_CHECK_EXCEPTION(env); + if ((*env)->ExceptionCheck(env)) { + goto errOut; + } } } while (searchResult == noErr); diff --git a/src/java.base/share/classes/java/lang/Class.java b/src/java.base/share/classes/java/lang/Class.java index c036f7400b9..ddc5a55f0b2 100644 --- a/src/java.base/share/classes/java/lang/Class.java +++ b/src/java.base/share/classes/java/lang/Class.java @@ -170,7 +170,7 @@ * may be hidden interfaces. * * The {@linkplain #getName() name of a hidden class or interface} is - * not a binary name, + * not a {@linkplain ClassLoader##binary-name binary name}, * which means the following: *
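A note on the fake-mapping scheme introduced in zNMT.cpp above: the offset-to-reservation translation can be illustrated with the small standalone C++ sketch below. This sketch is not part of the patch; the reservation layout and the printf reporting are hypothetical stand-ins (the real code reports each sub-range to NMT through MemTracker), but the walk over the reservations mirrors ZNMT::reservation_index and ZNMT::process_fake_mapping.

// Standalone sketch (not JDK code) of how a committed physical offset range
// is attributed to per-reservation virtual address sub-ranges.
#include <algorithm>
#include <cinttypes>
#include <cstdint>
#include <cstdio>

struct Reservation {
  uintptr_t start; // base virtual address of the reservation (hypothetical)
  size_t    size;  // bytes covered by the reservation
};

// Attribute the committed range [offset, offset + size) to virtual address
// sub-ranges, walking the reservations in order.
static void fake_mapping(const Reservation* res, size_t n, size_t offset, size_t size) {
  size_t i = 0;
  // Find the reservation containing `offset` (cf. ZNMT::reservation_index),
  // reducing `offset` to an offset within that reservation.
  while (i < n && offset >= res[i].size) {
    offset -= res[i].size;
    i++;
  }
  // Peel off one sub-range per reservation (cf. ZNMT::process_fake_mapping).
  for (size_t left = size; left > 0 && i < n; i++) {
    const size_t sub = std::min(left, res[i].size - offset);
    std::printf("fake map [0x%" PRIxPTR ", 0x%" PRIxPTR ")\n",
                res[i].start + offset, res[i].start + offset + sub);
    left -= sub;
    offset = 0; // later sub-ranges start at the next reservation's base
  }
}

int main() {
  // Two discontiguous 16M reservations at made-up addresses.
  const Reservation res[] = {{0x10000000, 16 << 20}, {0x20000000, 16 << 20}};
  // A 16M commit starting 8M into the offset space straddles both
  // reservations, producing two fake mappings.
  fake_mapping(res, 2, 8 << 20, 16 << 20);
  return 0;
}

With this layout, the 16M commit is split into one sub-range per reservation, which is exactly the 1:1 committed-to-reserved accounting NMT expects even though ZGC itself multi-maps the physical memory.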