From f536f5ab68235d27e9708674f707bcbff7840730 Mon Sep 17 00:00:00 2001 From: Ivan Walulya Date: Thu, 15 Aug 2024 08:26:22 +0000 Subject: [PATCH 01/67] 8336086: G1: Use one G1CardSet instance for all young regions Reviewed-by: tschatzl, ayang --- src/hotspot/share/gc/g1/g1CardSet.cpp | 18 +++++++++- src/hotspot/share/gc/g1/g1CardSet.hpp | 2 ++ src/hotspot/share/gc/g1/g1CollectedHeap.cpp | 9 +++++ src/hotspot/share/gc/g1/g1CollectedHeap.hpp | 12 +++++++ src/hotspot/share/gc/g1/g1CollectionSet.cpp | 2 +- src/hotspot/share/gc/g1/g1ConcurrentMark.cpp | 12 +++++++ src/hotspot/share/gc/g1/g1ConcurrentMark.hpp | 2 ++ .../share/gc/g1/g1ConcurrentRefine.cpp | 14 ++++---- src/hotspot/share/gc/g1/g1FullCollector.cpp | 3 ++ src/hotspot/share/gc/g1/g1HeapRegion.cpp | 4 +++ src/hotspot/share/gc/g1/g1HeapRegion.hpp | 4 +++ .../share/gc/g1/g1HeapRegion.inline.hpp | 8 +++++ .../share/gc/g1/g1HeapRegionRemSet.cpp | 15 +++++++-- .../share/gc/g1/g1HeapRegionRemSet.hpp | 33 +++++++++++++++---- .../share/gc/g1/g1HeapRegionRemSet.inline.hpp | 22 ++++++++----- src/hotspot/share/gc/g1/g1Policy.cpp | 5 +++ src/hotspot/share/gc/g1/g1Policy.hpp | 2 ++ src/hotspot/share/gc/g1/g1RemSet.cpp | 4 +++ src/hotspot/share/gc/g1/g1RemSetSummary.cpp | 11 ++++++- src/hotspot/share/gc/g1/g1YoungCollector.cpp | 15 ++++++--- .../gc/g1/g1YoungGCPostEvacuateTasks.cpp | 9 ++++- 21 files changed, 173 insertions(+), 33 deletions(-) diff --git a/src/hotspot/share/gc/g1/g1CardSet.cpp b/src/hotspot/share/gc/g1/g1CardSet.cpp index 7ab739e0196..5c11a1a9677 100644 --- a/src/hotspot/share/gc/g1/g1CardSet.cpp +++ b/src/hotspot/share/gc/g1/g1CardSet.cpp @@ -244,6 +244,10 @@ class G1CardSetHashTable : public CHeapObj { using CHTScanTask = CardSetHash::ScanTask; const static uint BucketClaimSize = 16; + // The claim size for group cardsets should be smaller to facilitate + // better work distribution. The group cardsets should be larger than + // the per region cardsets. + const static uint GroupBucketClaimSize = 4; // Did we insert at least one card in the table? bool volatile _inserted_card; @@ -347,7 +351,15 @@ class G1CardSetHashTable : public CHeapObj { } void reset_table_scanner() { - _table_scanner.set(&_table, BucketClaimSize); + reset_table_scanner(BucketClaimSize); + } + + void reset_table_scanner_for_groups() { + reset_table_scanner(GroupBucketClaimSize); + } + + void reset_table_scanner(uint claim_size) { + _table_scanner.set(&_table, claim_size); } void grow() { @@ -1042,3 +1054,7 @@ void G1CardSet::clear() { void G1CardSet::reset_table_scanner() { _table->reset_table_scanner(); } + +void G1CardSet::reset_table_scanner_for_groups() { + _table->reset_table_scanner_for_groups(); +} diff --git a/src/hotspot/share/gc/g1/g1CardSet.hpp b/src/hotspot/share/gc/g1/g1CardSet.hpp index a9f1859d5c7..22530ddadaf 100644 --- a/src/hotspot/share/gc/g1/g1CardSet.hpp +++ b/src/hotspot/share/gc/g1/g1CardSet.hpp @@ -380,6 +380,8 @@ class G1CardSet : public CHeapObj { void reset_table_scanner(); + void reset_table_scanner_for_groups(); + // Iterate over the container, calling a method on every card or card range contained // in the card container. 
// For every container, first calls diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index aa99fbecbee..fd73b725a12 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -1159,6 +1159,8 @@ G1CollectedHeap::G1CollectedHeap() : _rem_set(nullptr), _card_set_config(), _card_set_freelist_pool(G1CardSetConfiguration::num_mem_object_types()), + _young_regions_cardset_mm(card_set_config(), card_set_freelist_pool()), + _young_regions_cardset(card_set_config(), &_young_regions_cardset_mm), _cm(nullptr), _cm_thread(nullptr), _cr(nullptr), @@ -2693,6 +2695,7 @@ bool G1CollectedHeap::is_old_gc_alloc_region(G1HeapRegion* hr) { void G1CollectedHeap::set_region_short_lived_locked(G1HeapRegion* hr) { _eden.add(hr); _policy->set_region_eden(hr); + hr->install_group_cardset(young_regions_cardset()); } #ifdef ASSERT @@ -2902,6 +2905,8 @@ G1HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegio new_alloc_region->set_survivor(); _survivor.add(new_alloc_region); register_new_survivor_region_with_region_attr(new_alloc_region); + // Install the group cardset. + new_alloc_region->install_group_cardset(young_regions_cardset()); } else { new_alloc_region->set_old(); } @@ -3043,3 +3048,7 @@ void G1CollectedHeap::finish_codecache_marking_cycle() { CodeCache::on_gc_marking_cycle_finish(); CodeCache::arm_all_nmethods(); } + +void G1CollectedHeap::prepare_group_cardsets_for_scan () { + _young_regions_cardset.reset_table_scanner_for_groups(); +} diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp index ea087aae662..0f8bf9ffd2b 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp @@ -779,7 +779,19 @@ class G1CollectedHeap : public CollectedHeap { G1MonotonicArenaFreePool _card_set_freelist_pool; + // Group cardsets + G1CardSetMemoryManager _young_regions_cardset_mm; + G1CardSet _young_regions_cardset; + public: + G1CardSetConfiguration* card_set_config() { return &_card_set_config; } + + G1CardSet* young_regions_cardset() { return &_young_regions_cardset; }; + + G1CardSetMemoryManager* young_regions_card_set_mm() { return &_young_regions_cardset_mm; } + + void prepare_group_cardsets_for_scan(); + // After a collection pause, reset eden and the collection set. void clear_eden(); void clear_collection_set(); diff --git a/src/hotspot/share/gc/g1/g1CollectionSet.cpp b/src/hotspot/share/gc/g1/g1CollectionSet.cpp index fe4dfafee97..d315497268f 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp +++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp @@ -297,7 +297,7 @@ double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1Survi verify_young_cset_indices(); - double predicted_base_time_ms = _policy->predict_base_time_ms(pending_cards); + double predicted_base_time_ms = _policy->predict_base_time_ms(pending_cards, _g1h->young_regions_cardset()->occupied()); // Base time already includes the whole remembered set related time, so do not add that here // again. 
double predicted_eden_time = _policy->predict_young_region_other_time_ms(eden_region_length) + diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp index 3d56973299e..52d26418af6 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp @@ -2980,6 +2980,7 @@ G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* p _total_capacity_bytes(0), _total_live_bytes(0), _total_remset_bytes(0), + _young_cardset_bytes_per_region(0), _total_code_roots_bytes(0) { if (!log_is_enabled(Trace, gc, liveness)) { @@ -2990,6 +2991,13 @@ G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* p MemRegion reserved = g1h->reserved(); double now = os::elapsedTime(); + uint num_young_regions = g1h->young_regions_count(); + size_t young_cardset_bytes = g1h->young_regions_cardset()->mem_size(); + + if (num_young_regions > 0) { + _young_cardset_bytes_per_region = young_cardset_bytes / num_young_regions; + } + // Print the header of the output. log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" @@ -3041,6 +3049,10 @@ bool G1PrintRegionLivenessInfoClosure::do_heap_region(G1HeapRegion* r) { const char* remset_type = r->rem_set()->get_short_state_str(); FormatBuffer<16> gc_efficiency(""); + if (r->is_young()) { + remset_bytes = _young_cardset_bytes_per_region; + } + _total_used_bytes += used_bytes; _total_capacity_bytes += capacity_bytes; _total_live_bytes += live_bytes; diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp index 918384372bb..b197afc65ee 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp @@ -958,6 +958,8 @@ class G1PrintRegionLivenessInfoClosure : public G1HeapRegionClosure { // Accumulator for the remembered set size size_t _total_remset_bytes; + size_t _young_cardset_bytes_per_region; + // Accumulator for code roots memory size size_t _total_code_roots_bytes; diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp index 7d6cc9a41cb..8357737ef6a 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp @@ -254,21 +254,18 @@ uint64_t G1ConcurrentRefine::adjust_threads_wait_ms() const { } class G1ConcurrentRefine::RemSetSamplingClosure : public G1HeapRegionClosure { - size_t _sampled_card_rs_length; size_t _sampled_code_root_rs_length; public: RemSetSamplingClosure() : - _sampled_card_rs_length(0), _sampled_code_root_rs_length(0) {} + _sampled_code_root_rs_length(0) {} bool do_heap_region(G1HeapRegion* r) override { G1HeapRegionRemSet* rem_set = r->rem_set(); - _sampled_card_rs_length += rem_set->occupied(); _sampled_code_root_rs_length += rem_set->code_roots_list_length(); return false; } - size_t sampled_card_rs_length() const { return _sampled_card_rs_length; } size_t sampled_code_root_rs_length() const { return _sampled_code_root_rs_length; } }; @@ -286,10 +283,15 @@ class G1ConcurrentRefine::RemSetSamplingClosure : public G1HeapRegionClosure { // gen size to keep pause time length goal. 
void G1ConcurrentRefine::adjust_young_list_target_length() { if (_policy->use_adaptive_young_list_length()) { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + G1CollectionSet* cset = g1h->collection_set(); RemSetSamplingClosure cl; - G1CollectionSet* cset = G1CollectedHeap::heap()->collection_set(); cset->iterate(&cl); - _policy->revise_young_list_target_length(cl.sampled_card_rs_length(), cl.sampled_code_root_rs_length()); + + size_t card_rs_length = g1h->young_regions_cardset()->occupied(); + + size_t sampled_code_root_rs_length = cl.sampled_code_root_rs_length(); + _policy->revise_young_list_target_length(card_rs_length, sampled_code_root_rs_length); } } diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp index 219480227d3..4c66c526151 100644 --- a/src/hotspot/share/gc/g1/g1FullCollector.cpp +++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp @@ -172,6 +172,7 @@ class PrepareRegionsClosure : public G1HeapRegionClosure { bool do_heap_region(G1HeapRegion* hr) { hr->prepare_for_full_gc(); + hr->uninstall_group_cardset(); G1CollectedHeap::heap()->prepare_region_for_full_compaction(hr); _collector->before_marking_update_attribute_table(hr); return false; @@ -247,6 +248,8 @@ void G1FullCollector::complete_collection() { _heap->resize_all_tlabs(); + _heap->young_regions_cardset()->clear(); + _heap->policy()->record_full_collection_end(); _heap->gc_epilogue(true); diff --git a/src/hotspot/share/gc/g1/g1HeapRegion.cpp b/src/hotspot/share/gc/g1/g1HeapRegion.cpp index 9851b1df9c9..9cb2650f820 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegion.cpp +++ b/src/hotspot/share/gc/g1/g1HeapRegion.cpp @@ -119,6 +119,7 @@ void G1HeapRegion::hr_clear(bool clear_space) { clear_young_index_in_cset(); clear_index_in_opt_cset(); uninstall_surv_rate_group(); + uninstall_group_cardset(); set_free(); reset_pre_dummy_top(); @@ -215,6 +216,9 @@ void G1HeapRegion::clear_humongous() { } void G1HeapRegion::prepare_remset_for_scan() { + if (is_young()) { + uninstall_group_cardset(); + } _rem_set->reset_table_scanner(); } diff --git a/src/hotspot/share/gc/g1/g1HeapRegion.hpp b/src/hotspot/share/gc/g1/g1HeapRegion.hpp index ed953094a67..c17183d4034 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegion.hpp +++ b/src/hotspot/share/gc/g1/g1HeapRegion.hpp @@ -36,6 +36,7 @@ #include "runtime/mutex.hpp" #include "utilities/macros.hpp" +class G1CardSet; class G1CardSetConfiguration; class G1CollectedHeap; class G1CMBitMap; @@ -508,6 +509,9 @@ class G1HeapRegion : public CHeapObj { void install_surv_rate_group(G1SurvRateGroup* surv_rate_group); void uninstall_surv_rate_group(); + void install_group_cardset(G1CardSet* group_cardset); + void uninstall_group_cardset(); + void record_surv_words_in_group(size_t words_survived); // Determine if an address is in the parsable or the to-be-scrubbed area. 
diff --git a/src/hotspot/share/gc/g1/g1HeapRegion.inline.hpp b/src/hotspot/share/gc/g1/g1HeapRegion.inline.hpp index f31c5fb553e..4a87c5f2514 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegion.inline.hpp +++ b/src/hotspot/share/gc/g1/g1HeapRegion.inline.hpp @@ -511,4 +511,12 @@ inline void G1HeapRegion::add_pinned_object_count(size_t value) { Atomic::add(&_pinned_object_count, value, memory_order_relaxed); } +inline void G1HeapRegion::install_group_cardset(G1CardSet* group_cardset) { + _rem_set->install_group_cardset(group_cardset); +} + +inline void G1HeapRegion::uninstall_group_cardset() { + _rem_set->uninstall_group_cardset(); +} + #endif // SHARE_GC_G1_G1HEAPREGION_INLINE_HPP diff --git a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.cpp b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.cpp index 6e98b64adbc..fe1590b94a8 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.cpp +++ b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.cpp @@ -55,11 +55,19 @@ void G1HeapRegionRemSet::initialize(MemRegion reserved) { _heap_base_address = reserved.start(); } +void G1HeapRegionRemSet::uninstall_group_cardset() { + if (_saved_card_set != nullptr) { + _card_set = _saved_card_set; + _saved_card_set = nullptr; + } +} + G1HeapRegionRemSet::G1HeapRegionRemSet(G1HeapRegion* hr, G1CardSetConfiguration* config) : _code_roots(), _card_set_mm(config, G1CollectedHeap::heap()->card_set_freelist_pool()), - _card_set(config, &_card_set_mm), + _card_set(new G1CardSet(config, &_card_set_mm)), + _saved_card_set(nullptr), _hr(hr), _state(Untracked) { } @@ -68,11 +76,12 @@ void G1HeapRegionRemSet::clear_fcc() { } void G1HeapRegionRemSet::clear(bool only_cardset, bool keep_tracked) { + assert(_saved_card_set == nullptr, "pre-condition"); if (!only_cardset) { _code_roots.clear(); } clear_fcc(); - _card_set.clear(); + _card_set->clear(); if (!keep_tracked) { set_state_untracked(); } else { @@ -83,7 +92,7 @@ void G1HeapRegionRemSet::clear(bool only_cardset, bool keep_tracked) { void G1HeapRegionRemSet::reset_table_scanner() { _code_roots.reset_table_scanner(); - _card_set.reset_table_scanner(); + _card_set->reset_table_scanner(); } G1MonotonicArenaMemoryStats G1HeapRegionRemSet::card_set_memory_stats() const { diff --git a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.hpp b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.hpp index e92ecdc9cf9..843eb76bbc9 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.hpp +++ b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.hpp @@ -47,7 +47,8 @@ class G1HeapRegionRemSet : public CHeapObj { G1CardSetMemoryManager _card_set_mm; // The set of cards in the Java heap - G1CardSet _card_set; + G1CardSet* _card_set; + G1CardSet* _saved_card_set; G1HeapRegion* _hr; @@ -58,9 +59,24 @@ class G1HeapRegionRemSet : public CHeapObj { public: G1HeapRegionRemSet(G1HeapRegion* hr, G1CardSetConfiguration* config); + ~G1HeapRegionRemSet() { delete _card_set; } bool cardset_is_empty() const { - return _card_set.is_empty(); + return _card_set->is_empty(); + } + + void install_group_cardset(G1CardSet* group_cardset) { + assert(group_cardset != nullptr, "pre-condition"); + assert(_saved_card_set == nullptr, "pre-condition"); + + _saved_card_set = _card_set; + _card_set = group_cardset; + } + + void uninstall_group_cardset(); + + bool has_group_cardset() { + return _saved_card_set != nullptr; } bool is_empty() const { @@ -68,7 +84,7 @@ class G1HeapRegionRemSet : public CHeapObj { } bool occupancy_less_or_equal_than(size_t occ) const { - return (code_roots_list_length() == 0) && _card_set.occupancy_less_or_equal_to(occ); + 
return (code_roots_list_length() == 0) && _card_set->occupancy_less_or_equal_to(occ); } // Iterate the card based remembered set for merging them into the card table. @@ -77,10 +93,15 @@ class G1HeapRegionRemSet : public CHeapObj { template inline void iterate_for_merge(CardOrRangeVisitor& cl); + template + inline static void iterate_for_merge(G1CardSet* card_set, CardOrRangeVisitor& cl); + size_t occupied() { - return _card_set.occupied(); + return _card_set->occupied(); } + G1CardSet* card_set() { return _card_set; } + static void initialize(MemRegion reserved); // Coarsening statistics since VM start. @@ -125,13 +146,13 @@ class G1HeapRegionRemSet : public CHeapObj { // The actual # of bytes this hr_remset takes up. Also includes the code // root set. size_t mem_size() { - return _card_set.mem_size() + return _card_set->mem_size() + (sizeof(G1HeapRegionRemSet) - sizeof(G1CardSet)) // Avoid double-counting G1CardSet. + code_roots_mem_size(); } size_t unused_mem_size() { - return _card_set.unused_mem_size(); + return _card_set->unused_mem_size(); } // Returns the memory occupancy of all static data structures associated diff --git a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.inline.hpp b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.inline.hpp index 457027ad126..0df9874e9dd 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.inline.hpp +++ b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.inline.hpp @@ -108,13 +108,17 @@ class G1HeapRegionRemSetMergeCardClosure : public G1CardSet::ContainerPtrClosure template inline void G1HeapRegionRemSet::iterate_for_merge(CardOrRangeVisitor& cl) { - G1HeapRegionRemSetMergeCardClosure cl2(&_card_set, - cl, - _card_set.config()->log2_card_regions_per_heap_region(), - _card_set.config()->log2_cards_per_card_region()); - _card_set.iterate_containers(&cl2, true /* at_safepoint */); + iterate_for_merge(_card_set, cl); } +template +void G1HeapRegionRemSet::iterate_for_merge(G1CardSet* card_set, CardOrRangeVisitor& cl) { + G1HeapRegionRemSetMergeCardClosure cl2(card_set, + cl, + card_set->config()->log2_card_regions_per_heap_region(), + card_set->config()->log2_cards_per_card_region()); + card_set->iterate_containers(&cl2, true /* at_safepoint */); +} uintptr_t G1HeapRegionRemSet::to_card(OopOrNarrowOopStar from) const { return pointer_delta(from, _heap_base_address, 1) >> CardTable::card_shift(); @@ -130,18 +134,18 @@ void G1HeapRegionRemSet::add_reference(OopOrNarrowOopStar from, uint tid) { // We can't check whether the card is in the remembered set - the card container // may be coarsened just now. 
//assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from)); - return; + return; } - _card_set.add_card(to_card(from)); + _card_set->add_card(to_card(from)); } bool G1HeapRegionRemSet::contains_reference(OopOrNarrowOopStar from) { - return _card_set.contains_card(to_card(from)); + return _card_set->contains_card(to_card(from)); } void G1HeapRegionRemSet::print_info(outputStream* st, OopOrNarrowOopStar from) { - _card_set.print_info(st, to_card(from)); + _card_set->print_info(st, to_card(from)); } #endif // SHARE_VM_GC_G1_G1HEAPREGIONREMSET_INLINE_HPP diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp index 7d0acd3a326..e7e57c962c7 100644 --- a/src/hotspot/share/gc/g1/g1Policy.cpp +++ b/src/hotspot/share/gc/g1/g1Policy.cpp @@ -1089,6 +1089,11 @@ double G1Policy::predict_base_time_ms(size_t pending_cards, double G1Policy::predict_base_time_ms(size_t pending_cards) const { bool for_young_only_phase = collector_state()->in_young_only_phase(); size_t card_rs_length = _analytics->predict_card_rs_length(for_young_only_phase); + return predict_base_time_ms(pending_cards, card_rs_length); +} + +double G1Policy::predict_base_time_ms(size_t pending_cards, size_t card_rs_length) const { + bool for_young_only_phase = collector_state()->in_young_only_phase(); size_t code_root_rs_length = _analytics->predict_code_root_rs_length(for_young_only_phase); return predict_base_time_ms(pending_cards, card_rs_length, code_root_rs_length); } diff --git a/src/hotspot/share/gc/g1/g1Policy.hpp b/src/hotspot/share/gc/g1/g1Policy.hpp index 9de27e76d96..98d44408467 100644 --- a/src/hotspot/share/gc/g1/g1Policy.hpp +++ b/src/hotspot/share/gc/g1/g1Policy.hpp @@ -138,6 +138,8 @@ class G1Policy: public CHeapObj { double predict_base_time_ms(size_t pending_cards) const; + double predict_base_time_ms(size_t pending_cards, size_t card_rs_length) const; + private: // Base time contains handling remembered sets and constant other time of the // whole young gen, refinement buffers, and copying survivors. 
diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp index 0f9b9d17df7..f5f65cf1c48 100644 --- a/src/hotspot/share/gc/g1/g1RemSet.cpp +++ b/src/hotspot/share/gc/g1/g1RemSet.cpp @@ -1379,6 +1379,10 @@ class G1MergeHeapRootsTask : public WorkerTask { G1ClearBitmapClosure clear(g1h); G1CombinedClosure combined(&merge, &clear); + if (_initial_evacuation) { + G1HeapRegionRemSet::iterate_for_merge(g1h->young_regions_cardset(), merge); + } + g1h->collection_set_iterate_increment_from(&combined, nullptr, worker_id); G1MergeCardSetStats stats = merge.stats(); diff --git a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp index 14fb8c0b8d2..5ea3500a7b0 100644 --- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp +++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp @@ -218,15 +218,24 @@ class HRRSStatsIter: public G1HeapRegionClosure { bool do_heap_region(G1HeapRegion* r) { G1HeapRegionRemSet* hrrs = r->rem_set(); + size_t occupied_cards = hrrs->occupied(); // G1HeapRegionRemSet::mem_size() includes the // size of the code roots size_t rs_unused_mem_sz = hrrs->unused_mem_size(); size_t rs_mem_sz = hrrs->mem_size(); + + if (r->is_young()) { + uint num_young = G1CollectedHeap::heap()->young_regions_count(); + occupied_cards /= num_young; + rs_unused_mem_sz /= num_young; + rs_mem_sz /= num_young; + } + if (rs_mem_sz > _max_rs_mem_sz) { _max_rs_mem_sz = rs_mem_sz; _max_rs_mem_sz_region = r; } - size_t occupied_cards = hrrs->occupied(); + size_t code_root_mem_sz = hrrs->code_roots_mem_size(); if (code_root_mem_sz > max_code_root_mem_sz()) { _max_code_root_mem_sz = code_root_mem_sz; diff --git a/src/hotspot/share/gc/g1/g1YoungCollector.cpp b/src/hotspot/share/gc/g1/g1YoungCollector.cpp index cadab2fbc48..f2fe93015c5 100644 --- a/src/hotspot/share/gc/g1/g1YoungCollector.cpp +++ b/src/hotspot/share/gc/g1/g1YoungCollector.cpp @@ -295,10 +295,9 @@ class G1PrepareEvacuationTask : public WorkerTask { G1MonotonicArenaMemoryStats _card_set_stats; void sample_card_set_size(G1HeapRegion* hr) { - // Sample card set sizes for young gen and humongous before GC: this makes - // the policy to give back memory to the OS keep the most recent amount of - // memory for these regions. - if (hr->is_young() || hr->is_starts_humongous()) { + // Sample card set sizes for humongous before GC: this makes the policy to give + // back memory to the OS keep the most recent amount of memory for these regions. 
+ if (hr->is_starts_humongous()) { _card_set_stats.add(hr->rem_set()->card_set_memory_stats()); } } @@ -507,6 +506,9 @@ void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info) { Ticks start = Ticks::now(); rem_set()->prepare_for_scan_heap_roots(); + + _g1h->prepare_group_cardsets_for_scan(); + phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0); } @@ -514,7 +516,10 @@ void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info) G1PrepareEvacuationTask g1_prep_task(_g1h); Tickspan task_time = run_task_timed(&g1_prep_task); - _g1h->set_young_gen_card_set_stats(g1_prep_task.all_card_set_stats()); + G1MonotonicArenaMemoryStats sampled_card_set_stats = g1_prep_task.all_card_set_stats(); + sampled_card_set_stats.add(_g1h->young_regions_card_set_mm()->memory_stats()); + _g1h->set_young_gen_card_set_stats(sampled_card_set_stats); + _g1h->set_humongous_stats(g1_prep_task.humongous_total(), g1_prep_task.humongous_candidates()); phase_times()->record_register_regions(task_time.seconds() * 1000.0); diff --git a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp index e1f0df05c22..a0e9a9b1569 100644 --- a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp +++ b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp @@ -673,6 +673,10 @@ class FreeCSetStats { G1Policy *policy = g1h->policy(); policy->old_gen_alloc_tracker()->add_allocated_bytes_since_last_gc(_bytes_allocated_in_old_since_last_gc); + + // Add the cards from the group cardsets. + _card_rs_length += g1h->young_regions_cardset()->occupied(); + policy->record_card_rs_length(_card_rs_length); policy->cset_regions_freed(); } @@ -822,9 +826,10 @@ class FreeCSetClosure : public G1HeapRegionClosure { JFREventForRegion event(r, _worker_id); TimerForRegion timer(timer_for_region(r)); - stats()->account_card_rs_length(r); if (r->is_young()) { + // We only use card_rs_length statistics to estimate young regions length. + stats()->account_card_rs_length(r); assert_tracks_surviving_words(r); r->record_surv_words_in_group(_surviving_young_words[r->young_index_in_cset()]); } @@ -911,6 +916,8 @@ class G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask : public G1 p->record_serial_free_cset_time_ms((Ticks::now() - serial_time).seconds() * 1000.0); _g1h->clear_collection_set(); + + _g1h->young_regions_cardset()->clear(); } double worker_cost() const override { return G1CollectedHeap::heap()->collection_set()->region_length(); } From da7311bbe37c2b9632b117d52a77c659047820b7 Mon Sep 17 00:00:00 2001 From: Aleksey Shipilev Date: Thu, 15 Aug 2024 08:50:29 +0000 Subject: [PATCH 02/67] 8338286: GHA: Demote x86_32 to hotspot build only Reviewed-by: ihse --- .github/workflows/main.yml | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 6a1b4420e3a..1dc0b25c345 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -36,7 +36,7 @@ on: platforms: description: 'Platform(s) to execute on (comma separated, e.g. 
"linux-x64, macos, aarch64")' required: true - default: 'linux-x64, linux-x86, linux-x64-variants, linux-cross-compile, macos-x64, macos-aarch64, windows-x64, windows-aarch64, docs' + default: 'linux-x64, linux-x86-hs, linux-x64-variants, linux-cross-compile, macos-x64, macos-aarch64, windows-x64, windows-aarch64, docs' configure-arguments: description: 'Additional configure arguments' required: false @@ -59,7 +59,7 @@ jobs: runs-on: ubuntu-22.04 outputs: linux-x64: ${{ steps.include.outputs.linux-x64 }} - linux-x86: ${{ steps.include.outputs.linux-x86 }} + linux-x86-hs: ${{ steps.include.outputs.linux-x86-hs }} linux-x64-variants: ${{ steps.include.outputs.linux-x64-variants }} linux-cross-compile: ${{ steps.include.outputs.linux-cross-compile }} macos-x64: ${{ steps.include.outputs.macos-x64 }} @@ -111,7 +111,7 @@ jobs: } echo "linux-x64=$(check_platform linux-x64 linux x64)" >> $GITHUB_OUTPUT - echo "linux-x86=$(check_platform linux-x86 linux x86)" >> $GITHUB_OUTPUT + echo "linux-x86-hs=$(check_platform linux-x86-hs linux x86)" >> $GITHUB_OUTPUT echo "linux-x64-variants=$(check_platform linux-x64-variants variants)" >> $GITHUB_OUTPUT echo "linux-cross-compile=$(check_platform linux-cross-compile cross-compile)" >> $GITHUB_OUTPUT echo "macos-x64=$(check_platform macos-x64 macos x64)" >> $GITHUB_OUTPUT @@ -135,12 +135,13 @@ jobs: make-arguments: ${{ github.event.inputs.make-arguments }} if: needs.select.outputs.linux-x64 == 'true' - build-linux-x86: - name: linux-x86 + build-linux-x86-hs: + name: linux-x86-hs needs: select uses: ./.github/workflows/build-linux.yml with: platform: linux-x86 + make-target: 'hotspot' gcc-major-version: '10' gcc-package-suffix: '-multilib' apt-architecture: 'i386' @@ -150,7 +151,7 @@ jobs: extra-conf-options: '--with-target-bits=32 --enable-fallback-linker --enable-libffi-bundling' configure-arguments: ${{ github.event.inputs.configure-arguments }} make-arguments: ${{ github.event.inputs.make-arguments }} - if: needs.select.outputs.linux-x86 == 'true' + if: needs.select.outputs.linux-x86-hs == 'true' build-linux-x64-hs-nopch: name: linux-x64-hs-nopch @@ -300,16 +301,6 @@ jobs: bootjdk-platform: linux-x64 runs-on: ubuntu-22.04 - test-linux-x86: - name: linux-x86 - needs: - - build-linux-x86 - uses: ./.github/workflows/test.yml - with: - platform: linux-x86 - bootjdk-platform: linux-x64 - runs-on: ubuntu-22.04 - test-macos-x64: name: macos-x64 needs: @@ -347,7 +338,7 @@ jobs: if: always() needs: - build-linux-x64 - - build-linux-x86 + - build-linux-x86-hs - build-linux-x64-hs-nopch - build-linux-x64-hs-zero - build-linux-x64-hs-minimal @@ -358,7 +349,6 @@ jobs: - build-windows-x64 - build-windows-aarch64 - test-linux-x64 - - test-linux-x86 - test-macos-x64 - test-windows-x64 From 74fdd6868d3f71d44ef9f71a0ca9506c04d39148 Mon Sep 17 00:00:00 2001 From: Aleksey Shipilev Date: Thu, 15 Aug 2024 11:24:22 +0000 Subject: [PATCH 03/67] 8333791: Fix memory barriers for @Stable fields Reviewed-by: liach, vlivanov --- src/hotspot/share/c1/c1_GraphBuilder.cpp | 12 +- src/hotspot/share/c1/c1_IR.cpp | 1 + src/hotspot/share/c1/c1_IR.hpp | 3 + src/hotspot/share/opto/parse.hpp | 10 +- src/hotspot/share/opto/parse1.cpp | 29 +-- src/hotspot/share/opto/parse3.cpp | 21 +- .../irTests/stable/StablePrimArrayTest.java | 170 +++++++++++++++++ .../irTests/stable/StablePrimFinalTest.java | 100 ++++++++++ .../irTests/stable/StablePrimPlainTest.java | 114 +++++++++++ .../stable/StablePrimVolatileTest.java | 114 +++++++++++ .../c2/irTests/stable/StableRefArrayTest.java | 179 
++++++++++++++++++ .../c2/irTests/stable/StableRefFinalTest.java | 97 ++++++++++ .../c2/irTests/stable/StableRefPlainTest.java | 118 ++++++++++++ .../irTests/stable/StableRefVolatileTest.java | 118 ++++++++++++ 14 files changed, 1047 insertions(+), 39 deletions(-) create mode 100644 test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimArrayTest.java create mode 100644 test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimFinalTest.java create mode 100644 test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimPlainTest.java create mode 100644 test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimVolatileTest.java create mode 100644 test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefArrayTest.java create mode 100644 test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefFinalTest.java create mode 100644 test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefPlainTest.java create mode 100644 test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefVolatileTest.java diff --git a/src/hotspot/share/c1/c1_GraphBuilder.cpp b/src/hotspot/share/c1/c1_GraphBuilder.cpp index 0493b0458cd..780ced7f433 100644 --- a/src/hotspot/share/c1/c1_GraphBuilder.cpp +++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp @@ -1563,7 +1563,7 @@ void GraphBuilder::method_return(Value x, bool ignore_return) { // The conditions for a memory barrier are described in Parse::do_exits(). bool need_mem_bar = false; if (method()->name() == ciSymbols::object_initializer_name() && - (scope()->wrote_final() || + (scope()->wrote_final() || scope()->wrote_stable() || (AlwaysSafeConstructors && scope()->wrote_fields()) || (support_IRIW_for_not_multiple_copy_atomic_cpu && scope()->wrote_volatile()))) { need_mem_bar = true; @@ -1741,15 +1741,17 @@ void GraphBuilder::access_field(Bytecodes::Code code) { } } - if (field->is_final() && (code == Bytecodes::_putfield)) { - scope()->set_wrote_final(); - } - if (code == Bytecodes::_putfield) { scope()->set_wrote_fields(); if (field->is_volatile()) { scope()->set_wrote_volatile(); } + if (field->is_final()) { + scope()->set_wrote_final(); + } + if (field->is_stable()) { + scope()->set_wrote_stable(); + } } const int offset = !needs_patching ? 
field->offset_in_bytes() : -1; diff --git a/src/hotspot/share/c1/c1_IR.cpp b/src/hotspot/share/c1/c1_IR.cpp index e375d16aafb..b3faa54cc69 100644 --- a/src/hotspot/share/c1/c1_IR.cpp +++ b/src/hotspot/share/c1/c1_IR.cpp @@ -146,6 +146,7 @@ IRScope::IRScope(Compilation* compilation, IRScope* caller, int caller_bci, ciMe _wrote_final = false; _wrote_fields = false; _wrote_volatile = false; + _wrote_stable = false; _start = nullptr; if (osr_bci != -1) { diff --git a/src/hotspot/share/c1/c1_IR.hpp b/src/hotspot/share/c1/c1_IR.hpp index 3035643708a..e2582c77b39 100644 --- a/src/hotspot/share/c1/c1_IR.hpp +++ b/src/hotspot/share/c1/c1_IR.hpp @@ -149,6 +149,7 @@ class IRScope: public CompilationResourceObj { bool _wrote_final; // has written final field bool _wrote_fields; // has written fields bool _wrote_volatile; // has written volatile field + bool _wrote_stable; // has written @Stable field BlockBegin* _start; // the start block, successsors are method entries ResourceBitMap _requires_phi_function; // bit is set if phi functions at loop headers are necessary for a local variable @@ -187,6 +188,8 @@ class IRScope: public CompilationResourceObj { bool wrote_fields () const { return _wrote_fields; } void set_wrote_volatile() { _wrote_volatile = true; } bool wrote_volatile () const { return _wrote_volatile; } + void set_wrote_stable() { _wrote_stable = true; } + bool wrote_stable() const { return _wrote_stable; } }; diff --git a/src/hotspot/share/opto/parse.hpp b/src/hotspot/share/opto/parse.hpp index a55c9cb0cb1..a2690aa6704 100644 --- a/src/hotspot/share/opto/parse.hpp +++ b/src/hotspot/share/opto/parse.hpp @@ -358,7 +358,7 @@ class Parse : public GraphKit { bool _wrote_volatile; // Did we write a volatile field? bool _wrote_stable; // Did we write a @Stable field? bool _wrote_fields; // Did we write any field? - Node* _alloc_with_final; // An allocation node with final field + Node* _alloc_with_final_or_stable; // An allocation node with final or @Stable field // Variables which track Java semantics during bytecode parsing: @@ -403,10 +403,10 @@ class Parse : public GraphKit { void set_wrote_stable(bool z) { _wrote_stable = z; } bool wrote_fields() const { return _wrote_fields; } void set_wrote_fields(bool z) { _wrote_fields = z; } - Node* alloc_with_final() const { return _alloc_with_final; } - void set_alloc_with_final(Node* n) { - assert((_alloc_with_final == nullptr) || (_alloc_with_final == n), "different init objects?"); - _alloc_with_final = n; + Node* alloc_with_final_or_stable() const { return _alloc_with_final_or_stable; } + void set_alloc_with_final_or_stable(Node* n) { + assert((_alloc_with_final_or_stable == nullptr) || (_alloc_with_final_or_stable == n), "different init objects?"); + _alloc_with_final_or_stable = n; } Block* block() const { return _block; } diff --git a/src/hotspot/share/opto/parse1.cpp b/src/hotspot/share/opto/parse1.cpp index 14ef93a5eed..05627450585 100644 --- a/src/hotspot/share/opto/parse1.cpp +++ b/src/hotspot/share/opto/parse1.cpp @@ -412,7 +412,7 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) _wrote_volatile = false; _wrote_stable = false; _wrote_fields = false; - _alloc_with_final = nullptr; + _alloc_with_final_or_stable = nullptr; _block = nullptr; _first_return = true; _replaced_nodes_for_exceptions = false; @@ -988,8 +988,8 @@ void Parse::do_exits() { // Figure out if we need to emit the trailing barrier. The barrier is only // needed in the constructors, and only in three cases: // - // 1. 
The constructor wrote a final. The effects of all initializations - // must be committed to memory before any code after the constructor + // 1. The constructor wrote a final or a @Stable field. All these + // initializations must be ordered before any code after the constructor // publishes the reference to the newly constructed object. Rather // than wait for the publication, we simply block the writes here. // Rather than put a barrier on only those writes which are required @@ -1014,34 +1014,23 @@ void Parse::do_exits() { // exceptional returns, since they cannot publish normally. // if (method()->is_object_initializer() && - (wrote_final() || + (wrote_final() || wrote_stable() || (AlwaysSafeConstructors && wrote_fields()) || (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) { + Node* recorded_alloc = alloc_with_final_or_stable(); _exits.insert_mem_bar(UseStoreStoreForCtor ? Op_MemBarStoreStore : Op_MemBarRelease, - alloc_with_final()); + recorded_alloc); // If Memory barrier is created for final fields write // and allocation node does not escape the initialize method, // then barrier introduced by allocation node can be removed. - if (DoEscapeAnalysis && alloc_with_final()) { - AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_with_final()); + if (DoEscapeAnalysis && (recorded_alloc != nullptr)) { + AllocateNode* alloc = AllocateNode::Ideal_allocation(recorded_alloc); alloc->compute_MemBar_redundancy(method()); } if (PrintOpto && (Verbose || WizardMode)) { method()->print_name(); - tty->print_cr(" writes finals and needs a memory barrier"); - } - } - - // Any method can write a @Stable field; insert memory barriers - // after those also. Can't bind predecessor allocation node (if any) - // with barrier because allocation doesn't always dominate - // MemBarRelease. - if (wrote_stable()) { - _exits.insert_mem_bar(Op_MemBarRelease); - if (PrintOpto && (Verbose || WizardMode)) { - method()->print_name(); - tty->print_cr(" writes @Stable and needs a memory barrier"); + tty->print_cr(" writes finals/@Stable and needs a memory barrier"); } } diff --git a/src/hotspot/share/opto/parse3.cpp b/src/hotspot/share/opto/parse3.cpp index a2bf653b176..a9fad4e3633 100644 --- a/src/hotspot/share/opto/parse3.cpp +++ b/src/hotspot/share/opto/parse3.cpp @@ -236,22 +236,25 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) { set_wrote_fields(true); // If the field is final, the rules of Java say we are in or . - // Note the presence of writes to final non-static fields, so that we + // If the field is @Stable, we can be in any method, but we only care about + // constructors at this point. + // + // Note the presence of writes to final/@Stable non-static fields, so that we // can insert a memory barrier later on to keep the writes from floating // out of the constructor. - // Any method can write a @Stable field; insert memory barriers after those also. - if (field->is_final()) { - set_wrote_final(true); + if (field->is_final() || field->is_stable()) { + if (field->is_final()) { + set_wrote_final(true); + } + if (field->is_stable()) { + set_wrote_stable(true); + } if (AllocateNode::Ideal_allocation(obj) != nullptr) { // Preserve allocation ptr to create precedent edge to it in membar // generated on exit from constructor. - // Can't bind stable with its allocation, only record allocation for final field. 
- set_alloc_with_final(obj); + set_alloc_with_final_or_stable(obj); } } - if (field->is_stable()) { - set_wrote_stable(true); - } } } diff --git a/test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimArrayTest.java b/test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimArrayTest.java new file mode 100644 index 00000000000..24733700f81 --- /dev/null +++ b/test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimArrayTest.java @@ -0,0 +1,170 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8333791 + * @requires os.arch=="aarch64" | os.arch=="riscv64" | os.arch=="x86_64" | os.arch=="amd64" + * @requires vm.gc.Parallel + * @requires vm.compiler2.enabled + * @summary Check stable field folding and barriers + * @modules java.base/jdk.internal.vm.annotation + * @library /test/lib / + * @run driver compiler.c2.irTests.stable.StablePrimArrayTest + */ + +package compiler.c2.irTests.stable; + +import compiler.lib.ir_framework.*; +import jdk.test.lib.Asserts; + +import jdk.internal.vm.annotation.Stable; + +public class StablePrimArrayTest { + + public static void main(String[] args) { + TestFramework tf = new TestFramework(); + tf.addTestClassesToBootClassPath(); + tf.addFlags( + "-XX:+UnlockExperimentalVMOptions", + "-XX:CompileThreshold=100", + "-XX:-TieredCompilation", + "-XX:+UseParallelGC" + ); + tf.start(); + } + + static final int[] EMPTY_INTEGER = new int[] { 0 }; + static final int[] FULL_INTEGER = new int[] { 42 }; + + static class Carrier { + @Stable + int[] field; + + @ForceInline + public Carrier(int initLevel) { + switch (initLevel) { + case 0: + // Do nothing. + break; + case 1: + field = EMPTY_INTEGER; + break; + case 2: + field = FULL_INTEGER; + break; + default: + throw new IllegalStateException("Unknown level"); + } + } + + @ForceInline + public void initEmpty() { + field = EMPTY_INTEGER; + } + + @ForceInline + public void initFull() { + field = FULL_INTEGER; + } + + } + + static final Carrier BLANK_CARRIER = new Carrier(0); + static final Carrier INIT_EMPTY_CARRIER = new Carrier(1); + static final Carrier INIT_FULL_CARRIER = new Carrier(2); + + @Test + @IR(counts = { IRNode.LOAD, ">0" }) + @IR(failOn = { IRNode.MEMBAR }) + static int testNoFold() { + // Access should not be folded. + // No barriers expected for plain fields. 
+ int[] is = BLANK_CARRIER.field; + if (is != null) { + return is[0]; + } + return 0; + } + + @Test + @IR(counts = { IRNode.LOAD, ">0" }) + @IR(failOn = { IRNode.MEMBAR }) + static int testPartialFold() { + // Access should not be folded. + // No barriers expected for plain fields. + int[] is = INIT_EMPTY_CARRIER.field; + if (is != null) { + return is[0]; + } + return 0; + } + + + @Test + @IR(failOn = { IRNode.LOAD, IRNode.MEMBAR }) + static int testFold() { + // Access should be completely folded. + int[] is = INIT_FULL_CARRIER.field; + if (is != null) { + return is[0]; + } + return 0; + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorBlankInit() { + // Only the header barrier. + return new Carrier(0); + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorEmptyInit() { + // Only the header barrier. + return new Carrier(1); + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorFullInit() { + // Only the header barrier. + return new Carrier(2); + } + + @Test + @IR(failOn = { IRNode.MEMBAR }) + static void testMethodEmptyInit() { + // Reference inits do not have membars. + INIT_EMPTY_CARRIER.initEmpty(); + } + + @Test + @IR(failOn = { IRNode.MEMBAR }) + static void testMethodFullInit() { + // Reference inits do not have membars. + INIT_FULL_CARRIER.initFull(); + } + +} diff --git a/test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimFinalTest.java b/test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimFinalTest.java new file mode 100644 index 00000000000..355fadf6cc1 --- /dev/null +++ b/test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimFinalTest.java @@ -0,0 +1,100 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8333791 + * @requires os.arch=="aarch64" | os.arch=="riscv64" | os.arch=="x86_64" | os.arch=="amd64" + * @requires vm.gc.Parallel + * @requires vm.compiler2.enabled + * @summary Check stable field folding and barriers + * @modules java.base/jdk.internal.vm.annotation + * @library /test/lib / + * @run driver compiler.c2.irTests.stable.StablePrimFinalTest + */ + +package compiler.c2.irTests.stable; + +import compiler.lib.ir_framework.*; +import jdk.test.lib.Asserts; + +import jdk.internal.vm.annotation.Stable; + +public class StablePrimFinalTest { + + public static void main(String[] args) { + TestFramework tf = new TestFramework(); + tf.addTestClassesToBootClassPath(); + tf.addFlags( + "-XX:+UnlockExperimentalVMOptions", + "-XX:CompileThreshold=100", + "-XX:-TieredCompilation", + "-XX:+UseParallelGC" + ); + tf.start(); + } + + static class Carrier { + @Stable + final int field; + + @ForceInline + public Carrier(boolean init) { + field = init ? 42 : 0; + } + } + + static final Carrier BLANK_CARRIER = new Carrier(false); + static final Carrier INIT_CARRIER = new Carrier(true); + + @Test + @IR(counts = { IRNode.LOAD, "1" }) + @IR(failOn = { IRNode.MEMBAR }) + static int testNoFold() { + // Access should not be folded. + // No barriers expected for final fields. + return BLANK_CARRIER.field; + } + + @Test + @IR(failOn = { IRNode.LOAD, IRNode.MEMBAR }) + static int testFold() { + // Access should be completely folded. + return INIT_CARRIER.field; + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorBlankInit() { + // Single header+final barrier. + return new Carrier(false); + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorFullInit() { + // Single header+final barrier. + return new Carrier(true); + } + +} diff --git a/test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimPlainTest.java b/test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimPlainTest.java new file mode 100644 index 00000000000..38cc8bfad4e --- /dev/null +++ b/test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimPlainTest.java @@ -0,0 +1,114 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8333791 + * @requires os.arch=="aarch64" | os.arch=="riscv64" | os.arch=="x86_64" | os.arch=="amd64" + * @requires vm.gc.Parallel + * @requires vm.compiler2.enabled + * @summary Check stable field folding and barriers + * @modules java.base/jdk.internal.vm.annotation + * @library /test/lib / + * @run driver compiler.c2.irTests.stable.StablePrimPlainTest + */ + +package compiler.c2.irTests.stable; + +import compiler.lib.ir_framework.*; +import jdk.test.lib.Asserts; + +import jdk.internal.vm.annotation.Stable; + +public class StablePrimPlainTest { + + public static void main(String[] args) { + TestFramework tf = new TestFramework(); + tf.addTestClassesToBootClassPath(); + tf.addFlags( + "-XX:+UnlockExperimentalVMOptions", + "-XX:CompileThreshold=100", + "-XX:-TieredCompilation", + "-XX:+UseParallelGC" + ); + tf.start(); + } + + static class Carrier { + @Stable + int field; + + @ForceInline + public Carrier(boolean init) { + if (init) { + field = 42; + } + } + + @ForceInline + public void init() { + field = 42; + } + } + + static final Carrier BLANK_CARRIER = new Carrier(false); + static final Carrier INIT_CARRIER = new Carrier(true); + + @Test + @IR(counts = { IRNode.LOAD, "1" }) + @IR(failOn = { IRNode.MEMBAR }) + static int testNoFold() { + // Access should not be folded. + // No barriers expected for plain fields. + return BLANK_CARRIER.field; + } + + @Test + @IR(failOn = { IRNode.LOAD, IRNode.MEMBAR }) + static int testFold() { + // Access should be completely folded. + return INIT_CARRIER.field; + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorBlankInit() { + // Only the header barrier. + return new Carrier(false); + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorFullInit() { + // Only the header barrier. + return new Carrier(true); + } + + @Test + @IR(failOn = { IRNode.MEMBAR }) + static void testMethodInit() { + // Primitive inits have no membars. + INIT_CARRIER.init(); + } + +} diff --git a/test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimVolatileTest.java b/test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimVolatileTest.java new file mode 100644 index 00000000000..159c2f7321d --- /dev/null +++ b/test/hotspot/jtreg/compiler/c2/irTests/stable/StablePrimVolatileTest.java @@ -0,0 +1,114 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8333791 + * @requires os.arch=="aarch64" | os.arch=="riscv64" | os.arch=="x86_64" | os.arch=="amd64" + * @requires vm.gc.Parallel + * @requires vm.compiler2.enabled + * @summary Check stable field folding and barriers + * @modules java.base/jdk.internal.vm.annotation + * @library /test/lib / + * @run driver compiler.c2.irTests.stable.StablePrimVolatileTest + */ + +package compiler.c2.irTests.stable; + +import compiler.lib.ir_framework.*; +import jdk.test.lib.Asserts; + +import jdk.internal.vm.annotation.Stable; + +public class StablePrimVolatileTest { + + public static void main(String[] args) { + TestFramework tf = new TestFramework(); + tf.addTestClassesToBootClassPath(); + tf.addFlags( + "-XX:+UnlockExperimentalVMOptions", + "-XX:CompileThreshold=100", + "-XX:-TieredCompilation", + "-XX:+UseParallelGC" + ); + tf.start(); + } + + static class Carrier { + @Stable + volatile int field; + + @ForceInline + public Carrier(boolean init) { + if (init) { + field = 42; + } + } + + @ForceInline + public void init() { + field = 42; + } + } + + static final Carrier BLANK_CARRIER = new Carrier(false); + static final Carrier INIT_CARRIER = new Carrier(true); + + @Test + @IR(counts = { IRNode.LOAD, "1" }) + @IR(counts = { IRNode.MEMBAR, ">0" }) + static int testNoFold() { + // Access should not be folded. + // Barriers expected for volatile fields. + return BLANK_CARRIER.field; + } + + @Test + @IR(failOn = { IRNode.LOAD, IRNode.MEMBAR }) + static int testFold() { + // Access should be completely folded. + return INIT_CARRIER.field; + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorBlankInit() { + // Expect only the header barrier. + return new Carrier(false); + } + + @Test + @IR(counts = { IRNode.MEMBAR, ">0" }) + static Carrier testConstructorFullInit() { + // Volatile barriers expected. + return new Carrier(true); + } + + @Test + @IR(counts = { IRNode.MEMBAR, ">0" }) + static void testMethodInit() { + // Volatile barriers expected. + INIT_CARRIER.init(); + } + +} diff --git a/test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefArrayTest.java b/test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefArrayTest.java new file mode 100644 index 00000000000..027bd2dce30 --- /dev/null +++ b/test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefArrayTest.java @@ -0,0 +1,179 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8333791 + * @requires os.arch=="aarch64" | os.arch=="riscv64" | os.arch=="x86_64" | os.arch=="amd64" + * @requires vm.gc.Parallel + * @requires vm.compiler2.enabled + * @summary Check stable field folding and barriers + * @modules java.base/jdk.internal.vm.annotation + * @library /test/lib / + * @run driver compiler.c2.irTests.stable.StableRefArrayTest + */ + +package compiler.c2.irTests.stable; + +import compiler.lib.ir_framework.*; +import jdk.test.lib.Asserts; + +import jdk.internal.vm.annotation.Stable; + +public class StableRefArrayTest { + + public static void main(String[] args) { + TestFramework tf = new TestFramework(); + tf.addTestClassesToBootClassPath(); + tf.addFlags( + "-XX:+UnlockExperimentalVMOptions", + "-XX:CompileThreshold=100", + "-XX:-TieredCompilation", + "-XX:+UseParallelGC" + ); + tf.start(); + } + + static final Integer[] EMPTY_INTEGER = new Integer[] { null }; + static final Integer[] FULL_INTEGER = new Integer[] { 42 }; + + static class Carrier { + @Stable + Integer[] field; + + @ForceInline + public Carrier(int initLevel) { + switch (initLevel) { + case 0: + // Do nothing. + break; + case 1: + field = EMPTY_INTEGER; + break; + case 2: + field = FULL_INTEGER; + break; + default: + throw new IllegalStateException("Unknown level"); + } + } + + @ForceInline + public void initEmpty() { + field = EMPTY_INTEGER; + } + + @ForceInline + public void initFull() { + field = FULL_INTEGER; + } + + } + + static final Carrier BLANK_CARRIER = new Carrier(0); + static final Carrier INIT_EMPTY_CARRIER = new Carrier(1); + static final Carrier INIT_FULL_CARRIER = new Carrier(2); + + @Test + @IR(counts = { IRNode.LOAD, ">0" }) + @IR(failOn = { IRNode.MEMBAR }) + static int testNoFold() { + // Access should not be folded. + // No barriers expected for plain fields. + Integer[] is = BLANK_CARRIER.field; + if (is != null) { + Integer i = is[0]; + if (i != null) { + return i; + } + } + return 0; + } + + @Test + @IR(counts = { IRNode.LOAD, ">0" }) + @IR(failOn = { IRNode.MEMBAR }) + static int testPartialFold() { + // Access should not be folded. + // No barriers expected for plain fields. + Integer[] is = INIT_EMPTY_CARRIER.field; + if (is != null) { + Integer i = is[0]; + if (i != null) { + return i; + } + } + return 0; + } + + + @Test + @IR(failOn = { IRNode.LOAD, IRNode.MEMBAR }) + static int testFold() { + // Access should be completely folded. + Integer[] is = INIT_FULL_CARRIER.field; + if (is != null) { + Integer i = is[0]; + if (i != null) { + return i; + } + } + return 0; + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorBlankInit() { + // Only the header barrier. + return new Carrier(0); + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorEmptyInit() { + // Only the header barrier. + return new Carrier(1); + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorFullInit() { + // Only the header barrier. + return new Carrier(2); + } + + @Test + @IR(failOn = { IRNode.MEMBAR }) + static void testMethodEmptyInit() { + // Reference inits do not have membars. + INIT_EMPTY_CARRIER.initEmpty(); + } + + @Test + @IR(failOn = { IRNode.MEMBAR }) + static void testMethodFullInit() { + // Reference inits do not have membars. 
+ INIT_FULL_CARRIER.initFull(); + } + +} diff --git a/test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefFinalTest.java b/test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefFinalTest.java new file mode 100644 index 00000000000..405c86a5fc9 --- /dev/null +++ b/test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefFinalTest.java @@ -0,0 +1,97 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8333791 + * @requires os.arch=="aarch64" | os.arch=="riscv64" | os.arch=="x86_64" | os.arch=="amd64" + * @requires vm.gc.Parallel + * @requires vm.compiler2.enabled + * @summary Check stable field folding and barriers + * @modules java.base/jdk.internal.vm.annotation + * @library /test/lib / + * @run driver compiler.c2.irTests.stable.StableRefFinalTest + */ + +package compiler.c2.irTests.stable; + +import compiler.lib.ir_framework.*; +import jdk.test.lib.Asserts; + +import jdk.internal.vm.annotation.Stable; + +public class StableRefFinalTest { + + public static void main(String[] args) { + TestFramework tf = new TestFramework(); + tf.addTestClassesToBootClassPath(); + tf.addFlags( + "-XX:+UnlockExperimentalVMOptions", + "-XX:CompileThreshold=100", + "-XX:-TieredCompilation", + "-XX:+UseParallelGC" + ); + tf.start(); + } + + static final Integer INTEGER = 42; + + static class Carrier { + @Stable + final Integer field; + + @ForceInline + public Carrier(boolean init) { + field = init ? INTEGER : null; + } + } + + static final Carrier BLANK_CARRIER = new Carrier(false); + static final Carrier INIT_CARRIER = new Carrier(true); + + @Test + @IR(counts = { IRNode.LOAD, ">0" }) + @IR(failOn = { IRNode.MEMBAR }) + static int testNoFold() { + // Access should not be folded. + // No barriers expected for plain fields. + Integer i = BLANK_CARRIER.field; + return i != null ? i : 0; + } + + @Test + @IR(failOn = { IRNode.LOAD, IRNode.MEMBAR }) + static int testFold() { + // Access should be completely folded. + Integer i = INIT_CARRIER.field; + return i != null ? i : 0; + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorInit() { + // Only the header+final barrier. + return new Carrier(true); + } + +} diff --git a/test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefPlainTest.java b/test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefPlainTest.java new file mode 100644 index 00000000000..bd5be32459d --- /dev/null +++ b/test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefPlainTest.java @@ -0,0 +1,118 @@ +/* + * Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8333791 + * @requires os.arch=="aarch64" | os.arch=="riscv64" | os.arch=="x86_64" | os.arch=="amd64" + * @requires vm.gc.Parallel + * @requires vm.compiler2.enabled + * @summary Check stable field folding and barriers + * @modules java.base/jdk.internal.vm.annotation + * @library /test/lib / + * @run driver compiler.c2.irTests.stable.StableRefPlainTest + */ + +package compiler.c2.irTests.stable; + +import compiler.lib.ir_framework.*; +import jdk.test.lib.Asserts; + +import jdk.internal.vm.annotation.Stable; + +public class StableRefPlainTest { + + public static void main(String[] args) { + TestFramework tf = new TestFramework(); + tf.addTestClassesToBootClassPath(); + tf.addFlags( + "-XX:+UnlockExperimentalVMOptions", + "-XX:CompileThreshold=100", + "-XX:-TieredCompilation", + "-XX:+UseParallelGC" + ); + tf.start(); + } + + static final Integer INTEGER = 42; + + static class Carrier { + @Stable + Integer field; + + @ForceInline + public Carrier(boolean init) { + if (init) { + field = INTEGER; + } + } + + @ForceInline + public void init() { + field = INTEGER; + } + } + + static final Carrier BLANK_CARRIER = new Carrier(false); + static final Carrier INIT_CARRIER = new Carrier(true); + + @Test + @IR(counts = { IRNode.LOAD, ">0" }) + @IR(failOn = { IRNode.MEMBAR }) + static int testNoFold() { + // Access should not be folded. + // No barriers expected for plain fields. + Integer i = BLANK_CARRIER.field; + return i != null ? i : 0; + } + + @Test + @IR(failOn = { IRNode.LOAD, IRNode.MEMBAR }) + static int testFold() { + // Access should be completely folded. + Integer i = INIT_CARRIER.field; + return i != null ? i : 0; + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorBlankInit() { + // Only the header barrier. + return new Carrier(false); + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorFullInit() { + // Only the header barrier. + return new Carrier(true); + } + + @Test + @IR(failOn = { IRNode.MEMBAR }) + static void testMethodInit() { + // Reference inits do not have membars. + INIT_CARRIER.init(); + } + +} diff --git a/test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefVolatileTest.java b/test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefVolatileTest.java new file mode 100644 index 00000000000..7d7d7fcad77 --- /dev/null +++ b/test/hotspot/jtreg/compiler/c2/irTests/stable/StableRefVolatileTest.java @@ -0,0 +1,118 @@ +/* + * Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8333791 + * @requires os.arch=="aarch64" | os.arch=="riscv64" | os.arch=="x86_64" | os.arch=="amd64" + * @requires vm.gc.Parallel + * @requires vm.compiler2.enabled + * @summary Check stable field folding and barriers + * @modules java.base/jdk.internal.vm.annotation + * @library /test/lib / + * @run driver compiler.c2.irTests.stable.StableRefVolatileTest + */ + +package compiler.c2.irTests.stable; + +import compiler.lib.ir_framework.*; +import jdk.test.lib.Asserts; + +import jdk.internal.vm.annotation.Stable; + +public class StableRefVolatileTest { + + public static void main(String[] args) { + TestFramework tf = new TestFramework(); + tf.addTestClassesToBootClassPath(); + tf.addFlags( + "-XX:+UnlockExperimentalVMOptions", + "-XX:CompileThreshold=100", + "-XX:-TieredCompilation", + "-XX:+UseParallelGC" + ); + tf.start(); + } + + static final Integer INTEGER = 42; + + static class Carrier { + @Stable + volatile Integer field; + + @ForceInline + public Carrier(boolean init) { + if (init) { + field = INTEGER; + } + } + + @ForceInline + public void init() { + field = INTEGER; + } + } + + static final Carrier BLANK_CARRIER = new Carrier(false); + static final Carrier INIT_CARRIER = new Carrier(true); + + @Test + @IR(counts = { IRNode.LOAD, ">0" }) + @IR(counts = { IRNode.MEMBAR, ">0" }) + static int testNoFold() { + // Access should not be folded. + // Barriers are expected for volatile field. + Integer i = BLANK_CARRIER.field; + return i != null ? i : 0; + } + + @Test + @IR(failOn = { IRNode.LOAD, IRNode.MEMBAR }) + static int testFold() { + // Access should be completely folded. + Integer i = INIT_CARRIER.field; + return i != null ? i : 0; + } + + @Test + @IR(counts = { IRNode.MEMBAR_STORESTORE, "1" }) + static Carrier testConstructorBlankInit() { + // Only the header barrier. + return new Carrier(false); + } + + @Test + @IR(counts = { IRNode.MEMBAR, ">0" }) + static Carrier testConstructorFullInit() { + // Volatile writes, expect more barriers. + return new Carrier(true); + } + + @Test + @IR(counts = { IRNode.MEMBAR, ">0" }) + static void testMethodInit() { + // Barriers are expected for volatile fields. 
+ INIT_CARRIER.init(); + } + +} From 56dec215b0d056fc23137372ecb3376af2a7b891 Mon Sep 17 00:00:00 2001 From: Sonia Zaldana Calles Date: Thu, 15 Aug 2024 13:28:25 +0000 Subject: [PATCH 04/67] 8338014: Improve usage of @jvms tags in class file API Reviewed-by: darcy, liach, asotona --- .../classes/java/lang/classfile/AnnotationValue.java | 2 +- .../share/classes/java/lang/classfile/Attribute.java | 4 ++-- .../classes/java/lang/classfile/ClassSignature.java | 2 +- .../classes/java/lang/classfile/MethodSignature.java | 2 +- .../share/classes/java/lang/classfile/Opcode.java | 4 ++-- .../share/classes/java/lang/classfile/Signature.java | 2 +- .../classes/java/lang/classfile/TypeAnnotation.java | 10 +++++----- .../attribute/AnnotationDefaultAttribute.java | 4 ++-- .../classfile/attribute/BootstrapMethodsAttribute.java | 4 ++-- .../java/lang/classfile/attribute/CodeAttribute.java | 2 +- .../classfile/attribute/ConstantValueAttribute.java | 4 ++-- .../lang/classfile/attribute/DeprecatedAttribute.java | 4 ++-- .../classfile/attribute/EnclosingMethodAttribute.java | 4 ++-- .../lang/classfile/attribute/ExceptionsAttribute.java | 4 ++-- .../classfile/attribute/InnerClassesAttribute.java | 4 ++-- .../classfile/attribute/LineNumberTableAttribute.java | 4 ++-- .../attribute/LocalVariableTableAttribute.java | 4 ++-- .../attribute/LocalVariableTypeTableAttribute.java | 4 ++-- .../classfile/attribute/MethodParametersAttribute.java | 4 ++-- .../java/lang/classfile/attribute/ModuleAttribute.java | 2 +- .../classfile/attribute/ModuleMainClassAttribute.java | 4 ++-- .../classfile/attribute/ModulePackagesAttribute.java | 4 ++-- .../lang/classfile/attribute/NestHostAttribute.java | 4 ++-- .../lang/classfile/attribute/NestMembersAttribute.java | 4 ++-- .../attribute/PermittedSubclassesAttribute.java | 4 ++-- .../java/lang/classfile/attribute/RecordAttribute.java | 4 ++-- .../RuntimeInvisibleAnnotationsAttribute.java | 4 ++-- .../RuntimeInvisibleParameterAnnotationsAttribute.java | 4 ++-- .../RuntimeInvisibleTypeAnnotationsAttribute.java | 4 ++-- .../attribute/RuntimeVisibleAnnotationsAttribute.java | 4 ++-- .../RuntimeVisibleParameterAnnotationsAttribute.java | 4 ++-- .../RuntimeVisibleTypeAnnotationsAttribute.java | 4 ++-- .../lang/classfile/attribute/SignatureAttribute.java | 4 ++-- .../lang/classfile/attribute/SourceFileAttribute.java | 4 ++-- .../lang/classfile/attribute/StackMapFrameInfo.java | 4 ++-- .../classfile/attribute/StackMapTableAttribute.java | 4 ++-- .../lang/classfile/attribute/SyntheticAttribute.java | 4 ++-- .../classfile/constantpool/ConstantPoolBuilder.java | 2 +- .../lang/classfile/constantpool/MethodHandleEntry.java | 4 ++-- .../lang/classfile/instruction/InvokeInstruction.java | 4 ++-- 40 files changed, 76 insertions(+), 76 deletions(-) diff --git a/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java b/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java index a4a1c218ac1..04bbcffb8bc 100644 --- a/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java +++ b/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java @@ -426,7 +426,7 @@ default ClassDesc classSymbol() { } /** - * {@return the tag character for this type as per {@jvms 4.7.16.1}} + * {@return the tag character for this type as per JVMS {@jvms 4.7.16.1}} */ char tag(); diff --git a/src/java.base/share/classes/java/lang/classfile/Attribute.java b/src/java.base/share/classes/java/lang/classfile/Attribute.java index 718f164e8ef..b9e3df8d2a2 100644 --- 
a/src/java.base/share/classes/java/lang/classfile/Attribute.java +++ b/src/java.base/share/classes/java/lang/classfile/Attribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,7 +66,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models a classfile attribute {@jvms 4.7}. Many, though not all, subtypes of + * Models a classfile attribute (JVMS {@jvms 4.7}). Many, though not all, subtypes of * {@linkplain Attribute} will implement {@link ClassElement}, {@link * MethodElement}, {@link FieldElement}, or {@link CodeElement}; attributes that * are also elements will be delivered when traversing the elements of the diff --git a/src/java.base/share/classes/java/lang/classfile/ClassSignature.java b/src/java.base/share/classes/java/lang/classfile/ClassSignature.java index 5c55950547e..3b6b15f59f8 100644 --- a/src/java.base/share/classes/java/lang/classfile/ClassSignature.java +++ b/src/java.base/share/classes/java/lang/classfile/ClassSignature.java @@ -30,7 +30,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the generic signature of a class file, as defined by {@jvms 4.7.9}. + * Models the generic signature of a class file, as defined by JVMS {@jvms 4.7.9}. * * @since 22 */ diff --git a/src/java.base/share/classes/java/lang/classfile/MethodSignature.java b/src/java.base/share/classes/java/lang/classfile/MethodSignature.java index 0d5a77faa6c..5e758b64be3 100644 --- a/src/java.base/share/classes/java/lang/classfile/MethodSignature.java +++ b/src/java.base/share/classes/java/lang/classfile/MethodSignature.java @@ -32,7 +32,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the generic signature of a method, as defined by {@jvms 4.7.9}. + * Models the generic signature of a method, as defined by JVMS {@jvms 4.7.9}. * * @since 22 */ diff --git a/src/java.base/share/classes/java/lang/classfile/Opcode.java b/src/java.base/share/classes/java/lang/classfile/Opcode.java index 6ce231d43d0..9128a0e79d3 100644 --- a/src/java.base/share/classes/java/lang/classfile/Opcode.java +++ b/src/java.base/share/classes/java/lang/classfile/Opcode.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Describes the opcodes of the JVM instruction set, as described in {@jvms 6.5}. + * Describes the opcodes of the JVM instruction set, as described in JVMS {@jvms 6.5}. * As well as a number of pseudo-instructions that may be encountered when * traversing the instructions of a method. * diff --git a/src/java.base/share/classes/java/lang/classfile/Signature.java b/src/java.base/share/classes/java/lang/classfile/Signature.java index 22ca477f4e6..739c3f1f3f3 100644 --- a/src/java.base/share/classes/java/lang/classfile/Signature.java +++ b/src/java.base/share/classes/java/lang/classfile/Signature.java @@ -34,7 +34,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models generic Java type signatures, as defined in {@jvms 4.7.9.1}. 
+ * Models generic Java type signatures, as defined in JVMS {@jvms 4.7.9.1}. * * @sealedGraph * @since 22 diff --git a/src/java.base/share/classes/java/lang/classfile/TypeAnnotation.java b/src/java.base/share/classes/java/lang/classfile/TypeAnnotation.java index add1872f641..01a2f5fc696 100644 --- a/src/java.base/share/classes/java/lang/classfile/TypeAnnotation.java +++ b/src/java.base/share/classes/java/lang/classfile/TypeAnnotation.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models an annotation on a type use, as defined in {@jvms 4.7.19} and {@jvms 4.7.20}. + * Models an annotation on a type use, as defined in JVMS {@jvms 4.7.19} and {@jvms 4.7.20}. * * @see RuntimeVisibleTypeAnnotationsAttribute * @see RuntimeInvisibleTypeAnnotationsAttribute @@ -73,7 +73,7 @@ public sealed interface TypeAnnotation permits UnboundAttribute.UnboundTypeAnnotation { /** - * The kind of target on which the annotation appears, as defined in {@jvms 4.7.20.1}. + * The kind of target on which the annotation appears, as defined in JVMS {@jvms 4.7.20.1}. * * @since 22 */ @@ -773,7 +773,7 @@ sealed interface TypeArgumentTarget extends TargetInfo /** * JVMS: Type_path structure identifies which part of the type is annotated, - * as defined in {@jvms 4.7.20.2} + * as defined in JVMS {@jvms 4.7.20.2} * * @since 22 */ @@ -782,7 +782,7 @@ sealed interface TypePathComponent permits UnboundAttribute.TypePathComponentImpl { /** - * Type path kind, as defined in {@jvms 4.7.20.2} + * Type path kind, as defined in JVMS {@jvms 4.7.20.2} * * @since 22 */ diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/AnnotationDefaultAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/AnnotationDefaultAttribute.java index 16149ff66c5..018fcd65a34 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/AnnotationDefaultAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/AnnotationDefaultAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code AnnotationDefault} attribute {@jvms 4.7.22}, which can + * Models the {@code AnnotationDefault} attribute (JVMS {@jvms 4.7.22}), which can * appear on methods of annotation types, and records the default value * {@jls 9.6.2} for the element corresponding to this method. Delivered as a * {@link MethodElement} when traversing the elements of a {@link MethodModel}. 
diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/BootstrapMethodsAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/BootstrapMethodsAttribute.java index 436a7f80b1c..7cc784af1f4 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/BootstrapMethodsAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/BootstrapMethodsAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code BootstrapMethods} attribute {@jvms 4.7.23}, which serves as + * Models the {@code BootstrapMethods} attribute (JVMS {@jvms 4.7.23}), which serves as * an extension to the constant pool of a classfile. Elements of the bootstrap * method table are accessed through {@link ConstantPool}. *
<p>
diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/CodeAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/CodeAttribute.java index 7a4f5886580..f0bab0fde11 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/CodeAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/CodeAttribute.java @@ -32,7 +32,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code Code} attribute {@jvms 4.7.3}, appears on non-native, + * Models the {@code Code} attribute (JVMS {@jvms 4.7.3}), appears on non-native, * non-abstract methods and contains the bytecode of the method body. Delivered * as a {@link java.lang.classfile.MethodElement} when traversing the elements of a * {@link java.lang.classfile.MethodModel}. diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/ConstantValueAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/ConstantValueAttribute.java index c53b438f35f..390320c679c 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/ConstantValueAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/ConstantValueAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code ConstantValue} attribute {@jvms 4.7.2}, which can appear on + * Models the {@code ConstantValue} attribute (JVMS {@jvms 4.7.2}), which can appear on * fields and indicates that the field's value is a constant. Delivered as a * {@link java.lang.classfile.FieldElement} when traversing the elements of a * {@link java.lang.classfile.FieldModel}. diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/DeprecatedAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/DeprecatedAttribute.java index 441271d55ef..f65f01f4e2a 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/DeprecatedAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/DeprecatedAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code Deprecated} attribute {@jvms 4.7.15}, which can appear on + * Models the {@code Deprecated} attribute (JVMS {@jvms 4.7.15}), which can appear on * classes, methods, and fields. Delivered as a {@link ClassElement}, * {@link MethodElement}, or {@link FieldElement} when traversing the elements * of a corresponding model. 
diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/EnclosingMethodAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/EnclosingMethodAttribute.java index 3079acee89b..768019fa1d3 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/EnclosingMethodAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/EnclosingMethodAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code EnclosingMethod} attribute {@jvms 4.7.7}, which can appear + * Models the {@code EnclosingMethod} attribute (JVMS {@jvms 4.7.7}), which can appear * on classes, and indicates that the class is a local or anonymous class. * Delivered as a {@link ClassElement} when traversing the elements of a {@link * java.lang.classfile.ClassModel}. diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/ExceptionsAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/ExceptionsAttribute.java index 5af4476c923..e12b9eb9fc3 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/ExceptionsAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/ExceptionsAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code Exceptions} attribute {@jvms 4.7.5}, which can appear on + * Models the {@code Exceptions} attribute (JVMS {@jvms 4.7.5}), which can appear on * methods, and records the exceptions declared to be thrown by this method. * Delivered as a {@link MethodElement} when traversing the elements of a * {@link java.lang.classfile.MethodModel}. diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/InnerClassesAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/InnerClassesAttribute.java index 2089b601e3b..0746d20b5b8 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/InnerClassesAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/InnerClassesAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code InnerClasses} attribute {@jvms 4.7.6}, which can + * Models the {@code InnerClasses} attribute (JVMS {@jvms 4.7.6}), which can * appear on classes, and records which classes referenced by this classfile * are inner classes. Delivered as a {@link java.lang.classfile.ClassElement} when * traversing the elements of a {@link java.lang.classfile.ClassModel}. 
diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/LineNumberTableAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/LineNumberTableAttribute.java index 5b211487c06..a31f4919688 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/LineNumberTableAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/LineNumberTableAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code LineNumberTable} attribute {@jvms 4.7.12}, which can appear + * Models the {@code LineNumberTable} attribute (JVMS {@jvms 4.7.12}), which can appear * on a {@code Code} attribute, and records the mapping between indexes into * the code table and line numbers in the source file. * Delivered as a {@link java.lang.classfile.instruction.LineNumber} when traversing the diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/LocalVariableTableAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/LocalVariableTableAttribute.java index 2bcdaddaea9..745b6d8e8e2 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/LocalVariableTableAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/LocalVariableTableAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code LocalVariableTable} attribute {@jvms 4.7.13}, which can appear + * Models the {@code LocalVariableTable} attribute (JVMS {@jvms 4.7.13}), which can appear * on a {@code Code} attribute, and records debug information about local * variables. * Delivered as a {@link java.lang.classfile.instruction.LocalVariable} when traversing the diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/LocalVariableTypeTableAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/LocalVariableTypeTableAttribute.java index abd3d480b9f..c2475530e6c 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/LocalVariableTypeTableAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/LocalVariableTypeTableAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code LocalVariableTypeTable} attribute {@jvms 4.7.14}, which can appear + * Models the {@code LocalVariableTypeTable} attribute (JVMS {@jvms 4.7.14}), which can appear * on a {@code Code} attribute, and records debug information about local * variables. 
* Delivered as a {@link java.lang.classfile.instruction.LocalVariable} when traversing the diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/MethodParametersAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/MethodParametersAttribute.java index d38e5e59797..81530086cdf 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/MethodParametersAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/MethodParametersAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code MethodParameters} attribute {@jvms 4.7.24}, which can + * Models the {@code MethodParameters} attribute (JVMS {@jvms 4.7.24}), which can * appear on methods, and records optional information about the method's * parameters. Delivered as a {@link java.lang.classfile.MethodElement} when * traversing the elements of a {@link java.lang.classfile.MethodModel}. diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/ModuleAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/ModuleAttribute.java index 9a4c58478ad..ec258611c70 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/ModuleAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/ModuleAttribute.java @@ -46,7 +46,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code Module} attribute {@jvms 4.7.25}, which can + * Models the {@code Module} attribute (JVMS {@jvms 4.7.25}), which can * appear on classes that represent module descriptors. * Delivered as a {@link java.lang.classfile.ClassElement} when * traversing the elements of a {@link java.lang.classfile.ClassModel}. diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/ModuleMainClassAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/ModuleMainClassAttribute.java index 086017ba9aa..91fe3c8f2d7 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/ModuleMainClassAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/ModuleMainClassAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code ModuleMainClass} attribute {@jvms 4.7.27}, which can + * Models the {@code ModuleMainClass} attribute (JVMS {@jvms 4.7.27}), which can * appear on classes that represent module descriptors. * Delivered as a {@link java.lang.classfile.ClassElement} when * traversing the elements of a {@link java.lang.classfile.ClassModel}. 
diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/ModulePackagesAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/ModulePackagesAttribute.java index 7bf2ac8ce62..30bc0e9827b 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/ModulePackagesAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/ModulePackagesAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code ModulePackages} attribute {@jvms 4.7.26}, which can + * Models the {@code ModulePackages} attribute (JVMS {@jvms 4.7.26}), which can * appear on classes that represent module descriptors. * Delivered as a {@link java.lang.classfile.ClassElement} when * traversing the elements of a {@link java.lang.classfile.ClassModel}. diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/NestHostAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/NestHostAttribute.java index 7c60835385a..0961601e5d4 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/NestHostAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/NestHostAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code NestHost} attribute {@jvms 4.7.28}, which can + * Models the {@code NestHost} attribute (JVMS {@jvms 4.7.28}), which can * appear on classes to indicate that this class is a member of a nest. * Delivered as a {@link java.lang.classfile.ClassElement} when * traversing the elements of a {@link java.lang.classfile.ClassModel}. diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/NestMembersAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/NestMembersAttribute.java index 72d4e8cca7d..f184df7fff5 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/NestMembersAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/NestMembersAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code NestMembers} attribute {@jvms 4.7.29}, which can + * Models the {@code NestMembers} attribute (JVMS {@jvms 4.7.29}), which can * appear on classes to indicate that this class is the host of a nest. * Delivered as a {@link java.lang.classfile.ClassElement} when * traversing the elements of a {@link java.lang.classfile.ClassModel}. 
diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/PermittedSubclassesAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/PermittedSubclassesAttribute.java index e6aaa6ad4a6..2d86d4d26e0 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/PermittedSubclassesAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/PermittedSubclassesAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code PermittedSubclasses} attribute {@jvms 4.7.31}, which can + * Models the {@code PermittedSubclasses} attribute (JVMS {@jvms 4.7.31}), which can * appear on classes to indicate which classes may extend this class. * Delivered as a {@link java.lang.classfile.ClassElement} when * traversing the elements of a {@link java.lang.classfile.ClassModel}. diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/RecordAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/RecordAttribute.java index 16d01943bc3..f79538a135c 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/RecordAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/RecordAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code Record} attribute {@jvms 4.7.30}, which can + * Models the {@code Record} attribute (JVMS {@jvms 4.7.30}), which can * appear on classes to indicate that this class is a record class. * Delivered as a {@link java.lang.classfile.ClassElement} when * traversing the elements of a {@link java.lang.classfile.ClassModel}. diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeInvisibleAnnotationsAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeInvisibleAnnotationsAttribute.java index 485e4d2af59..b467e8504fe 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeInvisibleAnnotationsAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeInvisibleAnnotationsAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code RuntimeInvisibleAnnotations} attribute {@jvms 4.7.17}, which + * Models the {@code RuntimeInvisibleAnnotations} attribute (JVMS {@jvms 4.7.17}), which * can appear on classes, methods, and fields. Delivered as a * {@link java.lang.classfile.ClassElement}, {@link java.lang.classfile.FieldElement}, or * {@link java.lang.classfile.MethodElement} when traversing the corresponding model type. 
diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeInvisibleParameterAnnotationsAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeInvisibleParameterAnnotationsAttribute.java index 4865dde3411..af495788afa 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeInvisibleParameterAnnotationsAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeInvisibleParameterAnnotationsAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ /** * Models the {@code RuntimeInvisibleParameterAnnotations} attribute - * {@jvms 4.7.19}, which can appear on methods. Delivered as a {@link + * (JVMS {@jvms 4.7.19}), which can appear on methods. Delivered as a {@link * java.lang.classfile.MethodElement} when traversing a {@link MethodModel}. *
<p>
* The attribute does not permit multiple instances in a given location. diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeInvisibleTypeAnnotationsAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeInvisibleTypeAnnotationsAttribute.java index 830e9778a47..46dd2167541 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeInvisibleTypeAnnotationsAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeInvisibleTypeAnnotationsAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code RuntimeInvisibleTypeAnnotations} attribute {@jvms 4.7.21}, which + * Models the {@code RuntimeInvisibleTypeAnnotations} attribute (JVMS {@jvms 4.7.21}), which * can appear on classes, methods, fields, and code attributes. Delivered as a * {@link java.lang.classfile.ClassElement}, {@link java.lang.classfile.FieldElement}, * {@link java.lang.classfile.MethodElement}, or {@link CodeElement} when traversing diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeVisibleAnnotationsAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeVisibleAnnotationsAttribute.java index 77a49b8d13b..4454dac62a9 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeVisibleAnnotationsAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeVisibleAnnotationsAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code RuntimeVisibleAnnotations} attribute {@jvms 4.7.16}, which + * Models the {@code RuntimeVisibleAnnotations} attribute (JVMS {@jvms 4.7.16}), which * can appear on classes, methods, and fields. Delivered as a * {@link java.lang.classfile.ClassElement}, {@link java.lang.classfile.FieldElement}, or * {@link java.lang.classfile.MethodElement} when traversing the corresponding model type. diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeVisibleParameterAnnotationsAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeVisibleParameterAnnotationsAttribute.java index 7923493499a..59f648199ca 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeVisibleParameterAnnotationsAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeVisibleParameterAnnotationsAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -36,7 +36,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code RuntimeVisibleParameterAnnotations} attribute {@jvms 4.7.18}, which + * Models the {@code RuntimeVisibleParameterAnnotations} attribute (JVMS {@jvms 4.7.18}), which * can appear on methods. Delivered as a {@link java.lang.classfile.MethodElement} * when traversing a {@link MethodModel}. * diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeVisibleTypeAnnotationsAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeVisibleTypeAnnotationsAttribute.java index 3cf38811d5a..8bdf322803d 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeVisibleTypeAnnotationsAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/RuntimeVisibleTypeAnnotationsAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code RuntimeVisibleTypeAnnotations} attribute {@jvms 4.7.20}, which + * Models the {@code RuntimeVisibleTypeAnnotations} attribute (JVMS {@jvms 4.7.20}), which * can appear on classes, methods, fields, and code attributes. Delivered as a * {@link java.lang.classfile.ClassElement}, {@link java.lang.classfile.FieldElement}, * {@link java.lang.classfile.MethodElement}, or {@link CodeElement} when traversing diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/SignatureAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/SignatureAttribute.java index e77382f2fd4..783b068c4e6 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/SignatureAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/SignatureAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code Signature} attribute {@jvms 4.7.9}, which + * Models the {@code Signature} attribute (JVMS {@jvms 4.7.9}), which * can appear on classes, methods, or fields. Delivered as a * {@link java.lang.classfile.ClassElement}, {@link java.lang.classfile.FieldElement}, or * {@link java.lang.classfile.MethodElement} when traversing diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/SourceFileAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/SourceFileAttribute.java index 58e1ab47418..abfa4eb05ff 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/SourceFileAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/SourceFileAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code SourceFile} attribute {@jvms 4.7.10}, which + * Models the {@code SourceFile} attribute (JVMS {@jvms 4.7.10}), which * can appear on classes. Delivered as a {@link java.lang.classfile.ClassElement} * when traversing a {@link ClassModel}. *
<p>
diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/StackMapFrameInfo.java b/src/java.base/share/classes/java/lang/classfile/attribute/StackMapFrameInfo.java index 08736444442..46caa66ef9a 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/StackMapFrameInfo.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/StackMapFrameInfo.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,7 +36,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models stack map frame of {@code StackMapTable} attribute {@jvms 4.7.4}. + * Models stack map frame of {@code StackMapTable} attribute (JVMS {@jvms 4.7.4}). * * @since 22 */ diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/StackMapTableAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/StackMapTableAttribute.java index d3ec67cab63..74dd567060e 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/StackMapTableAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/StackMapTableAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code StackMapTable} attribute {@jvms 4.7.4}, which can appear + * Models the {@code StackMapTable} attribute (JVMS {@jvms 4.7.4}), which can appear * on a {@code Code} attribute. *
<p>
* The attribute does not permit multiple instances in a given location. diff --git a/src/java.base/share/classes/java/lang/classfile/attribute/SyntheticAttribute.java b/src/java.base/share/classes/java/lang/classfile/attribute/SyntheticAttribute.java index 6bc9c19b851..d1b7b8f0384 100644 --- a/src/java.base/share/classes/java/lang/classfile/attribute/SyntheticAttribute.java +++ b/src/java.base/share/classes/java/lang/classfile/attribute/SyntheticAttribute.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the {@code Synthetic} attribute {@jvms 4.7.8}, which can appear on + * Models the {@code Synthetic} attribute (JVMS {@jvms 4.7.8}), which can appear on * classes, methods, and fields. Delivered as a {@link ClassElement}, * {@link MethodElement}, or {@link FieldElement} when traversing the elements * of a corresponding model. diff --git a/src/java.base/share/classes/java/lang/classfile/constantpool/ConstantPoolBuilder.java b/src/java.base/share/classes/java/lang/classfile/constantpool/ConstantPoolBuilder.java index db77f8e2e8a..7334d8e5460 100644 --- a/src/java.base/share/classes/java/lang/classfile/constantpool/ConstantPoolBuilder.java +++ b/src/java.base/share/classes/java/lang/classfile/constantpool/ConstantPoolBuilder.java @@ -365,7 +365,7 @@ default MethodHandleEntry methodHandleEntry(DirectMethodHandleDesc descriptor) { * it is returned; otherwise, a new entry is added and the new entry is * returned. * - * @param refKind the reference kind of the method handle {@jvms 4.4.8} + * @param refKind the reference kind of the method handle (JVMS {@jvms 4.4.8}) * @param reference the constant pool entry describing the field or method */ MethodHandleEntry methodHandleEntry(int refKind, MemberRefEntry reference); diff --git a/src/java.base/share/classes/java/lang/classfile/constantpool/MethodHandleEntry.java b/src/java.base/share/classes/java/lang/classfile/constantpool/MethodHandleEntry.java index 76899100027..37ec30648ab 100644 --- a/src/java.base/share/classes/java/lang/classfile/constantpool/MethodHandleEntry.java +++ b/src/java.base/share/classes/java/lang/classfile/constantpool/MethodHandleEntry.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,7 +48,7 @@ default ConstantDesc constantValue() { } /** - * {@return the reference kind of this method handle {@jvms 4.4.8}} + * {@return the reference kind of this method handle (JVMS {@jvms 4.4.8})} * @see java.lang.invoke.MethodHandleInfo */ int kind(); diff --git a/src/java.base/share/classes/java/lang/classfile/instruction/InvokeInstruction.java b/src/java.base/share/classes/java/lang/classfile/instruction/InvokeInstruction.java index 79a2142eb65..ff68abce3d2 100644 --- a/src/java.base/share/classes/java/lang/classfile/instruction/InvokeInstruction.java +++ b/src/java.base/share/classes/java/lang/classfile/instruction/InvokeInstruction.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,7 +64,7 @@ public sealed interface InvokeInstruction extends Instruction boolean isInterface(); /** - * {@return the {@code count} value of an {@code invokeinterface} instruction, as defined in {@jvms 6.5} + * {@return the {@code count} value of an {@code invokeinterface} instruction, as defined in JVMS {@jvms 6.5} * or {@code 0} for {@code invokespecial}, {@code invokestatic} and {@code invokevirtual} instructions} */ int count(); From 38591315058e6d3b764ca325facc5bf46bf7b16b Mon Sep 17 00:00:00 2001 From: Fei Gao Date: Thu, 15 Aug 2024 15:16:14 +0000 Subject: [PATCH 05/67] 8338442: AArch64: Clean up IndOffXX type and let legitimize_address() fix out-of-range operands Reviewed-by: aph, dlong --- src/hotspot/cpu/aarch64/aarch64.ad | 393 ++++-------------- src/hotspot/cpu/aarch64/aarch64_vector.ad | 16 +- src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 | 2 +- src/hotspot/cpu/aarch64/ad_encode.m4 | 8 +- src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad | 2 +- src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad | 2 +- .../compiler/c2/TestUnalignedAccess.java | 61 ++- 7 files changed, 123 insertions(+), 361 deletions(-) diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad index 66a24a61f29..a96c47051f2 100644 --- a/src/hotspot/cpu/aarch64/aarch64.ad +++ b/src/hotspot/cpu/aarch64/aarch64.ad @@ -2720,7 +2720,7 @@ typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt, { Address addr = mem2address(opcode, base, index, scale, disp); if (addr.getMode() == Address::base_plus_offset) { - /* Fix up any out-of-range offsets. */ + // Fix up any out-of-range offsets. assert_different_registers(rscratch1, base); assert_different_registers(rscratch1, reg); addr = __ legitimize_address(addr, size_in_memory, rscratch1); @@ -2761,7 +2761,11 @@ typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt, int opcode, Register base, int index, int size, int disp) { if (index == -1) { - (masm->*insn)(reg, T, Address(base, disp)); + // Fix up any out-of-range offsets. + assert_different_registers(rscratch1, base); + Address addr = Address(base, disp); + addr = __ legitimize_address(addr, (1 << T), rscratch1); + (masm->*insn)(reg, T, addr); } else { assert(disp == 0, "unsupported address mode"); (masm->*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size))); @@ -2816,7 +2820,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{ + enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); @@ -2824,7 +2828,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{ + enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrsb, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); @@ -2832,7 +2836,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. 
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{ + enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); @@ -2840,7 +2844,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{ + enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); @@ -2848,7 +2852,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{ + enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrshw, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2); @@ -2856,7 +2860,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{ + enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrsh, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2); @@ -2864,7 +2868,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{ + enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2); @@ -2872,7 +2876,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{ + enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2); @@ -2880,7 +2884,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{ + enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4); @@ -2888,7 +2892,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{ + enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4); @@ -2896,7 +2900,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. 
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{ + enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldrsw, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4); @@ -2904,7 +2908,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{ + enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); loadStore(masm, &MacroAssembler::ldr, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8); @@ -2912,7 +2916,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{ + enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{ FloatRegister dst_reg = as_FloatRegister($dst$$reg); loadStore(masm, &MacroAssembler::ldrs, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4); @@ -2920,7 +2924,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{ + enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{ FloatRegister dst_reg = as_FloatRegister($dst$$reg); loadStore(masm, &MacroAssembler::ldrd, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8); @@ -2928,7 +2932,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{ + enc_class aarch64_enc_strb(iRegI src, memory mem) %{ Register src_reg = as_Register($src$$reg); loadStore(masm, &MacroAssembler::strb, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); @@ -2936,14 +2940,14 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strb0(memory1 mem) %{ + enc_class aarch64_enc_strb0(memory mem) %{ loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); %} // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{ + enc_class aarch64_enc_strh(iRegI src, memory mem) %{ Register src_reg = as_Register($src$$reg); loadStore(masm, &MacroAssembler::strh, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2); @@ -2951,14 +2955,14 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strh0(memory2 mem) %{ + enc_class aarch64_enc_strh0(memory mem) %{ loadStore(masm, &MacroAssembler::strh, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2); %} // This encoding class is generated automatically from ad_encode.m4. 
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{ + enc_class aarch64_enc_strw(iRegI src, memory mem) %{ Register src_reg = as_Register($src$$reg); loadStore(masm, &MacroAssembler::strw, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4); @@ -2966,14 +2970,14 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strw0(memory4 mem) %{ + enc_class aarch64_enc_strw0(memory mem) %{ loadStore(masm, &MacroAssembler::strw, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4); %} // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_str(iRegL src, memory8 mem) %{ + enc_class aarch64_enc_str(iRegL src, memory mem) %{ Register src_reg = as_Register($src$$reg); // we sometimes get asked to store the stack pointer into the // current thread -- we cannot do that directly on AArch64 @@ -2988,14 +2992,14 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_str0(memory8 mem) %{ + enc_class aarch64_enc_str0(memory mem) %{ loadStore(masm, &MacroAssembler::str, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8); %} // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{ + enc_class aarch64_enc_strs(vRegF src, memory mem) %{ FloatRegister src_reg = as_FloatRegister($src$$reg); loadStore(masm, &MacroAssembler::strs, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4); @@ -3003,7 +3007,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{ + enc_class aarch64_enc_strd(vRegD src, memory mem) %{ FloatRegister src_reg = as_FloatRegister($src$$reg); loadStore(masm, &MacroAssembler::strd, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8); @@ -3011,7 +3015,7 @@ encode %{ // This encoding class is generated automatically from ad_encode.m4. 
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strb0_ordered(memory4 mem) %{ + enc_class aarch64_enc_strb0_ordered(memory mem) %{ __ membar(Assembler::StoreStore); loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); @@ -3213,7 +3217,7 @@ encode %{ // synchronized read/update encodings - enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{ + enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); Register base = as_Register($mem$$base); int index = $mem$$index; @@ -3241,7 +3245,7 @@ encode %{ } %} - enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{ + enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{ Register src_reg = as_Register($src$$reg); Register base = as_Register($mem$$base); int index = $mem$$index; @@ -4169,60 +4173,10 @@ operand immIU7() interface(CONST_INTER); %} -// Offset for scaled or unscaled immediate loads and stores +// Offset for immediate loads and stores operand immIOffset() %{ - predicate(Address::offset_ok_for_immed(n->get_int(), 0)); - match(ConI); - - op_cost(0); - format %{ %} - interface(CONST_INTER); -%} - -operand immIOffset1() -%{ - predicate(Address::offset_ok_for_immed(n->get_int(), 0)); - match(ConI); - - op_cost(0); - format %{ %} - interface(CONST_INTER); -%} - -operand immIOffset2() -%{ - predicate(Address::offset_ok_for_immed(n->get_int(), 1)); - match(ConI); - - op_cost(0); - format %{ %} - interface(CONST_INTER); -%} - -operand immIOffset4() -%{ - predicate(Address::offset_ok_for_immed(n->get_int(), 2)); - match(ConI); - - op_cost(0); - format %{ %} - interface(CONST_INTER); -%} - -operand immIOffset8() -%{ - predicate(Address::offset_ok_for_immed(n->get_int(), 3)); - match(ConI); - - op_cost(0); - format %{ %} - interface(CONST_INTER); -%} - -operand immIOffset16() -%{ - predicate(Address::offset_ok_for_immed(n->get_int(), 4)); + predicate(n->get_int() >= -256 && n->get_int() <= 65520); match(ConI); op_cost(0); @@ -4240,56 +4194,6 @@ operand immLOffset() interface(CONST_INTER); %} -operand immLoffset1() -%{ - predicate(Address::offset_ok_for_immed(n->get_long(), 0)); - match(ConL); - - op_cost(0); - format %{ %} - interface(CONST_INTER); -%} - -operand immLoffset2() -%{ - predicate(Address::offset_ok_for_immed(n->get_long(), 1)); - match(ConL); - - op_cost(0); - format %{ %} - interface(CONST_INTER); -%} - -operand immLoffset4() -%{ - predicate(Address::offset_ok_for_immed(n->get_long(), 2)); - match(ConL); - - op_cost(0); - format %{ %} - interface(CONST_INTER); -%} - -operand immLoffset8() -%{ - predicate(Address::offset_ok_for_immed(n->get_long(), 3)); - match(ConL); - - op_cost(0); - format %{ %} - interface(CONST_INTER); -%} - -operand immLoffset16() -%{ - predicate(Address::offset_ok_for_immed(n->get_long(), 4)); - match(ConL); - - op_cost(0); - format %{ %} - interface(CONST_INTER); -%} - // 5 bit signed long integer operand immL5() %{ @@ -5202,21 +5106,7 @@ operand indIndex(iRegP reg, iRegL lreg) %} %} -operand indOffI1(iRegP reg, immIOffset1 off) -%{ - constraint(ALLOC_IN_RC(ptr_reg)); - match(AddP reg off); - op_cost(0); - format %{ "[$reg, $off]" %} - interface(MEMORY_INTER) %{ - base($reg); - index(0xffffffff); - scale(0x0); - disp($off); - %} -%} - -operand indOffI2(iRegP reg, immIOffset2 off) +operand indOffI(iRegP reg, immIOffset off) %{ constraint(ALLOC_IN_RC(ptr_reg)); match(AddP reg off); @@ -5230,105 +5120,7 @@ operand indOffI2(iRegP reg, immIOffset2 off) %} %} -operand 
indOffI4(iRegP reg, immIOffset4 off) -%{ - constraint(ALLOC_IN_RC(ptr_reg)); - match(AddP reg off); - op_cost(0); - format %{ "[$reg, $off]" %} - interface(MEMORY_INTER) %{ - base($reg); - index(0xffffffff); - scale(0x0); - disp($off); - %} -%} - -operand indOffI8(iRegP reg, immIOffset8 off) -%{ - constraint(ALLOC_IN_RC(ptr_reg)); - match(AddP reg off); - op_cost(0); - format %{ "[$reg, $off]" %} - interface(MEMORY_INTER) %{ - base($reg); - index(0xffffffff); - scale(0x0); - disp($off); - %} -%} - -operand indOffI16(iRegP reg, immIOffset16 off) -%{ - constraint(ALLOC_IN_RC(ptr_reg)); - match(AddP reg off); - op_cost(0); - format %{ "[$reg, $off]" %} - interface(MEMORY_INTER) %{ - base($reg); - index(0xffffffff); - scale(0x0); - disp($off); - %} -%} - -operand indOffL1(iRegP reg, immLoffset1 off) -%{ - constraint(ALLOC_IN_RC(ptr_reg)); - match(AddP reg off); - op_cost(0); - format %{ "[$reg, $off]" %} - interface(MEMORY_INTER) %{ - base($reg); - index(0xffffffff); - scale(0x0); - disp($off); - %} -%} - -operand indOffL2(iRegP reg, immLoffset2 off) -%{ - constraint(ALLOC_IN_RC(ptr_reg)); - match(AddP reg off); - op_cost(0); - format %{ "[$reg, $off]" %} - interface(MEMORY_INTER) %{ - base($reg); - index(0xffffffff); - scale(0x0); - disp($off); - %} -%} - -operand indOffL4(iRegP reg, immLoffset4 off) -%{ - constraint(ALLOC_IN_RC(ptr_reg)); - match(AddP reg off); - op_cost(0); - format %{ "[$reg, $off]" %} - interface(MEMORY_INTER) %{ - base($reg); - index(0xffffffff); - scale(0x0); - disp($off); - %} -%} - -operand indOffL8(iRegP reg, immLoffset8 off) -%{ - constraint(ALLOC_IN_RC(ptr_reg)); - match(AddP reg off); - op_cost(0); - format %{ "[$reg, $off]" %} - interface(MEMORY_INTER) %{ - base($reg); - index(0xffffffff); - scale(0x0); - disp($off); - %} -%} - -operand indOffL16(iRegP reg, immLoffset16 off) +operand indOffL(iRegP reg, immLOffset off) %{ constraint(ALLOC_IN_RC(ptr_reg)); match(AddP reg off); @@ -5704,10 +5496,7 @@ operand iRegL2P(iRegL reg) %{ interface(REG_INTER) %} -opclass vmem2(indirect, indIndex, indOffI2, indOffL2); -opclass vmem4(indirect, indIndex, indOffI4, indOffL4); -opclass vmem8(indirect, indIndex, indOffI8, indOffL8); -opclass vmem16(indirect, indIndex, indOffI16, indOffL16); +opclass vmem(indirect, indIndex, indOffI, indOffL, indOffIN, indOffLN); //----------OPERAND CLASSES---------------------------------------------------- // Operand Classes are groups of operands that are used as to simplify @@ -5719,23 +5508,9 @@ opclass vmem16(indirect, indIndex, indOffI16, indOffL16); // memory is used to define read/write location for load/store // instruction defs. 
we can turn a memory op into an Address -opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1, - indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P); - -opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2, - indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P); - -opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4, - indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P); - -opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8, - indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P); - -// All of the memory operands. For the pipeline description. -opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, - indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8, - indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P); - +opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL, + indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, + indOffLN, indirectX2P, indOffX2P); // iRegIorL2I is used for src inputs in rules for 32 bit int (I) // operations. it allows the src to be either an iRegI or a (ConvL2I @@ -6437,7 +6212,7 @@ define %{ // Load Instructions // Load Byte (8 bit signed) -instruct loadB(iRegINoSp dst, memory1 mem) +instruct loadB(iRegINoSp dst, memory mem) %{ match(Set dst (LoadB mem)); predicate(!needs_acquiring_load(n)); @@ -6451,7 +6226,7 @@ instruct loadB(iRegINoSp dst, memory1 mem) %} // Load Byte (8 bit signed) into long -instruct loadB2L(iRegLNoSp dst, memory1 mem) +instruct loadB2L(iRegLNoSp dst, memory mem) %{ match(Set dst (ConvI2L (LoadB mem))); predicate(!needs_acquiring_load(n->in(1))); @@ -6465,7 +6240,7 @@ instruct loadB2L(iRegLNoSp dst, memory1 mem) %} // Load Byte (8 bit unsigned) -instruct loadUB(iRegINoSp dst, memory1 mem) +instruct loadUB(iRegINoSp dst, memory mem) %{ match(Set dst (LoadUB mem)); predicate(!needs_acquiring_load(n)); @@ -6479,7 +6254,7 @@ instruct loadUB(iRegINoSp dst, memory1 mem) %} // Load Byte (8 bit unsigned) into long -instruct loadUB2L(iRegLNoSp dst, memory1 mem) +instruct loadUB2L(iRegLNoSp dst, memory mem) %{ match(Set dst (ConvI2L (LoadUB mem))); predicate(!needs_acquiring_load(n->in(1))); @@ -6493,7 +6268,7 @@ instruct loadUB2L(iRegLNoSp dst, memory1 mem) %} // Load Short (16 bit signed) -instruct loadS(iRegINoSp dst, memory2 mem) +instruct loadS(iRegINoSp dst, memory mem) %{ match(Set dst (LoadS mem)); predicate(!needs_acquiring_load(n)); @@ -6507,7 +6282,7 @@ instruct loadS(iRegINoSp dst, memory2 mem) %} // Load Short (16 bit signed) into long -instruct loadS2L(iRegLNoSp dst, memory2 mem) +instruct loadS2L(iRegLNoSp dst, memory mem) %{ match(Set dst (ConvI2L (LoadS mem))); predicate(!needs_acquiring_load(n->in(1))); @@ -6521,7 +6296,7 @@ instruct loadS2L(iRegLNoSp dst, memory2 mem) %} // Load Char (16 bit unsigned) -instruct loadUS(iRegINoSp dst, memory2 mem) +instruct loadUS(iRegINoSp dst, memory mem) %{ match(Set dst (LoadUS mem)); predicate(!needs_acquiring_load(n)); @@ -6535,7 +6310,7 @@ instruct loadUS(iRegINoSp dst, memory2 mem) %} // 
Load Short/Char (16 bit unsigned) into long -instruct loadUS2L(iRegLNoSp dst, memory2 mem) +instruct loadUS2L(iRegLNoSp dst, memory mem) %{ match(Set dst (ConvI2L (LoadUS mem))); predicate(!needs_acquiring_load(n->in(1))); @@ -6549,7 +6324,7 @@ instruct loadUS2L(iRegLNoSp dst, memory2 mem) %} // Load Integer (32 bit signed) -instruct loadI(iRegINoSp dst, memory4 mem) +instruct loadI(iRegINoSp dst, memory mem) %{ match(Set dst (LoadI mem)); predicate(!needs_acquiring_load(n)); @@ -6563,7 +6338,7 @@ instruct loadI(iRegINoSp dst, memory4 mem) %} // Load Integer (32 bit signed) into long -instruct loadI2L(iRegLNoSp dst, memory4 mem) +instruct loadI2L(iRegLNoSp dst, memory mem) %{ match(Set dst (ConvI2L (LoadI mem))); predicate(!needs_acquiring_load(n->in(1))); @@ -6577,7 +6352,7 @@ instruct loadI2L(iRegLNoSp dst, memory4 mem) %} // Load Integer (32 bit unsigned) into long -instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask) +instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask) %{ match(Set dst (AndL (ConvI2L (LoadI mem)) mask)); predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load())); @@ -6591,7 +6366,7 @@ instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask) %} // Load Long (64 bit signed) -instruct loadL(iRegLNoSp dst, memory8 mem) +instruct loadL(iRegLNoSp dst, memory mem) %{ match(Set dst (LoadL mem)); predicate(!needs_acquiring_load(n)); @@ -6605,7 +6380,7 @@ instruct loadL(iRegLNoSp dst, memory8 mem) %} // Load Range -instruct loadRange(iRegINoSp dst, memory4 mem) +instruct loadRange(iRegINoSp dst, memory mem) %{ match(Set dst (LoadRange mem)); @@ -6618,7 +6393,7 @@ instruct loadRange(iRegINoSp dst, memory4 mem) %} // Load Pointer -instruct loadP(iRegPNoSp dst, memory8 mem) +instruct loadP(iRegPNoSp dst, memory mem) %{ match(Set dst (LoadP mem)); predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0)); @@ -6632,7 +6407,7 @@ instruct loadP(iRegPNoSp dst, memory8 mem) %} // Load Compressed Pointer -instruct loadN(iRegNNoSp dst, memory4 mem) +instruct loadN(iRegNNoSp dst, memory mem) %{ match(Set dst (LoadN mem)); predicate(!needs_acquiring_load(n)); @@ -6646,7 +6421,7 @@ instruct loadN(iRegNNoSp dst, memory4 mem) %} // Load Klass Pointer -instruct loadKlass(iRegPNoSp dst, memory8 mem) +instruct loadKlass(iRegPNoSp dst, memory mem) %{ match(Set dst (LoadKlass mem)); predicate(!needs_acquiring_load(n)); @@ -6660,7 +6435,7 @@ instruct loadKlass(iRegPNoSp dst, memory8 mem) %} // Load Narrow Klass Pointer -instruct loadNKlass(iRegNNoSp dst, memory4 mem) +instruct loadNKlass(iRegNNoSp dst, memory mem) %{ match(Set dst (LoadNKlass mem)); predicate(!needs_acquiring_load(n)); @@ -6674,7 +6449,7 @@ instruct loadNKlass(iRegNNoSp dst, memory4 mem) %} // Load Float -instruct loadF(vRegF dst, memory4 mem) +instruct loadF(vRegF dst, memory mem) %{ match(Set dst (LoadF mem)); predicate(!needs_acquiring_load(n)); @@ -6688,7 +6463,7 @@ instruct loadF(vRegF dst, memory4 mem) %} // Load Double -instruct loadD(vRegD dst, memory8 mem) +instruct loadD(vRegD dst, memory mem) %{ match(Set dst (LoadD mem)); predicate(!needs_acquiring_load(n)); @@ -6892,7 +6667,7 @@ instruct loadConD(vRegD dst, immD con) %{ // Store Instructions // Store CMS card-mark Immediate -instruct storeimmCM0(immI0 zero, memory1 mem) +instruct storeimmCM0(immI0 zero, memory mem) %{ match(Set mem (StoreCM mem zero)); @@ -6907,7 +6682,7 @@ instruct storeimmCM0(immI0 zero, memory1 mem) // Store CMS card-mark Immediate with intervening StoreStore // needed when using CMS with no conditional 
card marking -instruct storeimmCM0_ordered(immI0 zero, memory1 mem) +instruct storeimmCM0_ordered(immI0 zero, memory mem) %{ match(Set mem (StoreCM mem zero)); @@ -6922,7 +6697,7 @@ instruct storeimmCM0_ordered(immI0 zero, memory1 mem) %} // Store Byte -instruct storeB(iRegIorL2I src, memory1 mem) +instruct storeB(iRegIorL2I src, memory mem) %{ match(Set mem (StoreB mem src)); predicate(!needs_releasing_store(n)); @@ -6936,7 +6711,7 @@ instruct storeB(iRegIorL2I src, memory1 mem) %} -instruct storeimmB0(immI0 zero, memory1 mem) +instruct storeimmB0(immI0 zero, memory mem) %{ match(Set mem (StoreB mem zero)); predicate(!needs_releasing_store(n)); @@ -6950,7 +6725,7 @@ instruct storeimmB0(immI0 zero, memory1 mem) %} // Store Char/Short -instruct storeC(iRegIorL2I src, memory2 mem) +instruct storeC(iRegIorL2I src, memory mem) %{ match(Set mem (StoreC mem src)); predicate(!needs_releasing_store(n)); @@ -6963,7 +6738,7 @@ instruct storeC(iRegIorL2I src, memory2 mem) ins_pipe(istore_reg_mem); %} -instruct storeimmC0(immI0 zero, memory2 mem) +instruct storeimmC0(immI0 zero, memory mem) %{ match(Set mem (StoreC mem zero)); predicate(!needs_releasing_store(n)); @@ -6978,7 +6753,7 @@ instruct storeimmC0(immI0 zero, memory2 mem) // Store Integer -instruct storeI(iRegIorL2I src, memory4 mem) +instruct storeI(iRegIorL2I src, memory mem) %{ match(Set mem(StoreI mem src)); predicate(!needs_releasing_store(n)); @@ -6991,7 +6766,7 @@ instruct storeI(iRegIorL2I src, memory4 mem) ins_pipe(istore_reg_mem); %} -instruct storeimmI0(immI0 zero, memory4 mem) +instruct storeimmI0(immI0 zero, memory mem) %{ match(Set mem(StoreI mem zero)); predicate(!needs_releasing_store(n)); @@ -7005,7 +6780,7 @@ instruct storeimmI0(immI0 zero, memory4 mem) %} // Store Long (64 bit signed) -instruct storeL(iRegL src, memory8 mem) +instruct storeL(iRegL src, memory mem) %{ match(Set mem (StoreL mem src)); predicate(!needs_releasing_store(n)); @@ -7019,7 +6794,7 @@ instruct storeL(iRegL src, memory8 mem) %} // Store Long (64 bit signed) -instruct storeimmL0(immL0 zero, memory8 mem) +instruct storeimmL0(immL0 zero, memory mem) %{ match(Set mem (StoreL mem zero)); predicate(!needs_releasing_store(n)); @@ -7033,7 +6808,7 @@ instruct storeimmL0(immL0 zero, memory8 mem) %} // Store Pointer -instruct storeP(iRegP src, memory8 mem) +instruct storeP(iRegP src, memory mem) %{ match(Set mem (StoreP mem src)); predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0); @@ -7047,7 +6822,7 @@ instruct storeP(iRegP src, memory8 mem) %} // Store Pointer -instruct storeimmP0(immP0 zero, memory8 mem) +instruct storeimmP0(immP0 zero, memory mem) %{ match(Set mem (StoreP mem zero)); predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0); @@ -7061,7 +6836,7 @@ instruct storeimmP0(immP0 zero, memory8 mem) %} // Store Compressed Pointer -instruct storeN(iRegN src, memory4 mem) +instruct storeN(iRegN src, memory mem) %{ match(Set mem (StoreN mem src)); predicate(!needs_releasing_store(n)); @@ -7074,7 +6849,7 @@ instruct storeN(iRegN src, memory4 mem) ins_pipe(istore_reg_mem); %} -instruct storeImmN0(immN0 zero, memory4 mem) +instruct storeImmN0(immN0 zero, memory mem) %{ match(Set mem (StoreN mem zero)); predicate(!needs_releasing_store(n)); @@ -7088,7 +6863,7 @@ instruct storeImmN0(immN0 zero, memory4 mem) %} // Store Float -instruct storeF(vRegF src, memory4 mem) +instruct storeF(vRegF src, memory mem) %{ match(Set mem (StoreF mem src)); predicate(!needs_releasing_store(n)); @@ -7105,7 +6880,7 @@ instruct 
storeF(vRegF src, memory4 mem) // implement storeImmF0 and storeFImmPacked // Store Double -instruct storeD(vRegD src, memory8 mem) +instruct storeD(vRegD src, memory mem) %{ match(Set mem (StoreD mem src)); predicate(!needs_releasing_store(n)); @@ -7119,7 +6894,7 @@ instruct storeD(vRegD src, memory8 mem) %} // Store Compressed Klass Pointer -instruct storeNKlass(iRegN src, memory4 mem) +instruct storeNKlass(iRegN src, memory mem) %{ predicate(!needs_releasing_store(n)); match(Set mem (StoreNKlass mem src)); @@ -7138,7 +6913,7 @@ instruct storeNKlass(iRegN src, memory4 mem) // prefetch instructions // Must be safe to execute with invalid address (cannot fault). -instruct prefetchalloc( memory8 mem ) %{ +instruct prefetchalloc( memory mem ) %{ match(PrefetchAllocation mem); ins_cost(INSN_COST); @@ -7707,7 +7482,7 @@ instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{ ins_pipe(pipe_class_default); %} -instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{ +instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{ match(Set dst (PopCountI (LoadI mem))); effect(TEMP tmp); ins_cost(INSN_COST * 13); @@ -7748,7 +7523,7 @@ instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{ ins_pipe(pipe_class_default); %} -instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{ +instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{ match(Set dst (PopCountL (LoadL mem))); effect(TEMP tmp); ins_cost(INSN_COST * 13); @@ -16897,7 +16672,7 @@ instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask, ins_pipe(pipe_slow); %} -instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask, +instruct compressBitsI_memcon(iRegINoSp dst, memory mem, immI mask, vRegF tdst, vRegF tsrc, vRegF tmask) %{ match(Set dst (CompressBits (LoadI mem) mask)); effect(TEMP tdst, TEMP tsrc, TEMP tmask); @@ -16934,7 +16709,7 @@ instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask, ins_pipe(pipe_slow); %} -instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask, +instruct compressBitsL_memcon(iRegLNoSp dst, memory mem, immL mask, vRegF tdst, vRegF tsrc, vRegF tmask) %{ match(Set dst (CompressBits (LoadL mem) mask)); effect(TEMP tdst, TEMP tsrc, TEMP tmask); @@ -16971,7 +16746,7 @@ instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask, ins_pipe(pipe_slow); %} -instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask, +instruct expandBitsI_memcon(iRegINoSp dst, memory mem, immI mask, vRegF tdst, vRegF tsrc, vRegF tmask) %{ match(Set dst (ExpandBits (LoadI mem) mask)); effect(TEMP tdst, TEMP tsrc, TEMP tmask); @@ -17009,7 +16784,7 @@ instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask, %} -instruct expandBitsL_memcon(iRegINoSp dst, memory8 mem, immL mask, +instruct expandBitsL_memcon(iRegINoSp dst, memory mem, immL mask, vRegF tdst, vRegF tsrc, vRegF tmask) %{ match(Set dst (ExpandBits (LoadL mem) mask)); effect(TEMP tdst, TEMP tsrc, TEMP tmask); diff --git a/src/hotspot/cpu/aarch64/aarch64_vector.ad b/src/hotspot/cpu/aarch64/aarch64_vector.ad index 1ebc6408a60..637d3de73af 100644 --- a/src/hotspot/cpu/aarch64/aarch64_vector.ad +++ b/src/hotspot/cpu/aarch64/aarch64_vector.ad @@ -345,7 +345,7 @@ source %{ // ------------------------------ Vector load/store ---------------------------- // Load Vector (16 bits) -instruct loadV2(vReg dst, vmem2 mem) %{ +instruct loadV2(vReg dst, vmem mem) %{ predicate(n->as_LoadVector()->memory_size() == 2); match(Set dst (LoadVector mem)); format %{ "loadV2 $dst, $mem\t# vector 
(16 bits)" %} @@ -354,7 +354,7 @@ instruct loadV2(vReg dst, vmem2 mem) %{ %} // Store Vector (16 bits) -instruct storeV2(vReg src, vmem2 mem) %{ +instruct storeV2(vReg src, vmem mem) %{ predicate(n->as_StoreVector()->memory_size() == 2); match(Set mem (StoreVector mem src)); format %{ "storeV2 $mem, $src\t# vector (16 bits)" %} @@ -363,7 +363,7 @@ instruct storeV2(vReg src, vmem2 mem) %{ %} // Load Vector (32 bits) -instruct loadV4(vReg dst, vmem4 mem) %{ +instruct loadV4(vReg dst, vmem mem) %{ predicate(n->as_LoadVector()->memory_size() == 4); match(Set dst (LoadVector mem)); format %{ "loadV4 $dst, $mem\t# vector (32 bits)" %} @@ -372,7 +372,7 @@ instruct loadV4(vReg dst, vmem4 mem) %{ %} // Store Vector (32 bits) -instruct storeV4(vReg src, vmem4 mem) %{ +instruct storeV4(vReg src, vmem mem) %{ predicate(n->as_StoreVector()->memory_size() == 4); match(Set mem (StoreVector mem src)); format %{ "storeV4 $mem, $src\t# vector (32 bits)" %} @@ -381,7 +381,7 @@ instruct storeV4(vReg src, vmem4 mem) %{ %} // Load Vector (64 bits) -instruct loadV8(vReg dst, vmem8 mem) %{ +instruct loadV8(vReg dst, vmem mem) %{ predicate(n->as_LoadVector()->memory_size() == 8); match(Set dst (LoadVector mem)); format %{ "loadV8 $dst, $mem\t# vector (64 bits)" %} @@ -390,7 +390,7 @@ instruct loadV8(vReg dst, vmem8 mem) %{ %} // Store Vector (64 bits) -instruct storeV8(vReg src, vmem8 mem) %{ +instruct storeV8(vReg src, vmem mem) %{ predicate(n->as_StoreVector()->memory_size() == 8); match(Set mem (StoreVector mem src)); format %{ "storeV8 $mem, $src\t# vector (64 bits)" %} @@ -399,7 +399,7 @@ instruct storeV8(vReg src, vmem8 mem) %{ %} // Load Vector (128 bits) -instruct loadV16(vReg dst, vmem16 mem) %{ +instruct loadV16(vReg dst, vmem mem) %{ predicate(n->as_LoadVector()->memory_size() == 16); match(Set dst (LoadVector mem)); format %{ "loadV16 $dst, $mem\t# vector (128 bits)" %} @@ -408,7 +408,7 @@ instruct loadV16(vReg dst, vmem16 mem) %{ %} // Store Vector (128 bits) -instruct storeV16(vReg src, vmem16 mem) %{ +instruct storeV16(vReg src, vmem mem) %{ predicate(n->as_StoreVector()->memory_size() == 16); match(Set mem (StoreVector mem src)); format %{ "storeV16 $mem, $src\t# vector (128 bits)" %} diff --git a/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 b/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 index 29f92772368..b3403ec82a1 100644 --- a/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 +++ b/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 @@ -338,7 +338,7 @@ dnl VECTOR_LOAD_STORE($1, $2, $3, $4, $5 ) dnl VECTOR_LOAD_STORE(type, nbytes, arg_name, nbits, size) define(`VECTOR_LOAD_STORE', ` // ifelse(load, $1, Load, Store) Vector ($4 bits) -instruct $1V$2(vReg $3, vmem$2 mem) %{ +instruct $1V$2(vReg $3, vmem mem) %{ predicate(`n->as_'ifelse(load, $1, Load, Store)Vector()->memory_size() == $2); match(Set ifelse(load, $1, dst (LoadVector mem), mem (StoreVector mem src))); format %{ "$1V$2 ifelse(load, $1, `$dst, $mem', `$mem, $src')\t# vector ($4 bits)" %} diff --git a/src/hotspot/cpu/aarch64/ad_encode.m4 b/src/hotspot/cpu/aarch64/ad_encode.m4 index 008dbd2c936..e3d8ea661b6 100644 --- a/src/hotspot/cpu/aarch64/ad_encode.m4 +++ b/src/hotspot/cpu/aarch64/ad_encode.m4 @@ -34,7 +34,7 @@ define(access, ` define(load,` // This encoding class is generated automatically from ad_encode.m4. 
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_$2($1 dst, memory$5 mem) %{dnl + enc_class aarch64_enc_$2($1 dst, memory mem) %{dnl access(dst,$2,$3,$4,$5)')dnl load(iRegI,ldrsbw,,,1) load(iRegI,ldrsb,,,1) @@ -53,12 +53,12 @@ load(vRegD,ldrd,Float,,8) define(STORE,` // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_$2($1 src, memory$5 mem) %{dnl + enc_class aarch64_enc_$2($1 src, memory mem) %{dnl access(src,$2,$3,$4,$5)')dnl define(STORE0,` // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_$2`'0(memory$4 mem) %{ + enc_class aarch64_enc_$2`'0(memory mem) %{ choose(masm,zr,$2,$mem->opcode(), as_$3Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp,$4)')dnl STORE(iRegI,strb,,,1) @@ -82,7 +82,7 @@ STORE(vRegD,strd,Float,,8) // This encoding class is generated automatically from ad_encode.m4. // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE - enc_class aarch64_enc_strb0_ordered(memory4 mem) %{ + enc_class aarch64_enc_strb0_ordered(memory mem) %{ __ membar(Assembler::StoreStore); loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1); diff --git a/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad b/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad index 6e401724baa..5e690a8e47b 100644 --- a/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad +++ b/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad @@ -51,7 +51,7 @@ static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, %} // Load Pointer -instruct xLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr) +instruct xLoadP(iRegPNoSp dst, memory mem, rFlagsReg cr) %{ match(Set dst (LoadP mem)); predicate(UseZGC && !ZGenerational && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() != 0)); diff --git a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad index 56d45384779..1510b42bfe9 100644 --- a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad +++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad @@ -100,7 +100,7 @@ static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address %} // Load Pointer -instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr) +instruct zLoadP(iRegPNoSp dst, memory mem, rFlagsReg cr) %{ match(Set dst (LoadP mem)); predicate(UseZGC && ZGenerational && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0); diff --git a/test/hotspot/jtreg/compiler/c2/TestUnalignedAccess.java b/test/hotspot/jtreg/compiler/c2/TestUnalignedAccess.java index d05dbad4a73..033ea49e609 100644 --- a/test/hotspot/jtreg/compiler/c2/TestUnalignedAccess.java +++ b/test/hotspot/jtreg/compiler/c2/TestUnalignedAccess.java @@ -46,20 +46,11 @@ public class TestUnalignedAccess { static final Unsafe UNSAFE = Unsafe.getUnsafe(); static void sink(int x) {} - public static long lseed = 1; - public static int iseed = 2; - public static short sseed = 3; - public static byte bseed = 4; - public static long lres = lseed; - public static int ires = iseed; - public static short sres = sseed; - public static byte bres = bseed; - public static class TestLong { private static final byte[] BYTES = new byte[LEN]; private static final long rawdata = 0xbeef; - private static final long data; + private static final long lseed = 1; static { sink(2); @@ -69,13 +60,10 @@ public static class TestLong { // 1030 can't be encoded as "base + offset" mode 
into the instruction field. UNSAFE.putLongUnaligned(BYTES, 1030, rawdata); - lres += UNSAFE.getLongUnaligned(BYTES, 1030); // 127 can be encoded into simm9 field. - UNSAFE.putLongUnaligned(BYTES, 127, lres); - lres += UNSAFE.getLongUnaligned(BYTES, 127); + UNSAFE.putLongUnaligned(BYTES, 127, rawdata+lseed); // 1096 can be encoded into uimm12 field. - UNSAFE.putLongUnaligned(BYTES, 1096, lres); - data = UNSAFE.getLongUnaligned(BYTES, 1096); + UNSAFE.putLongUnaligned(BYTES, 1096, rawdata-lseed); } } @@ -84,7 +72,7 @@ public static class TestInt { private static final byte[] BYTES = new byte[LEN]; private static final int rawdata = 0xbeef; - private static final int data; + private static final int iseed = 2; static { sink(2); // Signed immediate byte offset: range -256 to 255 @@ -93,13 +81,10 @@ public static class TestInt { // 274 can't be encoded as "base + offset" mode into the instruction field. UNSAFE.putIntUnaligned(BYTES, 274, rawdata); - ires += UNSAFE.getIntUnaligned(BYTES, 274); // 255 can be encoded into simm9 field. - UNSAFE.putIntUnaligned(BYTES, 255, ires); - ires += UNSAFE.getIntUnaligned(BYTES, 255); + UNSAFE.putIntUnaligned(BYTES, 255, rawdata + iseed); // 528 can be encoded into uimm12 field. - UNSAFE.putIntUnaligned(BYTES, 528, ires); - data = UNSAFE.getIntUnaligned(BYTES, 528); + UNSAFE.putIntUnaligned(BYTES, 528, rawdata - iseed); } } @@ -108,7 +93,7 @@ public static class TestShort { private static final byte[] BYTES = new byte[LEN]; private static final short rawdata = (short)0xbeef; - private static final short data; + private static final short sseed = 3; static { sink(2); // Signed immediate byte offset: range -256 to 255 @@ -117,13 +102,10 @@ public static class TestShort { // 257 can't be encoded as "base + offset" mode into the instruction field. UNSAFE.putShortUnaligned(BYTES, 257, rawdata); - sres = (short) (sres + UNSAFE.getShortUnaligned(BYTES, 257)); // 253 can be encoded into simm9 field. - UNSAFE.putShortUnaligned(BYTES, 253, sres); - sres = (short) (sres + UNSAFE.getShortUnaligned(BYTES, 253)); + UNSAFE.putShortUnaligned(BYTES, 253, (short) (rawdata + sseed)); // 272 can be encoded into uimm12 field. - UNSAFE.putShortUnaligned(BYTES, 272, sres); - data = UNSAFE.getShortUnaligned(BYTES, 272); + UNSAFE.putShortUnaligned(BYTES, 272, (short) (rawdata - sseed)); } } @@ -132,7 +114,7 @@ public static class TestByte { private static final byte[] BYTES = new byte[LEN]; private static final byte rawdata = (byte)0x3f; - private static final byte data; + private static final byte bseed = 4; static { sink(2); // Signed immediate byte offset: range -256 to 255 @@ -141,29 +123,34 @@ public static class TestByte { // 272 can be encoded into simm9 field. UNSAFE.putByte(BYTES, 272, rawdata); - bres = (byte) (bres + UNSAFE.getByte(BYTES, 272)); // 53 can be encoded into simm9 field. - UNSAFE.putByte(BYTES, 53, bres); - bres = (byte) (bres + UNSAFE.getByte(BYTES, 53)); + UNSAFE.putByte(BYTES, 53, (byte) (rawdata + bseed)); // 1027 can be encoded into uimm12 field. 
- UNSAFE.putByte(BYTES, 1027, bres); - data = UNSAFE.getByte(BYTES, 1027); + UNSAFE.putByte(BYTES, 1027, (byte) (rawdata - bseed)); } } static void test() { TestLong ta = new TestLong(); - Asserts.assertEquals(ta.data, (ta.rawdata + lseed) * 2, "putUnaligned long failed!"); + Asserts.assertEquals(UNSAFE.getLongUnaligned(ta.BYTES, 1030), ta.rawdata, "putUnaligned long failed!"); + Asserts.assertEquals(UNSAFE.getLongUnaligned(ta.BYTES, 127), ta.rawdata + ta.lseed, "putUnaligned long failed!"); + Asserts.assertEquals(UNSAFE.getLongUnaligned(ta.BYTES, 1096), ta.rawdata - ta.lseed, "putUnaligned long failed!"); TestInt tb = new TestInt(); - Asserts.assertEquals(tb.data, (tb.rawdata + iseed) * 2, "putUnaligned int failed!"); + Asserts.assertEquals(UNSAFE.getIntUnaligned(tb.BYTES, 274), tb.rawdata, "putUnaligned int failed!"); + Asserts.assertEquals(UNSAFE.getIntUnaligned(tb.BYTES, 255), tb.rawdata + tb.iseed, "putUnaligned int failed!"); + Asserts.assertEquals(UNSAFE.getIntUnaligned(tb.BYTES, 528), tb.rawdata - tb.iseed, "putUnaligned int failed!"); TestShort tc = new TestShort(); - Asserts.assertEquals(tc.data, (short) (((short) (tc.rawdata + sseed)) * 2), "putUnaligned short failed!"); + Asserts.assertEquals(UNSAFE.getShortUnaligned(tc.BYTES, 257), tc.rawdata, "putUnaligned short failed!"); + Asserts.assertEquals(UNSAFE.getShortUnaligned(tc.BYTES, 253), (short) (tc.rawdata + tc.sseed), "putUnaligned short failed!"); + Asserts.assertEquals(UNSAFE.getShortUnaligned(tc.BYTES, 272), (short) (tc.rawdata - tc.sseed), "putUnaligned short failed!"); TestByte td = new TestByte(); - Asserts.assertEquals(td.data, (byte) (((byte) (td.rawdata + bseed)) * 2), "put byte failed!"); + Asserts.assertEquals(UNSAFE.getByte(td.BYTES, 272), td.rawdata, "put byte failed!"); + Asserts.assertEquals(UNSAFE.getByte(td.BYTES, 53), (byte) (td.rawdata + td.bseed), "put byte failed!"); + Asserts.assertEquals(UNSAFE.getByte(td.BYTES, 1027), (byte) (td.rawdata - td.bseed), "put byte failed!"); } public static void main(String[] strArr) { From 6169613d9f3f0bf019d04a37a1d8f28f1463c17c Mon Sep 17 00:00:00 2001 From: Daniel Fuchs Date: Thu, 15 Aug 2024 15:34:08 +0000 Subject: [PATCH 06/67] 8336655: java/net/httpclient/DigestEchoClient.java IOException: HTTP/1.1 header parser received no bytes Reviewed-by: jpai --- .../jdk/internal/net/http/ConnectionPool.java | 36 +++++++++++++++---- .../jdk/internal/net/http/SocketTube.java | 23 ++++++++++-- .../java/net/httpclient/DigestEchoClient.java | 4 +-- 3 files changed, 52 insertions(+), 11 deletions(-) diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/ConnectionPool.java b/src/java.net.http/share/classes/jdk/internal/net/http/ConnectionPool.java index 0ad7b9d5992..edaf53a8a0d 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/ConnectionPool.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/ConnectionPool.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -44,6 +44,7 @@ import jdk.internal.net.http.common.Deadline; import jdk.internal.net.http.common.FlowTube; +import jdk.internal.net.http.common.Log; import jdk.internal.net.http.common.Logger; import jdk.internal.net.http.common.TimeLine; import jdk.internal.net.http.common.TimeSource; @@ -492,13 +493,13 @@ void clear() { // Remove a connection from the pool. // should only be called while holding the ConnectionPool stateLock. - private void removeFromPool(HttpConnection c) { + private boolean removeFromPool(HttpConnection c) { assert stateLock.isHeldByCurrentThread(); if (c instanceof PlainHttpConnection) { - removeFromPool(c, plainPool); + return removeFromPool(c, plainPool); } else { assert c.isSecure() : "connection " + c + " is not secure!"; - removeFromPool(c, sslPool); + return removeFromPool(c, sslPool); } } @@ -529,13 +530,29 @@ void cleanup(HttpConnection c, Throwable error) { debug.log("%s : ConnectionPool.cleanup(%s)", String.valueOf(c.getConnectionFlow()), error); stateLock.lock(); + boolean removed; try { - removeFromPool(c); + removed = removeFromPool(c); expiryList.remove(c); } finally { stateLock.unlock(); } - c.close(); + if (!removed) { + // this should not happen; the cleanup may have consumed + // some data that wasn't supposed to be consumed, so + // the only thing we can do is log it and close the + // connection. + if (Log.errors()) { + Log.logError("WARNING: CleanupTrigger triggered for" + + " a connection not found in the pool: closing {0}", c); + } else if (debug.on()) { + debug.log("WARNING: CleanupTrigger triggered for" + + " a connection not found in the pool: closing %s", c); + } + c.close(new IOException("Unexpected cleanup triggered for non pooled connection")); + } else { + c.close(); + } } /** @@ -549,6 +566,7 @@ private final class CleanupTrigger implements private final HttpConnection connection; private volatile boolean done; + private volatile boolean dropped; public CleanupTrigger(HttpConnection connection) { this.connection = connection; @@ -566,6 +584,7 @@ private void triggerCleanup(Throwable error) { @Override public void onSubscribe(Flow.Subscription subscription) { + if (dropped || done) return; subscription.request(1); } @Override @@ -586,5 +605,10 @@ public void subscribe(Flow.Subscriber> subscriber) { public String toString() { return "CleanupTrigger(" + connection.getConnectionFlow() + ")"; } + + @Override + public void dropSubscription() { + dropped = true; + } } } diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/SocketTube.java b/src/java.net.http/share/classes/jdk/internal/net/http/SocketTube.java index cbdf6633576..9317bdf442a 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/SocketTube.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/SocketTube.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -573,6 +573,8 @@ public void subscribe(Flow.Subscriber> s) { debug.log("read publisher: dropping pending subscriber: " + previous.subscriber); previous.errorRef.compareAndSet(null, errorRef.get()); + // make sure no data will be routed to the old subscriber. 
+ previous.stopReading(); previous.signalOnSubscribe(); if (subscriptionImpl.completed) { previous.signalCompletion(); @@ -606,6 +608,7 @@ final class ReadSubscription implements Flow.Subscription { volatile boolean subscribed; volatile boolean cancelled; volatile boolean completed; + volatile boolean stopped; public ReadSubscription(InternalReadSubscription impl, TubeSubscriber subscriber) { @@ -623,11 +626,11 @@ public void cancel() { @Override public void request(long n) { - if (!cancelled) { + if (!cancelled && !stopped) { impl.request(n); } else { if (debug.on()) - debug.log("subscription cancelled, ignoring request %d", n); + debug.log("subscription stopped or cancelled, ignoring request %d", n); } } @@ -661,6 +664,20 @@ void signalOnSubscribe() { signalCompletion(); } } + + /** + * Called when switching subscriber on the {@link InternalReadSubscription}. + * This subscriber is the old subscriber. Demand on the internal + * subscription will be reset and reading will be paused until the + * new subscriber is subscribed. + * This should ensure that no data is routed to this subscriber + * until the new subscriber is subscribed. + */ + void stopReading() { + stopped = true; + impl.demand.reset(); + impl.pauseReadEvent(); + } } final class InternalReadSubscription implements Flow.Subscription { diff --git a/test/jdk/java/net/httpclient/DigestEchoClient.java b/test/jdk/java/net/httpclient/DigestEchoClient.java index 3b6d1a1773f..1450bf09b2d 100644 --- a/test/jdk/java/net/httpclient/DigestEchoClient.java +++ b/test/jdk/java/net/httpclient/DigestEchoClient.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,7 +64,7 @@ * @test * @summary this test verifies that a client may provides authorization * headers directly when connecting with a server. - * @bug 8087112 + * @bug 8087112 8336655 * @library /test/lib /test/jdk/java/net/httpclient/lib * @build jdk.httpclient.test.lib.common.HttpServerAdapters jdk.test.lib.net.SimpleSSLContext * DigestEchoServer ReferenceTracker DigestEchoClient From 7d1bbff076c063d066951eedb21de7e694e588b3 Mon Sep 17 00:00:00 2001 From: "lawrence.andrews" Date: Thu, 15 Aug 2024 16:36:15 +0000 Subject: [PATCH 07/67] 8328553: Get rid of JApplet in test/jdk/sanity/client/lib/SwingSet2/src/DemoModule.java Reviewed-by: honkar, prr --- .../client/lib/SwingSet2/src/DemoModule.java | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/test/jdk/sanity/client/lib/SwingSet2/src/DemoModule.java b/test/jdk/sanity/client/lib/SwingSet2/src/DemoModule.java index 3511583e932..0e7c71083dc 100644 --- a/test/jdk/sanity/client/lib/SwingSet2/src/DemoModule.java +++ b/test/jdk/sanity/client/lib/SwingSet2/src/DemoModule.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,6 @@ import javax.swing.BoxLayout; import javax.swing.Icon; import javax.swing.ImageIcon; -import javax.swing.JApplet; import javax.swing.JFrame; import javax.swing.JPanel; import javax.swing.UIManager; @@ -42,10 +41,8 @@ /** * A generic SwingSet2 demo module - * - * @author Jeff Dinkins */ -public class DemoModule extends JApplet { +public class DemoModule extends JPanel { // The preferred size of the demo private int PREFERRED_WIDTH = 680; @@ -214,10 +211,6 @@ public static void main(String[] args) { demo.mainImpl(); } - public void init() { - getContentPane().setLayout(new BorderLayout()); - getContentPane().add(getDemoPanel(), BorderLayout.CENTER); - } - void updateDragEnabled(boolean dragEnabled) {} -} \ No newline at end of file +} + From ef54af39883e76c80a3e012ed91b90973da51bb4 Mon Sep 17 00:00:00 2001 From: Aleksey Shipilev Date: Thu, 15 Aug 2024 16:45:43 +0000 Subject: [PATCH 08/67] 8338444: Shenandoah: Remove ShenandoahHumongousThreshold tunable Reviewed-by: rkennke, wkemper, ysr --- .../share/gc/shenandoah/shenandoahAsserts.cpp | 2 +- .../gc/shenandoah/shenandoahController.cpp | 4 +- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 5 +- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 4 +- .../gc/shenandoah/shenandoahHeapRegion.cpp | 14 +- .../gc/shenandoah/shenandoahHeapRegion.hpp | 14 +- .../gc/shenandoah/shenandoahInitLogger.cpp | 1 - .../gc/shenandoah/shenandoah_globals.hpp | 7 - .../gc/shenandoah/TestHumongousThreshold.java | 131 ------------------ .../options/TestHumongousThresholdArgs.java | 72 ---------- 10 files changed, 12 insertions(+), 242 deletions(-) delete mode 100644 test/hotspot/jtreg/gc/shenandoah/TestHumongousThreshold.java delete mode 100644 test/hotspot/jtreg/gc/shenandoah/options/TestHumongousThresholdArgs.java diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp index 5215aa749ae..5abd7b805f9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp @@ -283,7 +283,7 @@ void ShenandoahAsserts::assert_in_correct_region(void* interior_loc, oop obj, co } size_t alloc_size = obj->size(); - if (alloc_size > ShenandoahHeapRegion::humongous_threshold_words()) { + if (ShenandoahHeapRegion::requires_humongous(alloc_size)) { size_t idx = r->index(); size_t num_regions = ShenandoahHeapRegion::required_regions(alloc_size * HeapWordSize); for (size_t i = idx; i < idx + num_regions; i++) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahController.cpp b/src/hotspot/share/gc/shenandoah/shenandoahController.cpp index 6d6d21c4066..effa4a8f1fc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahController.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahController.cpp @@ -57,7 +57,7 @@ void ShenandoahController::handle_alloc_failure(ShenandoahAllocRequest& req, boo ShenandoahHeap* heap = ShenandoahHeap::heap(); assert(current()->is_Java_thread(), "expect Java thread here"); - bool is_humongous = req.size() > ShenandoahHeapRegion::humongous_threshold_words(); + bool is_humongous = ShenandoahHeapRegion::requires_humongous(req.size()); if (try_set_alloc_failure_gc(is_humongous)) { // Only report the first allocation failure @@ -80,7 +80,7 @@ void ShenandoahController::handle_alloc_failure(ShenandoahAllocRequest& req, boo void ShenandoahController::handle_alloc_failure_evac(size_t words) { ShenandoahHeap* heap = ShenandoahHeap::heap(); - bool 
is_humongous = (words > ShenandoahHeapRegion::region_size_words()); + bool is_humongous = ShenandoahHeapRegion::requires_humongous(words); if (try_set_alloc_failure_gc(is_humongous)) { // Only report the first allocation failure diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index b47bb109031..3dfc6a79665 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1308,7 +1308,7 @@ void ShenandoahFreeSet::log_status() { HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) { shenandoah_assert_heaplocked(); - if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) { + if (ShenandoahHeapRegion::requires_humongous(req.size())) { switch (req.type()) { case ShenandoahAllocRequest::_alloc_shared: case ShenandoahAllocRequest::_alloc_shared_gc: @@ -1317,8 +1317,7 @@ HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_ case ShenandoahAllocRequest::_alloc_gclab: case ShenandoahAllocRequest::_alloc_tlab: in_new_region = false; - assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT, - req.size(), ShenandoahHeapRegion::humongous_threshold_words()); + assert(false, "Trying to allocate TLAB in humongous region: " SIZE_FORMAT, req.size()); return nullptr; default: ShouldNotReachHere(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 1975174b784..e4e2bb4d6e6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -277,14 +277,14 @@ class ShenandoahFreeSet : public CHeapObj { // While holding the heap lock, allocate memory for a single object or LAB which is to be entirely contained // within a single HeapRegion as characterized by req. // - // Precondition: req.size() <= ShenandoahHeapRegion::humongous_threshold_words(). + // Precondition: !ShenandoahHeapRegion::requires_humongous(req.size()) HeapWord* allocate_single(ShenandoahAllocRequest& req, bool& in_new_region); // While holding the heap lock, allocate memory for a humongous object which spans one or more regions that // were previously empty. Regions that represent humongous objects are entirely dedicated to the humongous // object. No other objects are packed into these regions. // - // Precondition: req.size() > ShenandoahHeapRegion::humongous_threshold_words(). + // Precondition: ShenandoahHeapRegion::requires_humongous(req.size()) HeapWord* allocate_contiguous(ShenandoahAllocRequest& req); // Change region r from the Mutator partition to the GC's Collector partition. This requires that the region is entirely empty. 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp index 8a94b20670a..eceed8dbe43 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -51,8 +51,6 @@ size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0; size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0; size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0; size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0; -size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0; -size_t ShenandoahHeapRegion::HumongousThresholdWords = 0; size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0; size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0; @@ -598,18 +596,8 @@ size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) { RegionCount = align_up(max_heap_size, RegionSizeBytes) / RegionSizeBytes; guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions"); - guarantee(HumongousThresholdWords == 0, "we should only set it once"); - HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100; - HumongousThresholdWords = align_down(HumongousThresholdWords, MinObjAlignment); - assert (HumongousThresholdWords <= RegionSizeWords, "sanity"); - - guarantee(HumongousThresholdBytes == 0, "we should only set it once"); - HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize; - assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity"); - guarantee(MaxTLABSizeWords == 0, "we should only set it once"); - MaxTLABSizeWords = MIN2(RegionSizeWords, HumongousThresholdWords); - MaxTLABSizeWords = align_down(MaxTLABSizeWords, MinObjAlignment); + MaxTLABSizeWords = align_down(RegionSizeWords, MinObjAlignment); guarantee(MaxTLABSizeBytes == 0, "we should only set it once"); MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp index c5763608582..c34c4c232e4 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp @@ -217,8 +217,6 @@ class ShenandoahHeapRegion { static size_t RegionSizeWordsShift; static size_t RegionSizeBytesMask; static size_t RegionSizeWordsMask; - static size_t HumongousThresholdBytes; - static size_t HumongousThresholdWords; static size_t MaxTLABSizeBytes; static size_t MaxTLABSizeWords; @@ -261,6 +259,10 @@ class ShenandoahHeapRegion { return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift(); } + inline static bool requires_humongous(size_t words) { + return words > ShenandoahHeapRegion::RegionSizeWords; + } + inline static size_t region_count() { return ShenandoahHeapRegion::RegionCount; } @@ -313,14 +315,6 @@ class ShenandoahHeapRegion { return (jint)ShenandoahHeapRegion::RegionSizeWordsShift; } - inline static size_t humongous_threshold_bytes() { - return ShenandoahHeapRegion::HumongousThresholdBytes; - } - - inline static size_t humongous_threshold_words() { - return ShenandoahHeapRegion::HumongousThresholdWords; - } - inline static size_t max_tlab_size_bytes() { return ShenandoahHeapRegion::MaxTLABSizeBytes; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp b/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp index db5a68d584e..baf95a5bdf7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp +++ 
b/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp @@ -43,7 +43,6 @@ void ShenandoahInitLogger::print_heap() { log_info(gc, init)("Heap Region Count: " SIZE_FORMAT, ShenandoahHeapRegion::region_count()); log_info(gc, init)("Heap Region Size: " EXACTFMT, EXACTFMTARGS(ShenandoahHeapRegion::region_size_bytes())); log_info(gc, init)("TLAB Size Max: " EXACTFMT, EXACTFMTARGS(ShenandoahHeapRegion::max_tlab_size_bytes())); - log_info(gc, init)("Humongous Object Threshold: " EXACTFMT, EXACTFMTARGS(ShenandoahHeapRegion::humongous_threshold_bytes())); } void ShenandoahInitLogger::print_gc_specific() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp index 87702afe98e..c66e5839d58 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp @@ -49,13 +49,6 @@ "With automatic region sizing, the regions would be at most " \ "this large.") \ \ - product(intx, ShenandoahHumongousThreshold, 100, EXPERIMENTAL, \ - "Humongous objects are allocated in separate regions. " \ - "This setting defines how large the object should be to be " \ - "deemed humongous. Value is in percents of heap region size. " \ - "This also caps the maximum TLAB size.") \ - range(1, 100) \ - \ product(ccstr, ShenandoahGCMode, "satb", \ "GC mode to use. Among other things, this defines which " \ "barriers are in in use. Possible values are:" \ diff --git a/test/hotspot/jtreg/gc/shenandoah/TestHumongousThreshold.java b/test/hotspot/jtreg/gc/shenandoah/TestHumongousThreshold.java deleted file mode 100644 index 845a6617ebd..00000000000 --- a/test/hotspot/jtreg/gc/shenandoah/TestHumongousThreshold.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -/* - * @test id=default - * @key randomness - * @requires vm.gc.Shenandoah - * @library /test/lib - * - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:+ShenandoahVerify - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 - * TestHumongousThreshold - * - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:-UseTLAB -XX:+ShenandoahVerify - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 - * TestHumongousThreshold - * - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:ShenandoahHumongousThreshold=90 -XX:ShenandoahGCHeuristics=aggressive - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:-UseTLAB -XX:ShenandoahHumongousThreshold=90 -XX:ShenandoahGCHeuristics=aggressive - * TestHumongousThreshold - */ - -/* - * @test id=16b - * @key randomness - * @requires vm.gc.Shenandoah - * @requires vm.bits == "64" - * @library /test/lib - * - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:ObjectAlignmentInBytes=16 
-XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 - * TestHumongousThreshold - * - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 - * TestHumongousThreshold - * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g - * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 - * TestHumongousThreshold - */ - -import java.util.Random; -import jdk.test.lib.Utils; - -public class TestHumongousThreshold { - - static final long TARGET_MB = Long.getLong("target", 20_000); // 20 Gb allocation - - static volatile Object sink; - - public static void main(String[] args) throws Exception { - final int min = 0; - final int max = 384 * 1024; - long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2)); - - Random r = Utils.getRandomInstance(); - for (long c = 0; c < count; c++) { - sink = new int[min + r.nextInt(max - min)]; - } - } - -} diff --git a/test/hotspot/jtreg/gc/shenandoah/options/TestHumongousThresholdArgs.java b/test/hotspot/jtreg/gc/shenandoah/options/TestHumongousThresholdArgs.java deleted file mode 100644 index 47d4115ce74..00000000000 --- a/test/hotspot/jtreg/gc/shenandoah/options/TestHumongousThresholdArgs.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -/* - * @test - * @summary Test that Shenandoah humongous threshold args are checked - * @requires vm.gc.Shenandoah - * @library /test/lib - * @modules java.base/jdk.internal.misc - * java.management - * @run driver TestHumongousThresholdArgs - */ - -import jdk.test.lib.process.ProcessTools; -import jdk.test.lib.process.OutputAnalyzer; - -public class TestHumongousThresholdArgs { - public static void main(String[] args) throws Exception { - { - OutputAnalyzer output = ProcessTools.executeLimitedTestJava( - "-Xmx128m", - "-XX:+UnlockExperimentalVMOptions", - "-XX:+UseShenandoahGC", - "-version"); - output.shouldHaveExitValue(0); - } - - int[] valid = new int[] {1, 10, 50, 90, 100}; - int[] invalid = new int[] {-100, -1, 0, 101, 1000}; - - for (int v : valid) { - OutputAnalyzer output = ProcessTools.executeLimitedTestJava( - "-Xmx128m", - "-XX:+UnlockExperimentalVMOptions", - "-XX:+UseShenandoahGC", - "-XX:ShenandoahHumongousThreshold=" + v, - "-version"); - output.shouldHaveExitValue(0); - } - - for (int v : invalid) { - OutputAnalyzer output = ProcessTools.executeLimitedTestJava( - "-Xmx128m", - "-XX:+UnlockExperimentalVMOptions", - "-XX:+UseShenandoahGC", - "-XX:ShenandoahHumongousThreshold=" + v, - "-version"); - output.shouldHaveExitValue(1); - } - } -} From e51e40c2b9f51d012c01407e0b8dadaab464753e Mon Sep 17 00:00:00 2001 From: Satyen Subramaniam Date: Thu, 15 Aug 2024 16:47:08 +0000 Subject: [PATCH 09/67] 8336914: Shenandoah: Missing verification steps after JDK-8255765 Reviewed-by: shade --- src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 86d6d91f72c..0301ef422a6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -921,8 +921,11 @@ void ShenandoahConcurrentGC::op_init_updaterefs() { heap->set_evacuation_in_progress(false); heap->set_concurrent_weak_root_in_progress(false); heap->prepare_update_heap_references(true /*concurrent*/); - heap->set_update_refs_in_progress(true); + if (ShenandoahVerify) { + heap->verifier()->verify_before_updaterefs(); + } + heap->set_update_refs_in_progress(true); if (ShenandoahPacing) { heap->pacer()->setup_for_updaterefs(); } From f308b2d59672b39ddca502baff50ab20ab781047 Mon Sep 17 00:00:00 2001 From: Satyen Subramaniam Date: Thu, 15 Aug 2024 16:47:45 +0000 Subject: [PATCH 10/67] 8336915: Shenandoah: Remove unused ShenandoahVerifier::verify_after_evacuation Reviewed-by: shade --- .../share/gc/shenandoah/shenandoahVerifier.cpp | 12 ------------ .../share/gc/shenandoah/shenandoahVerifier.hpp | 1 - 2 files changed, 13 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 694736cea42..23da3d7f637 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -856,18 +856,6 @@ void ShenandoahVerifier::verify_during_evacuation() { ); } -void ShenandoahVerifier::verify_after_evacuation() { - verify_at_safepoint( - "After Evacuation", - _verify_forwarded_allow, // objects are still forwarded - _verify_marked_complete, // bitmaps might be stale, but alloc-after-mark should be well - _verify_cset_forwarded, // all cset refs are fully forwarded - _verify_liveness_disable, // no reliable liveness data anymore 
- _verify_regions_notrash, // trash regions have been recycled already - _verify_gcstate_forwarded // evacuation produced some forwarded objects - ); -} - void ShenandoahVerifier::verify_before_updaterefs() { verify_at_safepoint( "Before Updating References", diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp index 2bbe5ae68b2..dd4eb901a33 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp @@ -176,7 +176,6 @@ class ShenandoahVerifier : public CHeapObj { void verify_after_concmark(); void verify_before_evacuation(); void verify_during_evacuation(); - void verify_after_evacuation(); void verify_before_updaterefs(); void verify_after_updaterefs(); void verify_before_fullgc(); From 965508270ecd092019f7bea3a1605c5d9f19d81e Mon Sep 17 00:00:00 2001 From: Kim Barrett Date: Thu, 15 Aug 2024 17:43:09 +0000 Subject: [PATCH 11/67] 8338330: Fix -Wzero-as-null-pointer-constant warnings from THROW_XXX_0 Reviewed-by: dlong, dholmes, shade --- src/hotspot/share/prims/jvm.cpp | 2 +- src/hotspot/share/runtime/reflection.cpp | 18 +++++++++--------- src/hotspot/share/utilities/exceptions.hpp | 5 ++++- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/src/hotspot/share/prims/jvm.cpp b/src/hotspot/share/prims/jvm.cpp index e40c1128966..a26a82debe7 100644 --- a/src/hotspot/share/prims/jvm.cpp +++ b/src/hotspot/share/prims/jvm.cpp @@ -3403,7 +3403,7 @@ JVM_ENTRY_NO_ENV(void*, JVM_LoadLibrary(const char* name, jboolean throwExceptio vmSymbols::java_lang_UnsatisfiedLinkError(), msg, Exceptions::unsafe_to_utf8); - THROW_HANDLE_0(h_exception); + THROW_HANDLE_NULL(h_exception); } else { log_info(library)("Failed to load library %s", name); return load_result; diff --git a/src/hotspot/share/runtime/reflection.cpp b/src/hotspot/share/runtime/reflection.cpp index 865d25fa06b..ab3d82ad7e2 100644 --- a/src/hotspot/share/runtime/reflection.cpp +++ b/src/hotspot/share/runtime/reflection.cpp @@ -1004,9 +1004,9 @@ static oop invoke(InstanceKlass* klass, // JVMTI internal flag reset is needed in order to report InvocationTargetException JvmtiExport::clear_detected_exception(THREAD); JavaCallArguments args(Handle(THREAD, resolution_exception)); - THROW_ARG_0(vmSymbols::java_lang_reflect_InvocationTargetException(), - vmSymbols::throwable_void_signature(), - &args); + THROW_ARG_NULL(vmSymbols::java_lang_reflect_InvocationTargetException(), + vmSymbols::throwable_void_signature(), + &args); } } else { // if the method can be overridden, we resolve using the vtable index. 
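As a side note on the -Wzero-as-null-pointer-constant cleanups in this series: the minimal, self-contained sketch below (plain C++, not HotSpot code) shows the class of warning being removed. A literal 0 used where a pointer is expected still compiles, but warns under that flag, whereas nullptr does not.

    #include <cstdio>

    static void print_name(const char* name) {
      std::printf("%s\n", name != nullptr ? name : "(null)");
    }

    int main() {
      // print_name(0);     // legal, but -Wzero-as-null-pointer-constant warns here
      print_name(nullptr);  // preferred: an explicit null pointer constant
      return 0;
    }
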
@@ -1028,9 +1028,9 @@ static oop invoke(InstanceKlass* klass, Handle h_origexception = Exceptions::new_exception(THREAD, vmSymbols::java_lang_AbstractMethodError(), ss.as_string()); JavaCallArguments args(h_origexception); - THROW_ARG_0(vmSymbols::java_lang_reflect_InvocationTargetException(), - vmSymbols::throwable_void_signature(), - &args); + THROW_ARG_NULL(vmSymbols::java_lang_reflect_InvocationTargetException(), + vmSymbols::throwable_void_signature(), + &args); } } } @@ -1117,9 +1117,9 @@ static oop invoke(InstanceKlass* klass, JvmtiExport::clear_detected_exception(THREAD); JavaCallArguments args(Handle(THREAD, target_exception)); - THROW_ARG_0(vmSymbols::java_lang_reflect_InvocationTargetException(), - vmSymbols::throwable_void_signature(), - &args); + THROW_ARG_NULL(vmSymbols::java_lang_reflect_InvocationTargetException(), + vmSymbols::throwable_void_signature(), + &args); } else { if (rtype == T_BOOLEAN || rtype == T_BYTE || rtype == T_CHAR || rtype == T_SHORT) { narrow((jvalue*)result.get_value_addr(), rtype, CHECK_NULL); diff --git a/src/hotspot/share/utilities/exceptions.hpp b/src/hotspot/share/utilities/exceptions.hpp index bddb8d79c1e..33eba68d6d9 100644 --- a/src/hotspot/share/utilities/exceptions.hpp +++ b/src/hotspot/share/utilities/exceptions.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -311,6 +311,9 @@ class Exceptions { #define THROW_NULL(name) THROW_(name, nullptr) #define THROW_MSG_NULL(name, message) THROW_MSG_(name, message, nullptr) +#define THROW_HANDLE_NULL(e) THROW_HANDLE_(e, nullptr) +#define THROW_ARG_NULL(name, signature, arg) THROW_ARG_(name, signature, arg, nullptr) + // The CATCH macro checks that no exception has been thrown by a function; it is used at // call sites about which is statically known that the callee cannot throw an exception // even though it is declared with TRAPS. From ace496515f4f91e802a51cec43d387eed61bd935 Mon Sep 17 00:00:00 2001 From: Chen Liang Date: Thu, 15 Aug 2024 17:50:34 +0000 Subject: [PATCH 12/67] 8338406: BytecodeHelpers using wrong bootstrap method descriptor for condy Reviewed-by: asotona --- .../classfile/impl/BytecodeHelpers.java | 12 +---- ...Test.java => ConstantDescSymbolsTest.java} | 52 ++++++++++++++----- 2 files changed, 41 insertions(+), 23 deletions(-) rename test/jdk/jdk/classfile/{PrimitiveClassConstantTest.java => ConstantDescSymbolsTest.java} (53%) diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/BytecodeHelpers.java b/src/java.base/share/classes/jdk/internal/classfile/impl/BytecodeHelpers.java index f7aa0902bc3..474189121fd 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/BytecodeHelpers.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/BytecodeHelpers.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -275,15 +275,7 @@ static ConstantDynamicEntry handleConstantDescToHandleInfo(ConstantPoolBuilder c List staticArgs = new ArrayList<>(bootstrapArgs.length); for (ConstantDesc bootstrapArg : bootstrapArgs) staticArgs.add(constantPool.loadableConstantEntry(bootstrapArg)); - - var bootstrapDesc = desc.bootstrapMethod(); - ClassEntry bsOwner = constantPool.classEntry(bootstrapDesc.owner()); - NameAndTypeEntry bsNameAndType = constantPool.nameAndTypeEntry(bootstrapDesc.methodName(), - bootstrapDesc.invocationType()); - int bsRefKind = bootstrapDesc.refKind(); - - MemberRefEntry memberRefEntry = toBootstrapMemberRef(constantPool, bsRefKind, bsOwner, bsNameAndType, bootstrapDesc.isOwnerInterface()); - MethodHandleEntry methodHandleEntry = constantPool.methodHandleEntry(bsRefKind, memberRefEntry); + MethodHandleEntry methodHandleEntry = handleDescToHandleInfo(constantPool, desc.bootstrapMethod()); BootstrapMethodEntry bme = constantPool.bsmEntry(methodHandleEntry, staticArgs); return constantPool.constantDynamicEntry(bme, constantPool.nameAndTypeEntry(desc.constantName(), diff --git a/test/jdk/jdk/classfile/PrimitiveClassConstantTest.java b/test/jdk/jdk/classfile/ConstantDescSymbolsTest.java similarity index 53% rename from test/jdk/jdk/classfile/PrimitiveClassConstantTest.java rename to test/jdk/jdk/classfile/ConstantDescSymbolsTest.java index 89cf43751f8..7c97c9dd5a9 100644 --- a/test/jdk/jdk/classfile/PrimitiveClassConstantTest.java +++ b/test/jdk/jdk/classfile/ConstantDescSymbolsTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,12 +23,14 @@ /* * @test - * @bug 8304031 - * @summary Testing that primitive class descs are encoded properly as loadable constants. - * @run junit PrimitiveClassConstantTest + * @bug 8304031 8338406 + * @summary Testing handling of various constant descriptors in ClassFile API. + * @run junit ConstantDescSymbolsTest */ import java.lang.constant.ClassDesc; +import java.lang.constant.DynamicConstantDesc; +import java.lang.constant.MethodHandleDesc; import java.lang.constant.MethodTypeDesc; import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; @@ -37,18 +39,16 @@ import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; -import static java.lang.constant.ConstantDescs.CD_Class; -import static java.lang.constant.ConstantDescs.CD_Object; -import static java.lang.constant.ConstantDescs.CD_int; -import static java.lang.constant.ConstantDescs.CD_long; -import static java.lang.constant.ConstantDescs.INIT_NAME; -import static java.lang.constant.ConstantDescs.MTD_void; import static java.lang.classfile.ClassFile.ACC_PUBLIC; +import static java.lang.constant.ConstantDescs.*; -public final class PrimitiveClassConstantTest { +import static org.junit.jupiter.api.Assertions.*; +final class ConstantDescSymbolsTest { + + // Testing that primitive class descs are encoded properly as loadable constants. 
@Test - public void test() throws Throwable { + void testPrimitiveClassDesc() throws Throwable { ClassDesc ape = ClassDesc.of("Ape"); var lookup = MethodHandles.lookup(); Class a = lookup.defineClass(ClassFile.of().build(ape, clb -> { @@ -73,7 +73,33 @@ public void test() throws Throwable { Supplier t = (Supplier) lookup.findConstructor(a, MethodType.methodType(void.class)) .asType(MethodType.methodType(Supplier.class)) .invokeExact(); - Assertions.assertSame(int.class, t.get()); + assertSame(int.class, t.get()); } + // Tests that condy symbols with non-static-method bootstraps are using the right lookup descriptor. + @Test + void testConstantDynamicNonStaticBootstrapMethod() throws Throwable { + record CondyBoot(MethodHandles.Lookup lookup, String name, Class type) {} + var bootClass = CondyBoot.class.describeConstable().orElseThrow(); + var bootMhDesc = MethodHandleDesc.ofConstructor(bootClass, CD_MethodHandles_Lookup, CD_String, CD_Class); + var condyDesc = DynamicConstantDesc.of(bootMhDesc); + + var targetCd = ClassDesc.of("Bat"); + var lookup = MethodHandles.lookup(); + Class a = lookup.defineClass(ClassFile.of().build(targetCd, clb -> { + clb.withInterfaceSymbols(Supplier.class.describeConstable().orElseThrow()) + .withMethodBody(INIT_NAME, MTD_void, ACC_PUBLIC, cob -> cob + .aload(0).invokespecial(CD_Object, INIT_NAME, MTD_void).return_()) + .withMethodBody("get", MethodTypeDesc.of(CD_Object), ACC_PUBLIC, cob -> cob + .loadConstant(condyDesc).areturn()); + })); + @SuppressWarnings("unchecked") + Supplier t = (Supplier) lookup.findConstructor(a, MethodType.methodType(void.class)) + .asType(MethodType.methodType(Supplier.class)).invokeExact(); + var cb = t.get(); + assertEquals(MethodHandles.Lookup.ORIGINAL, cb.lookup.lookupModes() & MethodHandles.Lookup.ORIGINAL); + assertSame(a, cb.lookup.lookupClass()); + assertEquals(DEFAULT_NAME, cb.name); + assertEquals(CondyBoot.class, cb.type); + } } From 52d9d69db5c1853445a95794c5bf21243aefa852 Mon Sep 17 00:00:00 2001 From: Kim Barrett Date: Thu, 15 Aug 2024 17:50:44 +0000 Subject: [PATCH 13/67] 8338331: Fix -Wzero-as-null-pointer-constant warnings from CHECK_0 in jni.cpp Reviewed-by: dholmes, shade --- src/hotspot/share/prims/jni.cpp | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp index 12eba0ff623..c9357fe0216 100644 --- a/src/hotspot/share/prims/jni.cpp +++ b/src/hotspot/share/prims/jni.cpp @@ -1156,7 +1156,7 @@ JNI_ENTRY(ResultType, \ va_start(args, methodID); \ JavaValue jvalue(Tag); \ JNI_ArgumentPusherVaArg ap(methodID, args); \ - jni_invoke_nonstatic(env, &jvalue, obj, JNI_VIRTUAL, methodID, &ap, CHECK_0); \ + jni_invoke_nonstatic(env, &jvalue, obj, JNI_VIRTUAL, methodID, &ap, CHECK_(ResultType{})); \ va_end(args); \ ret = jvalue.get_##ResultType(); \ return ret;\ @@ -1209,7 +1209,7 @@ JNI_ENTRY(ResultType, \ \ JavaValue jvalue(Tag); \ JNI_ArgumentPusherVaArg ap(methodID, args); \ - jni_invoke_nonstatic(env, &jvalue, obj, JNI_VIRTUAL, methodID, &ap, CHECK_0); \ + jni_invoke_nonstatic(env, &jvalue, obj, JNI_VIRTUAL, methodID, &ap, CHECK_(ResultType{})); \ ret = jvalue.get_##ResultType(); \ return ret;\ JNI_END @@ -1260,7 +1260,7 @@ JNI_ENTRY(ResultType, \ \ JavaValue jvalue(Tag); \ JNI_ArgumentPusherArray ap(methodID, args); \ - jni_invoke_nonstatic(env, &jvalue, obj, JNI_VIRTUAL, methodID, &ap, CHECK_0); \ + jni_invoke_nonstatic(env, &jvalue, obj, JNI_VIRTUAL, methodID, &ap, CHECK_(ResultType{})); \ ret = 
jvalue.get_##ResultType(); \ return ret;\ JNI_END @@ -1353,7 +1353,7 @@ JNI_ENTRY(ResultType, \ va_start(args, methodID); \ JavaValue jvalue(Tag); \ JNI_ArgumentPusherVaArg ap(methodID, args); \ - jni_invoke_nonstatic(env, &jvalue, obj, JNI_NONVIRTUAL, methodID, &ap, CHECK_0); \ + jni_invoke_nonstatic(env, &jvalue, obj, JNI_NONVIRTUAL, methodID, &ap, CHECK_(ResultType{})); \ va_end(args); \ ret = jvalue.get_##ResultType(); \ return ret;\ @@ -1406,7 +1406,7 @@ JNI_ENTRY(ResultType, \ \ JavaValue jvalue(Tag); \ JNI_ArgumentPusherVaArg ap(methodID, args); \ - jni_invoke_nonstatic(env, &jvalue, obj, JNI_NONVIRTUAL, methodID, &ap, CHECK_0); \ + jni_invoke_nonstatic(env, &jvalue, obj, JNI_NONVIRTUAL, methodID, &ap, CHECK_(ResultType{})); \ ret = jvalue.get_##ResultType(); \ return ret;\ JNI_END @@ -1458,7 +1458,7 @@ JNI_ENTRY(ResultType, \ \ JavaValue jvalue(Tag); \ JNI_ArgumentPusherArray ap(methodID, args); \ - jni_invoke_nonstatic(env, &jvalue, obj, JNI_NONVIRTUAL, methodID, &ap, CHECK_0); \ + jni_invoke_nonstatic(env, &jvalue, obj, JNI_NONVIRTUAL, methodID, &ap, CHECK_(ResultType{})); \ ret = jvalue.get_##ResultType(); \ return ret;\ JNI_END @@ -1554,7 +1554,7 @@ JNI_ENTRY(ResultType, \ va_start(args, methodID); \ JavaValue jvalue(Tag); \ JNI_ArgumentPusherVaArg ap(methodID, args); \ - jni_invoke_static(env, &jvalue, nullptr, JNI_STATIC, methodID, &ap, CHECK_0); \ + jni_invoke_static(env, &jvalue, nullptr, JNI_STATIC, methodID, &ap, CHECK_(ResultType{})); \ va_end(args); \ ret = jvalue.get_##ResultType(); \ return ret;\ @@ -1609,8 +1609,8 @@ JNI_ENTRY(ResultType, \ JNI_ArgumentPusherVaArg ap(methodID, args); \ /* Make sure class is initialized before trying to invoke its method */ \ Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls)); \ - k->initialize(CHECK_0); \ - jni_invoke_static(env, &jvalue, nullptr, JNI_STATIC, methodID, &ap, CHECK_0); \ + k->initialize(CHECK_(ResultType{})); \ + jni_invoke_static(env, &jvalue, nullptr, JNI_STATIC, methodID, &ap, CHECK_(ResultType{})); \ va_end(args); \ ret = jvalue.get_##ResultType(); \ return ret;\ @@ -1663,7 +1663,7 @@ JNI_ENTRY(ResultType, \ \ JavaValue jvalue(Tag); \ JNI_ArgumentPusherArray ap(methodID, args); \ - jni_invoke_static(env, &jvalue, nullptr, JNI_STATIC, methodID, &ap, CHECK_0); \ + jni_invoke_static(env, &jvalue, nullptr, JNI_STATIC, methodID, &ap, CHECK_(ResultType{})); \ ret = jvalue.get_##ResultType(); \ return ret;\ JNI_END From 1cd488436880b00c55fa91f44c115999cf686afd Mon Sep 17 00:00:00 2001 From: Coleen Phillimore Date: Thu, 15 Aug 2024 18:20:20 +0000 Subject: [PATCH 14/67] 8338447: Remove InstanceKlass::_is_marked_dependent Reviewed-by: shade --- src/hotspot/share/oops/instanceKlass.hpp | 4 ---- src/hotspot/share/runtime/vmStructs.cpp | 1 - 2 files changed, 5 deletions(-) diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp index 6e5d4ac8e7f..b639b820d10 100644 --- a/src/hotspot/share/oops/instanceKlass.hpp +++ b/src/hotspot/share/oops/instanceKlass.hpp @@ -225,10 +225,6 @@ class InstanceKlass: public Klass { volatile u2 _idnum_allocated_count; // JNI/JVMTI: increments with the addition of methods, old ids don't change - // _is_marked_dependent can be set concurrently, thus cannot be part of the - // _misc_flags. - bool _is_marked_dependent; // used for marking during flushing and deoptimization - // Class states are defined as ClassState (see above). // Place the _init_state here to utilize the unused 2-byte after // _idnum_allocated_count. 
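Returning to the CHECK_(ResultType{}) pattern used in the jni.cpp hunks above: the hedged sketch below (plain C++, not the HotSpot macros) illustrates why value-initialization is a convenient "zero of the right type" for these return paths. T{} yields 0 for integral returns, 0.0 for floating-point returns, and a null pointer for pointer-like returns, avoiding the literal 0 that the warning flags.

    #include <cstdio>

    // T{} value-initializes: 0 for integers, 0.0 for floats, nullptr for pointers.
    template <typename T>
    static T default_return_value() {
      return T{};
    }

    int main() {
      std::printf("%d\n", default_return_value<int>());           // 0
      std::printf("%f\n", default_return_value<double>());        // 0.000000
      std::printf("%p\n", (void*)default_return_value<char*>());  // a null pointer
      return 0;
    }
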
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp index 27dc10d2adb..913f988e48b 100644 --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -246,7 +246,6 @@ nonstatic_field(InstanceKlass, _nonstatic_oop_map_size, int) \ volatile_nonstatic_field(InstanceKlass, _init_state, InstanceKlass::ClassState) \ volatile_nonstatic_field(InstanceKlass, _init_thread, JavaThread*) \ - nonstatic_field(InstanceKlass, _is_marked_dependent, bool) \ nonstatic_field(InstanceKlass, _itable_len, int) \ nonstatic_field(InstanceKlass, _nest_host_index, u2) \ nonstatic_field(InstanceKlass, _reference_type, u1) \ From d86e99c3ca94ee8705e44fe2830edd3ceb0a7f64 Mon Sep 17 00:00:00 2001 From: Aleksey Shipilev Date: Thu, 15 Aug 2024 20:52:07 +0000 Subject: [PATCH 15/67] 8293650: Shenandoah: Support archived heap objects Reviewed-by: rkennke, wkemper, iklam --- src/hotspot/share/cds/archiveHeapWriter.hpp | 12 +-- src/hotspot/share/cds/filemap.cpp | 2 +- .../share/gc/shenandoah/shenandoahAsserts.cpp | 14 ++- .../share/gc/shenandoah/shenandoahHeap.cpp | 78 ++++++++++++++++ .../share/gc/shenandoah/shenandoahHeap.hpp | 6 ++ .../gc/shenandoah/shenandoahHeapRegion.cpp | 6 +- .../cds/appcds/TestShenandoahWithCDS.java | 89 +++++++++++++++++++ 7 files changed, 195 insertions(+), 12 deletions(-) create mode 100644 test/hotspot/jtreg/runtime/cds/appcds/TestShenandoahWithCDS.java diff --git a/src/hotspot/share/cds/archiveHeapWriter.hpp b/src/hotspot/share/cds/archiveHeapWriter.hpp index 99d5294007f..352aeb9a08f 100644 --- a/src/hotspot/share/cds/archiveHeapWriter.hpp +++ b/src/hotspot/share/cds/archiveHeapWriter.hpp @@ -112,6 +112,12 @@ class ArchiveHeapWriter : AllStatic { public: static const intptr_t NOCOOPS_REQUESTED_BASE = 0x10000000; + // The minimum region size of all collectors that are supported by CDS in + // ArchiveHeapLoader::can_map() mode. Currently only G1 is supported. G1's region size + // depends on -Xmx, but can never be smaller than 1 * M. + // (TODO: Perhaps change to 256K to be compatible with Shenandoah) + static constexpr int MIN_GC_REGION_ALIGNMENT = 1 * M; + private: class EmbeddedOopRelocator; struct NativePointerInfo { @@ -119,12 +125,6 @@ class ArchiveHeapWriter : AllStatic { int _field_offset; }; - // The minimum region size of all collectors that are supported by CDS in - // ArchiveHeapLoader::can_map() mode. Currently only G1 is supported. G1's region size - // depends on -Xmx, but can never be smaller than 1 * M. - // (TODO: Perhaps change to 256K to be compatible with Shenandoah) - static constexpr int MIN_GC_REGION_ALIGNMENT = 1 * M; - static GrowableArrayCHeap* _buffer; // The number of bytes that have written into _buffer (may be smaller than _buffer->length()). diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp index c5a9d5cceed..dc6c7ea097c 100644 --- a/src/hotspot/share/cds/filemap.cpp +++ b/src/hotspot/share/cds/filemap.cpp @@ -2025,7 +2025,7 @@ void FileMapInfo::map_or_load_heap_region() { // TODO - remove implicit knowledge of G1 log_info(cds)("Cannot use CDS heap data. UseG1GC is required for -XX:-UseCompressedOops"); } else { - log_info(cds)("Cannot use CDS heap data. UseEpsilonGC, UseG1GC, UseSerialGC or UseParallelGC are required."); + log_info(cds)("Cannot use CDS heap data. 
UseEpsilonGC, UseG1GC, UseSerialGC, UseParallelGC, or UseShenandoahGC are required."); } } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp index 5abd7b805f9..8235b59b80e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp @@ -254,7 +254,7 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char* // Do additional checks for special objects: their fields can hold metadata as well. // We want to check class loading/unloading did not corrupt them. - if (java_lang_Class::is_instance(obj)) { + if (Universe::is_fully_initialized() && java_lang_Class::is_instance(obj)) { Metadata* klass = obj->metadata_field(java_lang_Class::klass_offset()); if (klass != nullptr && !Metaspace::contains(klass)) { print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_correct failed", @@ -283,10 +283,12 @@ void ShenandoahAsserts::assert_in_correct_region(void* interior_loc, oop obj, co } size_t alloc_size = obj->size(); + HeapWord* obj_end = cast_from_oop(obj) + alloc_size; + if (ShenandoahHeapRegion::requires_humongous(alloc_size)) { size_t idx = r->index(); - size_t num_regions = ShenandoahHeapRegion::required_regions(alloc_size * HeapWordSize); - for (size_t i = idx; i < idx + num_regions; i++) { + size_t end_idx = heap->heap_region_index_containing(obj_end - 1); + for (size_t i = idx; i < end_idx; i++) { ShenandoahHeapRegion* chain_reg = heap->get_region(i); if (i == idx && !chain_reg->is_humongous_start()) { print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_in_correct_region failed", @@ -299,6 +301,12 @@ void ShenandoahAsserts::assert_in_correct_region(void* interior_loc, oop obj, co file, line); } } + } else { + if (obj_end > r->top()) { + print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_in_correct_region failed", + "Object end should be within the active area of the region", + file, line); + } } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 7904cd5f1cd..6f5fce53f85 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -72,6 +72,7 @@ #include "gc/shenandoah/shenandoahJfrSupport.hpp" #endif +#include "cds/archiveHeapWriter.hpp" #include "classfile/systemDictionary.hpp" #include "code/codeCache.hpp" #include "memory/classLoaderMetaspace.hpp" @@ -2482,3 +2483,80 @@ bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const { return false; } + +HeapWord* ShenandoahHeap::allocate_loaded_archive_space(size_t size) { +#if INCLUDE_CDS_JAVA_HEAP + // CDS wants a continuous memory range to load a bunch of objects. + // This effectively bypasses normal allocation paths, and requires + // a bit of massaging to unbreak GC invariants. + + ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size); + + // Easy case: a single regular region, no further adjustments needed. + if (!ShenandoahHeapRegion::requires_humongous(size)) { + return allocate_memory(req); + } + + // Hard case: the requested size would cause a humongous allocation. + // We need to make sure it looks like regular allocation to the rest of GC. + + // CDS code would guarantee no objects straddle multiple regions, as long as + // regions are as large as MIN_GC_REGION_ALIGNMENT. 
It is impractical at this + // point to deal with case when Shenandoah runs with smaller regions. + // TODO: This check can be dropped once MIN_GC_REGION_ALIGNMENT agrees more with Shenandoah. + if (ShenandoahHeapRegion::region_size_bytes() < ArchiveHeapWriter::MIN_GC_REGION_ALIGNMENT) { + return nullptr; + } + + HeapWord* mem = allocate_memory(req); + size_t start_idx = heap_region_index_containing(mem); + size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize); + + // Flip humongous -> regular. + { + ShenandoahHeapLocker locker(lock(), false); + for (size_t c = start_idx; c < start_idx + num_regions; c++) { + get_region(c)->make_regular_bypass(); + } + } + + return mem; +#else + assert(false, "Archive heap loader should not be available, should not be here"); + return nullptr; +#endif // INCLUDE_CDS_JAVA_HEAP +} + +void ShenandoahHeap::complete_loaded_archive_space(MemRegion archive_space) { + // Nothing to do here, except checking that heap looks fine. +#ifdef ASSERT + HeapWord* start = archive_space.start(); + HeapWord* end = archive_space.end(); + + // No unclaimed space between the objects. + // Objects are properly allocated in correct regions. + HeapWord* cur = start; + while (cur < end) { + oop oop = cast_to_oop(cur); + shenandoah_assert_in_correct_region(nullptr, oop); + cur += oop->size(); + } + + // No unclaimed tail at the end of archive space. + assert(cur == end, + "Archive space should be fully used: " PTR_FORMAT " " PTR_FORMAT, + p2i(cur), p2i(end)); + + // Region bounds are good. + ShenandoahHeapRegion* begin_reg = heap_region_containing(start); + ShenandoahHeapRegion* end_reg = heap_region_containing(end); + assert(begin_reg->is_regular(), "Must be"); + assert(end_reg->is_regular(), "Must be"); + assert(begin_reg->bottom() == start, + "Must agree: archive-space-start: " PTR_FORMAT ", begin-region-bottom: " PTR_FORMAT, + p2i(start), p2i(begin_reg->bottom())); + assert(end_reg->top() == end, + "Must agree: archive-space-end: " PTR_FORMAT ", end-region-top: " PTR_FORMAT, + p2i(end), p2i(end_reg->top())); +#endif +} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp index 86bd7b83bbe..81b1c3df6f4 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp @@ -539,6 +539,12 @@ class ShenandoahHeap : public CollectedHeap, public ShenandoahSpaceInfo { void sync_pinned_region_status(); void assert_pinned_region_status() NOT_DEBUG_RETURN; +// ---------- CDS archive support + + bool can_load_archived_objects() const override { return UseCompressedOops; } + HeapWord* allocate_loaded_archive_space(size_t size) override; + void complete_loaded_archive_space(MemRegion archive_space) override; + // ---------- Allocation support // private: diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp index eceed8dbe43..92602871ccd 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -100,8 +100,10 @@ void ShenandoahHeapRegion::make_regular_allocation() { void ShenandoahHeapRegion::make_regular_bypass() { shenandoah_assert_heaplocked(); - assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(), - "only for full or degen GC"); + assert (!Universe::is_fully_initialized() || + ShenandoahHeap::heap()->is_full_gc_in_progress() || + 
ShenandoahHeap::heap()->is_degenerated_gc_in_progress(), + "Only for STW GC or when Universe is initializing (CDS)"); switch (_state) { case _empty_uncommitted: diff --git a/test/hotspot/jtreg/runtime/cds/appcds/TestShenandoahWithCDS.java b/test/hotspot/jtreg/runtime/cds/appcds/TestShenandoahWithCDS.java new file mode 100644 index 00000000000..83442c1e159 --- /dev/null +++ b/test/hotspot/jtreg/runtime/cds/appcds/TestShenandoahWithCDS.java @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8293650 + * @requires vm.cds + * @requires vm.bits == 64 + * @requires vm.gc.Shenandoah + * @requires vm.gc.G1 + * @requires vm.gc == null + * @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds + * @compile test-classes/Hello.java + * @run driver TestShenandoahWithCDS + */ + +import jdk.test.lib.Platform; +import jdk.test.lib.process.OutputAnalyzer; + +public class TestShenandoahWithCDS { + public final static String HELLO = "Hello World"; + static String helloJar; + + public static void main(String... args) throws Exception { + helloJar = JarBuilder.build("hello", "Hello"); + + // Run with the variety of region sizes, and combinations + // of G1/Shenandoah at dump/exec times. "-1" means to use G1. + final int[] regionSizes = { -1, 256, 512, 1024, 2048 }; + + for (int dumpRegionSize : regionSizes) { + for (int execRegionSize : regionSizes) { + test(dumpRegionSize, execRegionSize); + } + } + } + + static void test(int dumpRegionSize, int execRegionSize) throws Exception { + String exp = "-XX:+UnlockExperimentalVMOptions"; + String optDumpGC = (dumpRegionSize != -1) ? "-XX:+UseShenandoahGC" : "-XX:+UseG1GC"; + String optExecGC = (execRegionSize != -1) ? "-XX:+UseShenandoahGC" : "-XX:+UseG1GC"; + String optDumpRegionSize = (dumpRegionSize != -1) ? "-XX:ShenandoahRegionSize=" + dumpRegionSize + "K" : exp; + String optExecRegionSize = (execRegionSize != -1) ? "-XX:ShenandoahRegionSize=" + execRegionSize + "K" : exp; + OutputAnalyzer out; + + System.out.println("0. Dump with " + optDumpGC + " and " + optDumpRegionSize); + out = TestCommon.dump(helloJar, + new String[] {"Hello"}, + exp, + "-Xmx1g", + optDumpGC, + optDumpRegionSize, + "-Xlog:cds"); + out.shouldContain("Dumping shared data to file:"); + out.shouldHaveExitValue(0); + + System.out.println("1. 
Exec with " + optExecGC + " and " + optExecRegionSize); + out = TestCommon.exec(helloJar, + exp, + "-Xmx1g", + optExecGC, + optExecRegionSize, + "-Xlog:cds", + "Hello"); + out.shouldContain(HELLO); + out.shouldHaveExitValue(0); + } +} From 74066bcca82749722e6fee57469520d418bf3430 Mon Sep 17 00:00:00 2001 From: Shaojin Wen Date: Thu, 15 Aug 2024 22:57:33 +0000 Subject: [PATCH 16/67] 8338409: Use record to simplify code Reviewed-by: redestad, liach --- src/java.base/share/classes/java/util/Formatter.java | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/java.base/share/classes/java/util/Formatter.java b/src/java.base/share/classes/java/util/Formatter.java index dfdb77cc6ac..1404892ff2a 100644 --- a/src/java.base/share/classes/java/util/Formatter.java +++ b/src/java.base/share/classes/java/util/Formatter.java @@ -3021,15 +3021,7 @@ interface FormatString { String toString(); } - private static class FixedString implements FormatString { - private final String s; - private final int start; - private final int end; - FixedString(String s, int start, int end) { - this.s = s; - this.start = start; - this.end = end; - } + private record FixedString(String s, int start, int end) implements FormatString { public int index() { return -2; } public void print(Formatter fmt, Object arg, Locale l) throws IOException { fmt.a.append(s, start, end); } From bd4160cea8b6b0fcf0507199ed76a12f5d0aaba9 Mon Sep 17 00:00:00 2001 From: Axel Boldt-Christmas Date: Fri, 16 Aug 2024 06:20:17 +0000 Subject: [PATCH 17/67] 8315884: New Object to ObjectMonitor mapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Erik Österlund Co-authored-by: Stefan Karlsson Co-authored-by: Coleen Phillimore Reviewed-by: rkennke, coleenp, dcubed --- src/hotspot/cpu/aarch64/aarch64.ad | 4 +- .../cpu/aarch64/c1_MacroAssembler_aarch64.cpp | 2 +- .../cpu/aarch64/c2_MacroAssembler_aarch64.cpp | 105 +- .../cpu/aarch64/c2_MacroAssembler_aarch64.hpp | 4 +- .../cpu/aarch64/interp_masm_aarch64.cpp | 14 +- .../cpu/aarch64/macroAssembler_aarch64.cpp | 9 +- .../cpu/aarch64/macroAssembler_aarch64.hpp | 2 +- .../cpu/aarch64/sharedRuntime_aarch64.cpp | 2 +- src/hotspot/cpu/arm/interp_masm_arm.cpp | 10 +- src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp | 6 +- src/hotspot/cpu/ppc/macroAssembler_ppc.cpp | 152 +- .../cpu/riscv/c2_MacroAssembler_riscv.cpp | 114 +- src/hotspot/cpu/riscv/interp_masm_riscv.cpp | 12 +- src/hotspot/cpu/s390/interp_masm_s390.cpp | 13 +- src/hotspot/cpu/s390/macroAssembler_s390.cpp | 108 +- src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp | 14 +- src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp | 13 +- src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp | 128 +- src/hotspot/cpu/x86/interp_masm_x86.cpp | 24 +- src/hotspot/cpu/x86/macroAssembler_x86.cpp | 19 +- src/hotspot/cpu/x86/macroAssembler_x86.hpp | 2 +- src/hotspot/cpu/x86/sharedRuntime_x86.cpp | 8 +- src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp | 3 +- src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp | 2 +- src/hotspot/cpu/zero/zeroInterpreter_zero.cpp | 36 +- src/hotspot/share/c1/c1_Runtime1.cpp | 4 +- .../share/interpreter/interpreterRuntime.cpp | 20 +- .../interpreter/zero/bytecodeInterpreter.cpp | 170 ++- src/hotspot/share/jvmci/vmStructs_jvmci.cpp | 5 +- src/hotspot/share/logging/logTag.hpp | 1 + src/hotspot/share/oops/markWord.cpp | 3 +- src/hotspot/share/oops/markWord.hpp | 13 +- src/hotspot/share/opto/c2_CodeStubs.hpp | 2 + src/hotspot/share/opto/library_call.cpp | 30 +- 
src/hotspot/share/prims/jvmtiEnvBase.cpp | 10 +- src/hotspot/share/runtime/arguments.cpp | 10 + src/hotspot/share/runtime/basicLock.cpp | 27 +- src/hotspot/share/runtime/basicLock.hpp | 34 +- .../share/runtime/basicLock.inline.hpp | 62 + src/hotspot/share/runtime/deoptimization.cpp | 34 +- src/hotspot/share/runtime/globals.hpp | 11 + src/hotspot/share/runtime/javaThread.cpp | 5 +- src/hotspot/share/runtime/javaThread.hpp | 9 + .../share/runtime/javaThread.inline.hpp | 23 + .../share/runtime/lightweightSynchronizer.cpp | 1223 +++++++++++++++++ .../share/runtime/lightweightSynchronizer.hpp | 80 ++ src/hotspot/share/runtime/lockStack.cpp | 11 + src/hotspot/share/runtime/lockStack.hpp | 33 +- .../share/runtime/lockStack.inline.hpp | 52 + src/hotspot/share/runtime/objectMonitor.cpp | 225 +-- src/hotspot/share/runtime/objectMonitor.hpp | 99 +- .../share/runtime/objectMonitor.inline.hpp | 63 +- src/hotspot/share/runtime/safepoint.cpp | 5 + src/hotspot/share/runtime/serviceThread.cpp | 19 +- src/hotspot/share/runtime/sharedRuntime.cpp | 8 +- src/hotspot/share/runtime/synchronizer.cpp | 496 +++---- src/hotspot/share/runtime/synchronizer.hpp | 17 +- .../share/runtime/synchronizer.inline.hpp | 81 ++ src/hotspot/share/runtime/vframe.cpp | 17 +- src/hotspot/share/runtime/vmStructs.cpp | 4 +- .../classes/sun/jvm/hotspot/oops/Mark.java | 10 + .../sun/jvm/hotspot/runtime/BasicLock.java | 2 +- .../jvm/hotspot/runtime/ObjectMonitor.java | 8 +- .../hotspot/runtime/ObjectSynchronizer.java | 3 + .../gtest/runtime/test_objectMonitor.cpp | 24 +- .../Monitor/UseObjectMonitorTableTest.java | 244 ++++ .../runtime/logging/MonitorInflationTest.java | 2 +- .../org/openjdk/bench/vm/lang/LockUnlock.java | 190 ++- 68 files changed, 3284 insertions(+), 911 deletions(-) create mode 100644 src/hotspot/share/runtime/basicLock.inline.hpp create mode 100644 src/hotspot/share/runtime/lightweightSynchronizer.cpp create mode 100644 src/hotspot/share/runtime/lightweightSynchronizer.hpp create mode 100644 src/hotspot/share/runtime/synchronizer.inline.hpp create mode 100644 test/hotspot/jtreg/runtime/Monitor/UseObjectMonitorTableTest.java diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad index a96c47051f2..8eb2821cc57 100644 --- a/src/hotspot/cpu/aarch64/aarch64.ad +++ b/src/hotspot/cpu/aarch64/aarch64.ad @@ -15793,7 +15793,7 @@ instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %} ins_encode %{ - __ fast_lock_lightweight($object$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register); + __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register); %} ins_pipe(pipe_serial); @@ -15809,7 +15809,7 @@ instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNo format %{ "fastunlock $object,$box\t! 
kills $tmp, $tmp2, $tmp3" %} ins_encode %{ - __ fast_unlock_lightweight($object$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register); + __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register); %} ins_pipe(pipe_serial); diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp index c0455ad1bff..89624aeffdd 100644 --- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp @@ -81,7 +81,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr } if (LockingMode == LM_LIGHTWEIGHT) { - lightweight_lock(obj, hdr, temp, rscratch2, slow_case); + lightweight_lock(disp_hdr, obj, hdr, temp, rscratch2, slow_case); } else if (LockingMode == LM_LEGACY) { Label done; // Load object header diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp index 251ea3813ff..19af03d3488 100644 --- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp @@ -224,10 +224,10 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe bind(no_count); } -void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register t1, +void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register t1, Register t2, Register t3) { assert(LockingMode == LM_LIGHTWEIGHT, "must be"); - assert_different_registers(obj, t1, t2, t3); + assert_different_registers(obj, box, t1, t2, t3); // Handle inflated monitor. Label inflated; @@ -236,6 +236,11 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register t1, // Finish fast lock unsuccessfully. MUST branch to with flag == NE Label slow_path; + if (UseObjectMonitorTable) { + // Clear cache in case fast locking succeeds. + str(zr, Address(box, BasicLock::object_monitor_cache_offset_in_bytes())); + } + if (DiagnoseSyncOnValueBasedClasses != 0) { load_klass(t1, obj); ldrw(t1, Address(t1, Klass::access_flags_offset())); @@ -244,6 +249,7 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register t1, } const Register t1_mark = t1; + const Register t3_t = t3; { // Lightweight locking @@ -251,7 +257,6 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register t1, Label push; const Register t2_top = t2; - const Register t3_t = t3; // Check if lock-stack is full. ldrw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset())); @@ -289,26 +294,71 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register t1, { // Handle inflated monitor. bind(inflated); - // mark contains the tagged ObjectMonitor*. - const Register t1_tagged_monitor = t1_mark; - const uintptr_t monitor_tag = markWord::monitor_value; + const Register t1_monitor = t1; + + if (!UseObjectMonitorTable) { + assert(t1_monitor == t1_mark, "should be the same here"); + } else { + Label monitor_found; + + // Load cache address + lea(t3_t, Address(rthread, JavaThread::om_cache_oops_offset())); + + const int num_unrolled = 2; + for (int i = 0; i < num_unrolled; i++) { + ldr(t1, Address(t3_t)); + cmp(obj, t1); + br(Assembler::EQ, monitor_found); + increment(t3_t, in_bytes(OMCache::oop_to_oop_difference())); + } + + Label loop; + + // Search for obj in cache. + bind(loop); + + // Check for match. 
+ ldr(t1, Address(t3_t)); + cmp(obj, t1); + br(Assembler::EQ, monitor_found); + + // Search until null encountered, guaranteed _null_sentinel at end. + increment(t3_t, in_bytes(OMCache::oop_to_oop_difference())); + cbnz(t1, loop); + // Cache Miss, NE set from cmp above, cbnz does not set flags + b(slow_path); + + bind(monitor_found); + ldr(t1_monitor, Address(t3_t, OMCache::oop_to_monitor_difference())); + } + const Register t2_owner_addr = t2; const Register t3_owner = t3; + const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast(markWord::monitor_value)); + const Address owner_address(t1_monitor, ObjectMonitor::owner_offset() - monitor_tag); + const Address recursions_address(t1_monitor, ObjectMonitor::recursions_offset() - monitor_tag); + + Label monitor_locked; // Compute owner address. - lea(t2_owner_addr, Address(t1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag))); + lea(t2_owner_addr, owner_address); // CAS owner (null => current thread). cmpxchg(t2_owner_addr, zr, rthread, Assembler::xword, /*acquire*/ true, /*release*/ false, /*weak*/ false, t3_owner); - br(Assembler::EQ, locked); + br(Assembler::EQ, monitor_locked); // Check if recursive. cmp(t3_owner, rthread); br(Assembler::NE, slow_path); // Recursive. - increment(Address(t1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1); + increment(recursions_address, 1); + + bind(monitor_locked); + if (UseObjectMonitorTable) { + str(t1_monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes())); + } } bind(locked); @@ -331,13 +381,13 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register t1, // C2 uses the value of Flags (NE vs EQ) to determine the continuation. } -void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register t1, Register t2, - Register t3) { +void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Register t1, + Register t2, Register t3) { assert(LockingMode == LM_LIGHTWEIGHT, "must be"); - assert_different_registers(obj, t1, t2, t3); + assert_different_registers(obj, box, t1, t2, t3); // Handle inflated monitor. - Label inflated, inflated_load_monitor; + Label inflated, inflated_load_mark; // Finish fast unlock successfully. MUST branch to with flag == EQ Label unlocked; // Finish fast unlock unsuccessfully. MUST branch to with flag == NE @@ -349,13 +399,15 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register t1, Regis { // Lightweight unlock + Label push_and_slow_path; + // Check if obj is top of lock-stack. ldrw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset())); subw(t2_top, t2_top, oopSize); ldr(t3_t, Address(rthread, t2_top)); cmp(obj, t3_t); // Top of lock stack was not obj. Must be monitor. - br(Assembler::NE, inflated_load_monitor); + br(Assembler::NE, inflated_load_mark); // Pop lock-stack. DEBUG_ONLY(str(zr, Address(rthread, t2_top));) @@ -372,7 +424,10 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register t1, Regis ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes())); // Check header for monitor (0b10). - tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated); + // Because we got here by popping (meaning we pushed in locked) + // there will be no monitor in the box. So we need to push back the obj + // so that the runtime can fix any potential anonymous owner. + tbnz(t1_mark, exact_log2(markWord::monitor_value), UseObjectMonitorTable ? push_and_slow_path : inflated); // Try to unlock. 
Transition lock bits 0b00 => 0b01 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea"); @@ -381,6 +436,7 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register t1, Regis /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg); br(Assembler::EQ, unlocked); + bind(push_and_slow_path); // Compare and exchange failed. // Restore lock-stack and handle the unlock in runtime. DEBUG_ONLY(str(obj, Address(rthread, t2_top));) @@ -391,7 +447,7 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register t1, Regis { // Handle inflated monitor. - bind(inflated_load_monitor); + bind(inflated_load_mark); ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes())); #ifdef ASSERT tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated); @@ -412,12 +468,19 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register t1, Regis bind(check_done); #endif - // mark contains the tagged ObjectMonitor*. - const Register t1_monitor = t1_mark; - const uintptr_t monitor_tag = markWord::monitor_value; + const Register t1_monitor = t1; + + if (!UseObjectMonitorTable) { + assert(t1_monitor == t1_mark, "should be the same here"); - // Untag the monitor. - sub(t1_monitor, t1_mark, monitor_tag); + // Untag the monitor. + add(t1_monitor, t1_mark, -(int)markWord::monitor_value); + } else { + ldr(t1_monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes())); + // null check with Flags == NE, no valid pointer below alignof(ObjectMonitor*) + cmp(t1_monitor, checked_cast(alignof(ObjectMonitor*))); + br(Assembler::LO, slow_path); + } const Register t2_recursions = t2; Label not_recursive; diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp index 1481f975020..43e60ae5a48 100644 --- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp @@ -39,8 +39,8 @@ void fast_lock(Register object, Register box, Register tmp, Register tmp2, Register tmp3); void fast_unlock(Register object, Register box, Register tmp, Register tmp2); // Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file. 
- void fast_lock_lightweight(Register object, Register t1, Register t2, Register t3); - void fast_unlock_lightweight(Register object, Register t1, Register t2, Register t3); + void fast_lock_lightweight(Register object, Register box, Register t1, Register t2, Register t3); + void fast_unlock_lightweight(Register object, Register box, Register t1, Register t2, Register t3); void string_compare(Register str1, Register str2, Register cnt1, Register cnt2, Register result, diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp index ca359fea9da..117168de0c5 100644 --- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp @@ -696,7 +696,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) } if (LockingMode == LM_LIGHTWEIGHT) { - lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case); + lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case); b(count); } else if (LockingMode == LM_LEGACY) { // Load (object->mark() | 1) into swap_reg @@ -752,15 +752,9 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) bind(slow_case); // Call the runtime routine for slow case - if (LockingMode == LM_LIGHTWEIGHT) { - call_VM(noreg, - CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj), - obj_reg); - } else { - call_VM(noreg, - CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), - lock_reg); - } + call_VM(noreg, + CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), + lock_reg); b(done); bind(count); diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index 9dd4371cf69..ead4220add0 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -6750,9 +6750,9 @@ void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) { // - obj: the object to be locked // - t1, t2, t3: temporary registers, will be destroyed // - slow: branched to if locking fails, absolute offset may larger than 32KB (imm14 encoding). -void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow) { +void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) { assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking"); - assert_different_registers(obj, t1, t2, t3, rscratch1); + assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1); Label push; const Register top = t1; @@ -6763,6 +6763,11 @@ void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Re // instruction emitted as it is part of C1's null check semantics. ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes())); + if (UseObjectMonitorTable) { + // Clear cache in case fast locking succeeds. + str(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes())))); + } + // Check if the lock-stack is full. 
ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset())); cmpw(top, (unsigned)LockStack::end_offset()); diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp index 3bfd6e70872..e49f0c49ef6 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp @@ -1639,7 +1639,7 @@ class MacroAssembler: public Assembler { // Code for java.lang.Thread::onSpinWait() intrinsic. void spin_wait(); - void lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow); + void lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow); void lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow); private: diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp index 8ce4230baa2..a4dac0ccf6d 100644 --- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp @@ -1811,7 +1811,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, __ br(Assembler::NE, slow_path_lock); } else { assert(LockingMode == LM_LIGHTWEIGHT, "must be"); - __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock); + __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock); } __ bind(count); __ increment(Address(rthread, JavaThread::held_monitor_count_offset())); diff --git a/src/hotspot/cpu/arm/interp_masm_arm.cpp b/src/hotspot/cpu/arm/interp_masm_arm.cpp index ba161e360be..2874abafc4f 100644 --- a/src/hotspot/cpu/arm/interp_masm_arm.cpp +++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp @@ -985,15 +985,7 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) { bind(slow_case); // Call the runtime routine for slow case - if (LockingMode == LM_LIGHTWEIGHT) { - // Pass oop, not lock, in fast lock case. call_VM wants R1 though. - push(R1); - mov(R1, Robj); - call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj), R1); - pop(R1); - } else { - call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock); - } + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock); bind(done); } } diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp index cdb8a742dcd..3acee737a3a 100644 --- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp @@ -1043,11 +1043,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) { // None of the above fast optimizations worked so we have to get into the // slow case of monitor enter. bind(slow_case); - if (LockingMode == LM_LIGHTWEIGHT) { - call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj), object); - } else { - call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor); - } + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor); b(done); // } align(32, 12); diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp index f9e584a1e6b..544c0d120d0 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp @@ -2804,32 +2804,39 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister fla { // Handle inflated monitor. 
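
Every port touched below implements the same inflated-monitor fast path: CAS the owner field from null to the current thread, and otherwise succeed only for a recursive enter by the current owner. A minimal portable sketch of that protocol, assuming a simplified monitor with an atomic owner and a recursion counter (field names are illustrative, not ObjectMonitor's):

#include <atomic>
#include <cstdint>

struct MonitorEnterModel {
  std::atomic<void*> owner{nullptr};   // null means unowned
  intptr_t recursions = 0;
};

// Returns true when the fast path succeeds (flags == EQ in the emitted code),
// false when the caller must take the runtime slow path (flags == NE).
inline bool try_enter(MonitorEnterModel& m, void* self) {
  void* expected = nullptr;
  if (m.owner.compare_exchange_strong(expected, self, std::memory_order_acquire)) {
    return true;                       // CAS null -> self: locked
  }
  if (expected == self) {              // already the owner: recursive enter
    m.recursions++;
    return true;
  }
  return false;                        // contended: defer to the runtime
}
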
bind(inflated); - // mark contains the tagged ObjectMonitor*. - const Register tagged_monitor = mark; - const uintptr_t monitor_tag = markWord::monitor_value; - const Register owner_addr = tmp2; - - // Compute owner address. - addi(owner_addr, tagged_monitor, in_bytes(ObjectMonitor::owner_offset()) - monitor_tag); - - // CAS owner (null => current thread). - cmpxchgd(/*flag=*/flag, - /*current_value=*/t, - /*compare_value=*/(intptr_t)0, - /*exchange_value=*/R16_thread, - /*where=*/owner_addr, - MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq, - MacroAssembler::cmpxchgx_hint_acquire_lock()); - beq(flag, locked); - - // Check if recursive. - cmpd(flag, t, R16_thread); - bne(flag, slow_path); - - // Recursive. - ld(tmp1, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), owner_addr); - addi(tmp1, tmp1, 1); - std(tmp1, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), owner_addr); + if (!UseObjectMonitorTable) { + // mark contains the tagged ObjectMonitor*. + const Register tagged_monitor = mark; + const uintptr_t monitor_tag = markWord::monitor_value; + const Register owner_addr = tmp2; + + // Compute owner address. + addi(owner_addr, tagged_monitor, in_bytes(ObjectMonitor::owner_offset()) - monitor_tag); + + // CAS owner (null => current thread). + cmpxchgd(/*flag=*/flag, + /*current_value=*/t, + /*compare_value=*/(intptr_t)0, + /*exchange_value=*/R16_thread, + /*where=*/owner_addr, + MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq, + MacroAssembler::cmpxchgx_hint_acquire_lock()); + beq(flag, locked); + + // Check if recursive. + cmpd(flag, t, R16_thread); + bne(flag, slow_path); + + // Recursive. + ld(tmp1, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), owner_addr); + addi(tmp1, tmp1, 1); + std(tmp1, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), owner_addr); + } else { + // OMCache lookup not supported yet. Take the slowpath. + // Set flag to NE + crxor(flag, Assembler::equal, flag, Assembler::equal); + b(slow_path); + } } bind(locked); @@ -2943,49 +2950,56 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister f bind(check_done); #endif - // mark contains the tagged ObjectMonitor*. - const Register monitor = mark; - const uintptr_t monitor_tag = markWord::monitor_value; - - // Untag the monitor. - subi(monitor, mark, monitor_tag); - - const Register recursions = tmp2; - Label not_recursive; - - // Check if recursive. - ld(recursions, in_bytes(ObjectMonitor::recursions_offset()), monitor); - addic_(recursions, recursions, -1); - blt(CCR0, not_recursive); - - // Recursive unlock. - std(recursions, in_bytes(ObjectMonitor::recursions_offset()), monitor); - crorc(CCR0, Assembler::equal, CCR0, Assembler::equal); - b(unlocked); - - bind(not_recursive); - - Label release_; - const Register t2 = tmp2; - - // Check if the entry lists are empty. - ld(t, in_bytes(ObjectMonitor::EntryList_offset()), monitor); - ld(t2, in_bytes(ObjectMonitor::cxq_offset()), monitor); - orr(t, t, t2); - cmpdi(flag, t, 0); - beq(flag, release_); - - // The owner may be anonymous and we removed the last obj entry in - // the lock-stack. This loses the information about the owner. - // Write the thread to the owner field so the runtime knows the owner. - std(R16_thread, in_bytes(ObjectMonitor::owner_offset()), monitor); - b(slow_path); - - bind(release_); - // Set owner to null. 
- release(); - // t contains 0 - std(t, in_bytes(ObjectMonitor::owner_offset()), monitor); + if (!UseObjectMonitorTable) { + // mark contains the tagged ObjectMonitor*. + const Register monitor = mark; + const uintptr_t monitor_tag = markWord::monitor_value; + + // Untag the monitor. + subi(monitor, mark, monitor_tag); + + const Register recursions = tmp2; + Label not_recursive; + + // Check if recursive. + ld(recursions, in_bytes(ObjectMonitor::recursions_offset()), monitor); + addic_(recursions, recursions, -1); + blt(CCR0, not_recursive); + + // Recursive unlock. + std(recursions, in_bytes(ObjectMonitor::recursions_offset()), monitor); + crorc(CCR0, Assembler::equal, CCR0, Assembler::equal); + b(unlocked); + + bind(not_recursive); + + Label release_; + const Register t2 = tmp2; + + // Check if the entry lists are empty. + ld(t, in_bytes(ObjectMonitor::EntryList_offset()), monitor); + ld(t2, in_bytes(ObjectMonitor::cxq_offset()), monitor); + orr(t, t, t2); + cmpdi(flag, t, 0); + beq(flag, release_); + + // The owner may be anonymous and we removed the last obj entry in + // the lock-stack. This loses the information about the owner. + // Write the thread to the owner field so the runtime knows the owner. + std(R16_thread, in_bytes(ObjectMonitor::owner_offset()), monitor); + b(slow_path); + + bind(release_); + // Set owner to null. + release(); + // t contains 0 + std(t, in_bytes(ObjectMonitor::owner_offset()), monitor); + } else { + // OMCache lookup not supported yet. Take the slowpath. + // Set flag to NE + crxor(flag, Assembler::equal, flag, Assembler::equal); + b(slow_path); + } } bind(unlocked); diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp index d88e4bf320d..8322b35e205 100644 --- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp @@ -323,25 +323,30 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register tmp1, Regis { // Handle inflated monitor. bind(inflated); - // mark contains the tagged ObjectMonitor*. - const Register tmp1_tagged_monitor = tmp1_mark; - const uintptr_t monitor_tag = markWord::monitor_value; - const Register tmp2_owner_addr = tmp2; - const Register tmp3_owner = tmp3; - - // Compute owner address. - la(tmp2_owner_addr, Address(tmp1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag))); - - // CAS owner (null => current thread). - cmpxchg(/*addr*/ tmp2_owner_addr, /*expected*/ zr, /*new*/ xthread, Assembler::int64, - /*acquire*/ Assembler::aq, /*release*/ Assembler::relaxed, /*result*/ tmp3_owner); - beqz(tmp3_owner, locked); - - // Check if recursive. - bne(tmp3_owner, xthread, slow_path); - - // Recursive. - increment(Address(tmp1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1, tmp2, tmp3); + if (!UseObjectMonitorTable) { + // mark contains the tagged ObjectMonitor*. + const Register tmp1_tagged_monitor = tmp1_mark; + const uintptr_t monitor_tag = markWord::monitor_value; + const Register tmp2_owner_addr = tmp2; + const Register tmp3_owner = tmp3; + + // Compute owner address. + la(tmp2_owner_addr, Address(tmp1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag))); + + // CAS owner (null => current thread). + cmpxchg(/*addr*/ tmp2_owner_addr, /*expected*/ zr, /*new*/ xthread, Assembler::int64, + /*acquire*/ Assembler::aq, /*release*/ Assembler::relaxed, /*result*/ tmp3_owner); + beqz(tmp3_owner, locked); + + // Check if recursive. 
+ bne(tmp3_owner, xthread, slow_path); + + // Recursive. + increment(Address(tmp1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1, tmp2, tmp3); + } else { + // OMCache lookup not supported yet. Take the slowpath. + j(slow_path); + } } bind(locked); @@ -453,49 +458,54 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register tmp1, Reg bind(check_done); #endif - // mark contains the tagged ObjectMonitor*. - const Register tmp1_monitor = tmp1_mark; - const uintptr_t monitor_tag = markWord::monitor_value; + if (!UseObjectMonitorTable) { + // mark contains the tagged ObjectMonitor*. + const Register tmp1_monitor = tmp1_mark; + const uintptr_t monitor_tag = markWord::monitor_value; - // Untag the monitor. - sub(tmp1_monitor, tmp1_mark, monitor_tag); + // Untag the monitor. + sub(tmp1_monitor, tmp1_mark, monitor_tag); - const Register tmp2_recursions = tmp2; - Label not_recursive; + const Register tmp2_recursions = tmp2; + Label not_recursive; - // Check if recursive. - ld(tmp2_recursions, Address(tmp1_monitor, ObjectMonitor::recursions_offset())); - beqz(tmp2_recursions, not_recursive); + // Check if recursive. + ld(tmp2_recursions, Address(tmp1_monitor, ObjectMonitor::recursions_offset())); + beqz(tmp2_recursions, not_recursive); - // Recursive unlock. - addi(tmp2_recursions, tmp2_recursions, -1); - sd(tmp2_recursions, Address(tmp1_monitor, ObjectMonitor::recursions_offset())); - j(unlocked); + // Recursive unlock. + addi(tmp2_recursions, tmp2_recursions, -1); + sd(tmp2_recursions, Address(tmp1_monitor, ObjectMonitor::recursions_offset())); + j(unlocked); - bind(not_recursive); + bind(not_recursive); - Label release; - const Register tmp2_owner_addr = tmp2; + Label release; + const Register tmp2_owner_addr = tmp2; - // Compute owner address. - la(tmp2_owner_addr, Address(tmp1_monitor, ObjectMonitor::owner_offset())); + // Compute owner address. + la(tmp2_owner_addr, Address(tmp1_monitor, ObjectMonitor::owner_offset())); - // Check if the entry lists are empty. - ld(t0, Address(tmp1_monitor, ObjectMonitor::EntryList_offset())); - ld(tmp3_t, Address(tmp1_monitor, ObjectMonitor::cxq_offset())); - orr(t0, t0, tmp3_t); - beqz(t0, release); + // Check if the entry lists are empty. + ld(t0, Address(tmp1_monitor, ObjectMonitor::EntryList_offset())); + ld(tmp3_t, Address(tmp1_monitor, ObjectMonitor::cxq_offset())); + orr(t0, t0, tmp3_t); + beqz(t0, release); - // The owner may be anonymous and we removed the last obj entry in - // the lock-stack. This loses the information about the owner. - // Write the thread to the owner field so the runtime knows the owner. - sd(xthread, Address(tmp2_owner_addr)); - j(slow_path); + // The owner may be anonymous and we removed the last obj entry in + // the lock-stack. This loses the information about the owner. + // Write the thread to the owner field so the runtime knows the owner. + sd(xthread, Address(tmp2_owner_addr)); + j(slow_path); - bind(release); - // Set owner to null. - membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); - sd(zr, Address(tmp2_owner_addr)); + bind(release); + // Set owner to null. + membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); + sd(zr, Address(tmp2_owner_addr)); + } else { + // OMCache lookup not supported yet. Take the slowpath. 
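
The unlock side seen in the PPC and RISC-V hunks above follows the mirror-image shape: a recursive exit only decrements the counter; an uncontended monitor is released by clearing the owner; and when the entry lists are non-empty the (possibly anonymous) owner is written back so the runtime slow path can hand off the monitor. A hedged sketch of that shape, with EntryList/cxq collapsed into a single flag for brevity:

#include <atomic>
#include <cstdint>

struct MonitorExitModel {
  std::atomic<void*> owner{nullptr};
  intptr_t recursions = 0;
  bool has_waiters = false;            // stands in for EntryList/cxq != null

  // Returns true on fast-path success, false when the runtime must finish.
  bool try_exit(void* self) {
    if (recursions > 0) {              // recursive unlock
      recursions--;
      return true;
    }
    if (has_waiters) {
      // Successor handoff happens in the runtime; make ownership explicit
      // again so the slow path sees a well-formed owner field.
      owner.store(self, std::memory_order_relaxed);
      return false;
    }
    owner.store(nullptr, std::memory_order_release);   // release the lock
    return true;
  }
};
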
+ j(slow_path); + } } bind(unlocked); diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp index af6d043d1d6..17b75b30264 100644 --- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp +++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp @@ -792,15 +792,9 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) bind(slow_case); // Call the runtime routine for slow case - if (LockingMode == LM_LIGHTWEIGHT) { - call_VM(noreg, - CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj), - obj_reg); - } else { - call_VM(noreg, - CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), - lock_reg); - } + call_VM(noreg, + CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), + lock_reg); j(done); bind(count); diff --git a/src/hotspot/cpu/s390/interp_masm_s390.cpp b/src/hotspot/cpu/s390/interp_masm_s390.cpp index 14bb98cea6a..0b29c31ec96 100644 --- a/src/hotspot/cpu/s390/interp_masm_s390.cpp +++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp @@ -1072,16 +1072,9 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) { // None of the above fast optimizations worked so we have to get into the // slow case of monitor enter. bind(slow_case); - if (LockingMode == LM_LIGHTWEIGHT) { - // for lightweight locking we need to use monitorenter_obj, see interpreterRuntime.cpp - call_VM(noreg, - CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj), - object); - } else { - call_VM(noreg, - CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), - monitor); - } + call_VM(noreg, + CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), + monitor); // } bind(done); diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp index 72d10ee80aa..b72b36eef53 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp @@ -6218,26 +6218,33 @@ void MacroAssembler::compiler_fast_lock_lightweight_object(Register obj, Registe { // Handle inflated monitor. bind(inflated); - // mark contains the tagged ObjectMonitor*. - const Register tagged_monitor = mark; - const Register zero = tmp2; - - // Try to CAS m->owner from null to current thread. - // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ. - // Otherwise, register zero is filled with the current owner. - z_lghi(zero, 0); - z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), tagged_monitor); - z_bre(locked); - - // Check if recursive. - z_cgr(Z_thread, zero); // zero contains the owner from z_csg instruction - z_brne(slow_path); - - // Recursive - z_agsi(Address(tagged_monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 1ll); - z_cgr(zero, zero); - // z_bru(locked); - // Uncomment above line in the future, for now jump address is right next to us. + if (!UseObjectMonitorTable) { + // mark contains the tagged ObjectMonitor*. + const Register tagged_monitor = mark; + const Register zero = tmp2; + + // Try to CAS m->owner from null to current thread. + // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ. + // Otherwise, register zero is filled with the current owner. + z_lghi(zero, 0); + z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), tagged_monitor); + z_bre(locked); + + // Check if recursive. 
+ z_cgr(Z_thread, zero); // zero contains the owner from z_csg instruction + z_brne(slow_path); + + // Recursive + z_agsi(Address(tagged_monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 1ll); + z_cgr(zero, zero); + // z_bru(locked); + // Uncomment above line in the future, for now jump address is right next to us. + } else { + // OMCache lookup not supported yet. Take the slowpath. + // Set flag to NE + z_ltgr(obj, obj); + z_bru(slow_path); + } } BLOCK_COMMENT("} handle_inflated_monitor_lightweight_locking"); @@ -6364,42 +6371,49 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(Register obj, Regis bind(check_done); #endif // ASSERT - // mark contains the tagged ObjectMonitor*. - const Register monitor = mark; + if (!UseObjectMonitorTable) { + // mark contains the tagged ObjectMonitor*. + const Register monitor = mark; - NearLabel not_recursive; - const Register recursions = tmp2; + NearLabel not_recursive; + const Register recursions = tmp2; - // Check if recursive. - load_and_test_long(recursions, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); - z_bre(not_recursive); // if 0 then jump, it's not recursive locking + // Check if recursive. + load_and_test_long(recursions, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); + z_bre(not_recursive); // if 0 then jump, it's not recursive locking - // Recursive unlock - z_agsi(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), -1ll); - z_cgr(monitor, monitor); // set the CC to EQUAL - z_bru(unlocked); + // Recursive unlock + z_agsi(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), -1ll); + z_cgr(monitor, monitor); // set the CC to EQUAL + z_bru(unlocked); - bind(not_recursive); + bind(not_recursive); - NearLabel not_ok; - // Check if the entry lists are empty. - load_and_test_long(tmp2, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); - z_brne(not_ok); - load_and_test_long(tmp2, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); - z_brne(not_ok); + NearLabel not_ok; + // Check if the entry lists are empty. + load_and_test_long(tmp2, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); + z_brne(not_ok); + load_and_test_long(tmp2, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); + z_brne(not_ok); - z_release(); - z_stg(tmp2 /*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor); + z_release(); + z_stg(tmp2 /*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor); - z_bru(unlocked); // CC = EQ here + z_bru(unlocked); // CC = EQ here - bind(not_ok); + bind(not_ok); - // The owner may be anonymous, and we removed the last obj entry in - // the lock-stack. This loses the information about the owner. - // Write the thread to the owner field so the runtime knows the owner. - z_stg(Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor); - z_bru(slow_path); // CC = NE here + // The owner may be anonymous, and we removed the last obj entry in + // the lock-stack. This loses the information about the owner. + // Write the thread to the owner field so the runtime knows the owner. + z_stg(Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor); + z_bru(slow_path); // CC = NE here + } else { + // OMCache lookup not supported yet. Take the slowpath. 
+ // Set flag to NE + z_ltgr(obj, obj); + z_bru(slow_path); + } } bind(unlocked); diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp index 2374324ca7c..576592d05aa 100644 --- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp @@ -66,11 +66,13 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr if (LockingMode == LM_LIGHTWEIGHT) { #ifdef _LP64 const Register thread = r15_thread; + lightweight_lock(disp_hdr, obj, hdr, thread, tmp, slow_case); #else - const Register thread = disp_hdr; - get_thread(thread); + // Implicit null check. + movptr(hdr, Address(obj, oopDesc::mark_offset_in_bytes())); + // Lacking registers and thread on x86_32. Always take slow path. + jmp(slow_case); #endif - lightweight_lock(obj, hdr, thread, tmp, slow_case); } else if (LockingMode == LM_LEGACY) { Label done; // Load object header @@ -139,10 +141,8 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_ #ifdef _LP64 lightweight_unlock(obj, disp_hdr, r15_thread, hdr, slow_case); #else - // This relies on the implementation of lightweight_unlock being able to handle - // that the reg_rax and thread Register parameters may alias each other. - get_thread(disp_hdr); - lightweight_unlock(obj, disp_hdr, disp_hdr, hdr, slow_case); + // Lacking registers and thread on x86_32. Always take slow path. + jmp(slow_case); #endif } else if (LockingMode == LM_LEGACY) { // test if object header is pointing to the displaced header, and if so, restore diff --git a/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp b/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp index 6dc8d14064a..1990488d8a0 100644 --- a/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp +++ b/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp @@ -96,6 +96,7 @@ void C2FastUnlockLightweightStub::emit(C2_MacroAssembler& masm) { { // Restore held monitor count and slow path. __ bind(restore_held_monitor_count_and_slow_path); + __ bind(_slow_path); // Restore held monitor count. __ increment(Address(_thread, JavaThread::held_monitor_count_offset())); // increment will always result in ZF = 0 (no overflows). @@ -112,19 +113,23 @@ void C2FastUnlockLightweightStub::emit(C2_MacroAssembler& masm) { #ifndef _LP64 __ jmpb(restore_held_monitor_count_and_slow_path); #else // _LP64 + const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast(markWord::monitor_value)); + const Address succ_address(monitor, ObjectMonitor::succ_offset() - monitor_tag); + const Address owner_address(monitor, ObjectMonitor::owner_offset() - monitor_tag); + // successor null check. - __ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD); + __ cmpptr(succ_address, NULL_WORD); __ jccb(Assembler::equal, restore_held_monitor_count_and_slow_path); // Release lock. - __ movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD); + __ movptr(owner_address, NULL_WORD); // Fence. // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack. __ lock(); __ addl(Address(rsp, 0), 0); // Recheck successor. - __ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD); + __ cmpptr(succ_address, NULL_WORD); // Observed a successor after the release -> fence we have handed off the monitor __ jccb(Assembler::notEqual, fix_zf_and_unlocked); @@ -133,7 +138,7 @@ void C2FastUnlockLightweightStub::emit(C2_MacroAssembler& masm) { // not handle the monitor handoff. 
Currently only works // due to the responsible thread. __ xorptr(rax, rax); - __ lock(); __ cmpxchgptr(_thread, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); + __ lock(); __ cmpxchgptr(_thread, owner_address); __ jccb (Assembler::equal, restore_held_monitor_count_and_slow_path); #endif diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp index faab09b7c17..5dbfdbc225d 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp @@ -590,6 +590,11 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist // Finish fast lock unsuccessfully. MUST jump with ZF == 0 Label slow_path; + if (UseObjectMonitorTable) { + // Clear cache in case fast locking succeeds. + movptr(Address(box, BasicLock::object_monitor_cache_offset_in_bytes()), 0); + } + if (DiagnoseSyncOnValueBasedClasses != 0) { load_klass(rax_reg, obj, t); movl(rax_reg, Address(rax_reg, Klass::access_flags_offset())); @@ -603,7 +608,7 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist Label push; - const Register top = box; + const Register top = UseObjectMonitorTable ? rax_reg : box; // Load the mark. movptr(mark, Address(obj, oopDesc::mark_offset_in_bytes())); @@ -630,6 +635,10 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist lock(); cmpxchgptr(mark, Address(obj, oopDesc::mark_offset_in_bytes())); jcc(Assembler::notEqual, slow_path); + if (UseObjectMonitorTable) { + // Need to reload top, clobbered by CAS. + movl(top, Address(thread, JavaThread::lock_stack_top_offset())); + } bind(push); // After successful lock, push object on lock-stack. movptr(Address(thread, top), obj); @@ -640,19 +649,68 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist { // Handle inflated monitor. bind(inflated); - const Register tagged_monitor = mark; + const Register monitor = t; + + if (!UseObjectMonitorTable) { + assert(mark == monitor, "should be the same here"); + } else { + // Uses ObjectMonitorTable. Look for the monitor in the om_cache. + // Fetch ObjectMonitor* from the cache or take the slow-path. + Label monitor_found; + + // Load cache address + lea(t, Address(thread, JavaThread::om_cache_oops_offset())); + + const int num_unrolled = 2; + for (int i = 0; i < num_unrolled; i++) { + cmpptr(obj, Address(t)); + jccb(Assembler::equal, monitor_found); + increment(t, in_bytes(OMCache::oop_to_oop_difference())); + } + + Label loop; + + // Search for obj in cache. + bind(loop); + + // Check for match. + cmpptr(obj, Address(t)); + jccb(Assembler::equal, monitor_found); + + // Search until null encountered, guaranteed _null_sentinel at end. + cmpptr(Address(t), 1); + jcc(Assembler::below, slow_path); // 0 check, but with ZF=0 when *t == 0 + increment(t, in_bytes(OMCache::oop_to_oop_difference())); + jmpb(loop); + + // Cache hit. + bind(monitor_found); + movptr(monitor, Address(t, OMCache::oop_to_monitor_difference())); + } + const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast(markWord::monitor_value)); + const Address recursions_address(monitor, ObjectMonitor::recursions_offset() - monitor_tag); + const Address owner_address(monitor, ObjectMonitor::owner_offset() - monitor_tag); + + Label monitor_locked; + // Lock the monitor. // CAS owner (null => current thread). 
xorptr(rax_reg, rax_reg); - lock(); cmpxchgptr(thread, Address(tagged_monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); - jccb(Assembler::equal, locked); + lock(); cmpxchgptr(thread, owner_address); + jccb(Assembler::equal, monitor_locked); // Check if recursive. cmpptr(thread, rax_reg); jccb(Assembler::notEqual, slow_path); // Recursive. - increment(Address(tagged_monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); + increment(recursions_address); + + bind(monitor_locked); + if (UseObjectMonitorTable) { + // Cache the monitor for unlock + movptr(Address(box, BasicLock::object_monitor_cache_offset_in_bytes()), monitor); + } } bind(locked); @@ -694,7 +752,9 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax, decrement(Address(thread, JavaThread::held_monitor_count_offset())); const Register mark = t; - const Register top = reg_rax; + const Register monitor = t; + const Register top = UseObjectMonitorTable ? t : reg_rax; + const Register box = reg_rax; Label dummy; C2FastUnlockLightweightStub* stub = nullptr; @@ -706,14 +766,17 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax, Label& push_and_slow_path = stub == nullptr ? dummy : stub->push_and_slow_path(); Label& check_successor = stub == nullptr ? dummy : stub->check_successor(); + Label& slow_path = stub == nullptr ? dummy : stub->slow_path(); { // Lightweight Unlock // Load top. movl(top, Address(thread, JavaThread::lock_stack_top_offset())); - // Prefetch mark. - movptr(mark, Address(obj, oopDesc::mark_offset_in_bytes())); + if (!UseObjectMonitorTable) { + // Prefetch mark. + movptr(mark, Address(obj, oopDesc::mark_offset_in_bytes())); + } // Check if obj is top of lock-stack. cmpptr(obj, Address(thread, top, Address::times_1, -oopSize)); @@ -730,6 +793,11 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax, // We elide the monitor check, let the CAS fail instead. + if (UseObjectMonitorTable) { + // Load mark. + movptr(mark, Address(obj, oopDesc::mark_offset_in_bytes())); + } + // Try to unlock. Transition lock bits 0b00 => 0b01 movptr(reg_rax, mark); andptr(reg_rax, ~(int32_t)markWord::lock_mask); @@ -751,6 +819,9 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax, jccb(Assembler::notEqual, inflated_check_lock_stack); stop("Fast Unlock lock on stack"); bind(check_done); + if (UseObjectMonitorTable) { + movptr(mark, Address(obj, oopDesc::mark_offset_in_bytes())); + } testptr(mark, markWord::monitor_value); jccb(Assembler::notZero, inflated); stop("Fast Unlock not monitor"); @@ -758,43 +829,40 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax, bind(inflated); - // mark contains the tagged ObjectMonitor*. - const Register monitor = mark; - -#ifndef _LP64 - // Check if recursive. - xorptr(reg_rax, reg_rax); - orptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); - jcc(Assembler::notZero, check_successor); - - // Check if the entry lists are empty. - movptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); - orptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); - jcc(Assembler::notZero, check_successor); + if (!UseObjectMonitorTable) { + assert(mark == monitor, "should be the same here"); + } else { + // Uses ObjectMonitorTable. Look for the monitor in our BasicLock on the stack. 
+ movptr(monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes())); + // null check with ZF == 0, no valid pointer below alignof(ObjectMonitor*) + cmpptr(monitor, alignof(ObjectMonitor*)); + jcc(Assembler::below, slow_path); + } + const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast(markWord::monitor_value)); + const Address recursions_address{monitor, ObjectMonitor::recursions_offset() - monitor_tag}; + const Address cxq_address{monitor, ObjectMonitor::cxq_offset() - monitor_tag}; + const Address EntryList_address{monitor, ObjectMonitor::EntryList_offset() - monitor_tag}; + const Address owner_address{monitor, ObjectMonitor::owner_offset() - monitor_tag}; - // Release lock. - movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD); -#else // _LP64 Label recursive; // Check if recursive. - cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 0); + cmpptr(recursions_address, 0); jccb(Assembler::notEqual, recursive); // Check if the entry lists are empty. - movptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); - orptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); + movptr(reg_rax, cxq_address); + orptr(reg_rax, EntryList_address); jcc(Assembler::notZero, check_successor); // Release lock. - movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD); + movptr(owner_address, NULL_WORD); jmpb(unlocked); // Recursive unlock. bind(recursive); - decrement(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); + decrement(recursions_address); xorl(t, t); -#endif } bind(unlocked); diff --git a/src/hotspot/cpu/x86/interp_masm_x86.cpp b/src/hotspot/cpu/x86/interp_masm_x86.cpp index 57d77bafd4b..249506c13ff 100644 --- a/src/hotspot/cpu/x86/interp_masm_x86.cpp +++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp @@ -1183,11 +1183,11 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) { if (LockingMode == LM_LIGHTWEIGHT) { #ifdef _LP64 const Register thread = r15_thread; + lightweight_lock(lock_reg, obj_reg, swap_reg, thread, tmp_reg, slow_case); #else - const Register thread = lock_reg; - get_thread(thread); + // Lacking registers and thread on x86_32. Always take slow path. + jmp(slow_case); #endif - lightweight_lock(obj_reg, swap_reg, thread, tmp_reg, slow_case); } else if (LockingMode == LM_LEGACY) { // Load immediate 1 into swap_reg %rax movl(swap_reg, 1); @@ -1249,15 +1249,9 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) { bind(slow_case); // Call the runtime routine for slow case - if (LockingMode == LM_LIGHTWEIGHT) { - call_VM(noreg, - CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter_obj), - obj_reg); - } else { - call_VM(noreg, - CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), - lock_reg); - } + call_VM(noreg, + CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), + lock_reg); bind(done); } } @@ -1306,10 +1300,8 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) { #ifdef _LP64 lightweight_unlock(obj_reg, swap_reg, r15_thread, header_reg, slow_case); #else - // This relies on the implementation of lightweight_unlock being able to handle - // that the reg_rax and thread Register parameters may alias each other. - get_thread(swap_reg); - lightweight_unlock(obj_reg, swap_reg, swap_reg, header_reg, slow_case); + // Lacking registers and thread on x86_32. Always take slow path. 
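
On x86 the monitor found via the table lookup is cached in the BasicLock on the stack, and the unlock path reloads it with a combined null-and-sanity check: any value below alignof(ObjectMonitor*) cannot be a valid pointer, so a cleared slot falls through to the slow path. A small model of that cache word, using stand-in type names rather than the real BasicLock/ObjectMonitor declarations:

#include <cstdint>

struct ObjectMonitorStub { alignas(8) char payload[64]; };  // stand-in type

// Models the BasicLock word that caches the ObjectMonitor* for fast unlock.
struct BasicLockModel {
  uintptr_t metadata = 0;

  void clear_cache()               { metadata = 0; }                           // before the fast lock attempt
  void cache(ObjectMonitorStub* m) { metadata = reinterpret_cast<uintptr_t>(m); } // after a successful inflated lock

  ObjectMonitorStub* cached_or_null() const {
    // Anything below the monitor alignment cannot be a real pointer, which is
    // exactly what the emitted "below alignof" branch tests.
    return metadata < alignof(ObjectMonitorStub)
        ? nullptr
        : reinterpret_cast<ObjectMonitorStub*>(metadata);
  }
};
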
+ jmp(slow_case); #endif } else if (LockingMode == LM_LEGACY) { // Load the old header from BasicLock structure diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp index 78bcabb2000..a5ad19806ea 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -10275,9 +10275,9 @@ void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigne // reg_rax: rax // thread: the thread which attempts to lock obj // tmp: a temporary register -void MacroAssembler::lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) { +void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) { assert(reg_rax == rax, ""); - assert_different_registers(obj, reg_rax, thread, tmp); + assert_different_registers(basic_lock, obj, reg_rax, thread, tmp); Label push; const Register top = tmp; @@ -10286,6 +10286,11 @@ void MacroAssembler::lightweight_lock(Register obj, Register reg_rax, Register t // instruction emitted as it is part of C1's null check semantics. movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes())); + if (UseObjectMonitorTable) { + // Clear cache in case fast locking succeeds. + movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))), 0); + } + // Load top. movl(top, Address(thread, JavaThread::lock_stack_top_offset())); @@ -10324,13 +10329,9 @@ void MacroAssembler::lightweight_lock(Register obj, Register reg_rax, Register t // reg_rax: rax // thread: the thread // tmp: a temporary register -// -// x86_32 Note: reg_rax and thread may alias each other due to limited register -// availiability. void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) { assert(reg_rax == rax, ""); - assert_different_registers(obj, reg_rax, tmp); - LP64_ONLY(assert_different_registers(obj, reg_rax, thread, tmp);) + assert_different_registers(obj, reg_rax, thread, tmp); Label unlocked, push_and_slow; const Register top = tmp; @@ -10370,10 +10371,6 @@ void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register bind(push_and_slow); // Restore lock-stack and handle the unlock in runtime. - if (thread == reg_rax) { - // On x86_32 we may lose the thread. 
- get_thread(thread); - } #ifdef ASSERT movl(top, Address(thread, JavaThread::lock_stack_top_offset())); movptr(Address(thread, top), obj); diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp index 2ecd2bbe96d..594f0b95ca3 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp @@ -2148,7 +2148,7 @@ class MacroAssembler: public Assembler { void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg); - void lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow); + void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow); void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow); #ifdef _LP64 diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86.cpp index 3ecbb43f7f5..78330962d1a 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86.cpp @@ -62,9 +62,11 @@ void SharedRuntime::inline_check_hashcode_from_object_header(MacroAssembler* mas if (LockingMode == LM_LIGHTWEIGHT) { - // check if monitor - __ testptr(result, markWord::monitor_value); - __ jcc(Assembler::notZero, slowCase); + if (!UseObjectMonitorTable) { + // check if monitor + __ testptr(result, markWord::monitor_value); + __ jcc(Assembler::notZero, slowCase); + } } else { // check if locked __ testptr(result, markWord::unlocked_value); diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp index d313c1b216a..85c9125a97d 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp @@ -1686,7 +1686,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, __ jcc(Assembler::notEqual, slow_path_lock); } else { assert(LockingMode == LM_LIGHTWEIGHT, "must be"); - __ lightweight_lock(obj_reg, swap_reg, thread, lock_reg, slow_path_lock); + // Lacking registers and thread on x86_32. Always take slow path. 
+ __ jmp(slow_path_lock); } __ bind(count_mon); __ inc_held_monitor_count(); diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp index d27b1d141fc..b5362a9942c 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp @@ -2266,7 +2266,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, __ jcc(Assembler::notEqual, slow_path_lock); } else { assert(LockingMode == LM_LIGHTWEIGHT, "must be"); - __ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock); + __ lightweight_lock(lock_reg, obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock); } __ bind(count_mon); __ inc_held_monitor_count(); diff --git a/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp b/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp index 9e00b1c5b04..2b53042ef10 100644 --- a/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp +++ b/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp @@ -37,6 +37,7 @@ #include "oops/method.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiExport.hpp" +#include "runtime/basicLock.inline.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -44,6 +45,7 @@ #include "runtime/timer.hpp" #include "runtime/timerTrace.hpp" #include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" #include "entry_zero.hpp" @@ -331,23 +333,27 @@ int ZeroInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) { if (method->is_synchronized()) { monitor = (BasicObjectLock*) istate->stack_base(); oop lockee = monitor->obj(); - markWord disp = lockee->mark().set_unlocked(); - monitor->lock()->set_displaced_header(disp); - bool call_vm = (LockingMode == LM_MONITOR); - bool inc_monitor_count = true; - if (call_vm || lockee->cas_set_mark(markWord::from_pointer(monitor), disp) != disp) { - // Is it simple recursive case? - if (!call_vm && thread->is_lock_owned((address) disp.clear_lock_bits().to_pointer())) { - monitor->lock()->set_displaced_header(markWord::from_pointer(nullptr)); - } else { - inc_monitor_count = false; - CALL_VM_NOCHECK(InterpreterRuntime::monitorenter(thread, monitor)); - if (HAS_PENDING_EXCEPTION) - goto unwind_and_return; + bool success = false; + if (LockingMode == LM_LEGACY) { + markWord disp = lockee->mark().set_unlocked(); + monitor->lock()->set_displaced_header(disp); + success = true; + if (lockee->cas_set_mark(markWord::from_pointer(monitor), disp) != disp) { + // Is it simple recursive case? 
+ if (thread->is_lock_owned((address) disp.clear_lock_bits().to_pointer())) { + monitor->lock()->set_displaced_header(markWord::from_pointer(nullptr)); + } else { + success = false; + } + } + if (success) { + THREAD->inc_held_monitor_count(); } } - if (inc_monitor_count) { - THREAD->inc_held_monitor_count(); + if (!success) { + CALL_VM_NOCHECK(InterpreterRuntime::monitorenter(thread, monitor)); + if (HAS_PENDING_EXCEPTION) + goto unwind_and_return; } } diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp index 31ae3f6bee0..8524f37177b 100644 --- a/src/hotspot/share/c1/c1_Runtime1.cpp +++ b/src/hotspot/share/c1/c1_Runtime1.cpp @@ -757,8 +757,8 @@ JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, if (LockingMode == LM_MONITOR) { lock->set_obj(obj); } - assert(LockingMode == LM_LIGHTWEIGHT || obj == lock->obj(), "must match"); - SharedRuntime::monitor_enter_helper(obj, LockingMode == LM_LIGHTWEIGHT ? nullptr : lock->lock(), current); + assert(obj == lock->obj(), "must match"); + SharedRuntime::monitor_enter_helper(obj, lock->lock(), current); JRT_END diff --git a/src/hotspot/share/interpreter/interpreterRuntime.cpp b/src/hotspot/share/interpreter/interpreterRuntime.cpp index 4f2eae023f6..525258b1ebd 100644 --- a/src/hotspot/share/interpreter/interpreterRuntime.cpp +++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp @@ -71,7 +71,7 @@ #include "runtime/sharedRuntime.hpp" #include "runtime/stackWatermarkSet.hpp" #include "runtime/stubRoutines.hpp" -#include "runtime/synchronizer.hpp" +#include "runtime/synchronizer.inline.hpp" #include "runtime/threadCritical.hpp" #include "utilities/align.hpp" #include "utilities/checkedCast.hpp" @@ -725,7 +725,6 @@ void InterpreterRuntime::resolve_get_put(Bytecodes::Code bytecode, int field_ind //%note monitor_1 JRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* current, BasicObjectLock* elem)) - assert(LockingMode != LM_LIGHTWEIGHT, "Should call monitorenter_obj() when using the new lightweight locking"); #ifdef ASSERT current->last_frame().interpreter_frame_verify_monitor(elem); #endif @@ -740,23 +739,6 @@ JRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* current, B #endif JRT_END -// NOTE: We provide a separate implementation for the new lightweight locking to workaround a limitation -// of registers in x86_32. This entry point accepts an oop instead of a BasicObjectLock*. -// The problem is that we would need to preserve the register that holds the BasicObjectLock, -// but we are using that register to hold the thread. We don't have enough registers to -// also keep the BasicObjectLock, but we don't really need it anyway, we only need -// the object. See also InterpreterMacroAssembler::lock_object(). -// As soon as legacy stack-locking goes away we could remove the other monitorenter() entry -// point, and only use oop-accepting entries (same for monitorexit() below). 
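
The interpreter changes above and below all restructure slow-path selection the same way: only LM_LEGACY attempts the displaced-header CAS inline, a single success flag replaces the old call_vm/inc_monitor_count bookkeeping, and every failure (including all other locking modes) funnels into InterpreterRuntime::monitorenter. A condensed sketch of that control flow, using placeholder names for the mark-word results rather than the real markWord/oopDesc operations:

enum LockingModeModel { LM_MONITOR_M, LM_LEGACY_M, LM_LIGHTWEIGHT_M };

struct LockAttempt {
  bool cas_displaced_header_succeeded;   // result of the legacy mark-word CAS
  bool is_recursive_on_this_thread;      // displaced header points into this thread's stack
};

// Returns true if the inline legacy fast path locked the object (the held
// monitor count is then incremented); otherwise the caller invokes the single
// monitorenter runtime entry, which also covers LM_MONITOR and LM_LIGHTWEIGHT.
bool try_inline_lock(LockingModeModel mode, const LockAttempt& a) {
  if (mode != LM_LEGACY_M) {
    return false;                        // always defer to the runtime
  }
  if (a.cas_displaced_header_succeeded) {
    return true;                         // stack-locked inline
  }
  return a.is_recursive_on_this_thread;  // simple recursive case handled inline
}
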
-JRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter_obj(JavaThread* current, oopDesc* obj)) - assert(LockingMode == LM_LIGHTWEIGHT, "Should call monitorenter() when not using the new lightweight locking"); - Handle h_obj(current, cast_to_oop(obj)); - assert(Universe::heap()->is_in_or_null(h_obj()), - "must be null or an object"); - ObjectSynchronizer::enter(h_obj, nullptr, current); - return; -JRT_END - JRT_LEAF(void, InterpreterRuntime::monitorexit(BasicObjectLock* elem)) oop obj = elem->obj(); assert(Universe::heap()->is_in(obj), "must be an object"); diff --git a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp index 36b01bb0802..fbdf8f9ca71 100644 --- a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp +++ b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp @@ -53,7 +53,9 @@ #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/atomic.hpp" +#include "runtime/basicLock.inline.hpp" #include "runtime/frame.inline.hpp" +#include "runtime/globals.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/orderAccess.hpp" @@ -61,6 +63,7 @@ #include "runtime/threadCritical.hpp" #include "utilities/debug.hpp" #include "utilities/exceptions.hpp" +#include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" /* @@ -624,23 +627,28 @@ void BytecodeInterpreter::run(interpreterState istate) { BasicObjectLock* mon = &istate->monitor_base()[-1]; mon->set_obj(rcvr); - // Traditional lightweight locking. - markWord displaced = rcvr->mark().set_unlocked(); - mon->lock()->set_displaced_header(displaced); - bool call_vm = (LockingMode == LM_MONITOR); - bool inc_monitor_count = true; - if (call_vm || rcvr->cas_set_mark(markWord::from_pointer(mon), displaced) != displaced) { - // Is it simple recursive case? - if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) { - mon->lock()->set_displaced_header(markWord::from_pointer(nullptr)); - } else { - inc_monitor_count = false; - CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); + bool success = false; + if (LockingMode == LM_LEGACY) { + // Traditional fast locking. + markWord displaced = rcvr->mark().set_unlocked(); + mon->lock()->set_displaced_header(displaced); + success = true; + if (rcvr->cas_set_mark(markWord::from_pointer(mon), displaced) != displaced) { + // Is it simple recursive case? + if (THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) { + mon->lock()->set_displaced_header(markWord::from_pointer(nullptr)); + } else { + success = false; + } + } + if (success) { + THREAD->inc_held_monitor_count(); } } - if (inc_monitor_count) { - THREAD->inc_held_monitor_count(); + if (!success) { + CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); } + } THREAD->set_do_not_unlock_if_synchronized(false); @@ -723,23 +731,28 @@ void BytecodeInterpreter::run(interpreterState istate) { assert(entry->obj() == nullptr, "Frame manager didn't allocate the monitor"); entry->set_obj(lockee); - // traditional lightweight locking - markWord displaced = lockee->mark().set_unlocked(); - entry->lock()->set_displaced_header(displaced); - bool call_vm = (LockingMode == LM_MONITOR); - bool inc_monitor_count = true; - if (call_vm || lockee->cas_set_mark(markWord::from_pointer(entry), displaced) != displaced) { - // Is it simple recursive case? 
- if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) { - entry->lock()->set_displaced_header(markWord::from_pointer(nullptr)); - } else { - inc_monitor_count = false; - CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); + bool success = false; + if (LockingMode == LM_LEGACY) { + // Traditional fast locking. + markWord displaced = lockee->mark().set_unlocked(); + entry->lock()->set_displaced_header(displaced); + success = true; + if (lockee->cas_set_mark(markWord::from_pointer(entry), displaced) != displaced) { + // Is it simple recursive case? + if (THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) { + entry->lock()->set_displaced_header(markWord::from_pointer(nullptr)); + } else { + success = false; + } + } + if (success) { + THREAD->inc_held_monitor_count(); } } - if (inc_monitor_count) { - THREAD->inc_held_monitor_count(); + if (!success) { + CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); } + UPDATE_PC_AND_TOS(1, -1); goto run; } @@ -1653,23 +1666,28 @@ void BytecodeInterpreter::run(interpreterState istate) { if (entry != nullptr) { entry->set_obj(lockee); - // traditional lightweight locking - markWord displaced = lockee->mark().set_unlocked(); - entry->lock()->set_displaced_header(displaced); - bool call_vm = (LockingMode == LM_MONITOR); - bool inc_monitor_count = true; - if (call_vm || lockee->cas_set_mark(markWord::from_pointer(entry), displaced) != displaced) { - // Is it simple recursive case? - if (!call_vm && THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) { - entry->lock()->set_displaced_header(markWord::from_pointer(nullptr)); - } else { - inc_monitor_count = false; - CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); + bool success = false; + if (LockingMode == LM_LEGACY) { + // Traditional fast locking. + markWord displaced = lockee->mark().set_unlocked(); + entry->lock()->set_displaced_header(displaced); + success = true; + if (lockee->cas_set_mark(markWord::from_pointer(entry), displaced) != displaced) { + // Is it simple recursive case? 
+ if (THREAD->is_lock_owned((address) displaced.clear_lock_bits().to_pointer())) { + entry->lock()->set_displaced_header(markWord::from_pointer(nullptr)); + } else { + success = false; + } + } + if (success) { + THREAD->inc_held_monitor_count(); } } - if (inc_monitor_count) { - THREAD->inc_held_monitor_count(); + if (!success) { + CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); } + UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); } else { istate->set_msg(more_monitors); @@ -1687,23 +1705,27 @@ void BytecodeInterpreter::run(interpreterState istate) { while (most_recent != limit ) { if ((most_recent)->obj() == lockee) { BasicLock* lock = most_recent->lock(); - markWord header = lock->displaced_header(); - most_recent->set_obj(nullptr); - // If it isn't recursive we either must swap old header or call the runtime - bool dec_monitor_count = true; - bool call_vm = (LockingMode == LM_MONITOR); - if (header.to_pointer() != nullptr || call_vm) { - markWord old_header = markWord::encode(lock); - if (call_vm || lockee->cas_set_mark(header, old_header) != old_header) { - // restore object for the slow case - most_recent->set_obj(lockee); - dec_monitor_count = false; - InterpreterRuntime::monitorexit(most_recent); + bool success = false; + if (LockingMode == LM_LEGACY) { + // If it isn't recursive we either must swap old header or call the runtime + most_recent->set_obj(nullptr); + success = true; + markWord header = lock->displaced_header(); + if (header.to_pointer() != nullptr) { + markWord old_header = markWord::encode(lock); + if (lockee->cas_set_mark(header, old_header) != old_header) { + // restore object for the slow case + most_recent->set_obj(lockee); + success = false; + } + } + if (success) { + THREAD->dec_held_monitor_count(); } } - if (dec_monitor_count) { - THREAD->dec_held_monitor_count(); + if (!success) { + InterpreterRuntime::monitorexit(most_recent); } UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); } @@ -3125,22 +3147,28 @@ void BytecodeInterpreter::run(interpreterState istate) { oop lockee = end->obj(); if (lockee != nullptr) { BasicLock* lock = end->lock(); - markWord header = lock->displaced_header(); - end->set_obj(nullptr); - - // If it isn't recursive we either must swap old header or call the runtime - bool dec_monitor_count = true; - if (header.to_pointer() != nullptr) { - markWord old_header = markWord::encode(lock); - if (lockee->cas_set_mark(header, old_header) != old_header) { - // restore object for the slow case - end->set_obj(lockee); - dec_monitor_count = false; - InterpreterRuntime::monitorexit(end); + + bool success = false; + if (LockingMode == LM_LEGACY) { + markWord header = lock->displaced_header(); + end->set_obj(nullptr); + + // If it isn't recursive we either must swap old header or call the runtime + success = true; + if (header.to_pointer() != nullptr) { + markWord old_header = markWord::encode(lock); + if (lockee->cas_set_mark(header, old_header) != old_header) { + // restore object for the slow case + end->set_obj(lockee); + success = false; + } + } + if (success) { + THREAD->dec_held_monitor_count(); } } - if (dec_monitor_count) { - THREAD->dec_held_monitor_count(); + if (!success) { + InterpreterRuntime::monitorexit(end); } // One error is plenty @@ -3188,7 +3216,7 @@ void BytecodeInterpreter::run(interpreterState istate) { illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); THREAD->clear_pending_exception(); } - } else if (LockingMode == LM_MONITOR) { + } else if (LockingMode != LM_LEGACY) { 
InterpreterRuntime::monitorexit(base); if (THREAD->has_pending_exception()) { if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp index ac895cc93f2..5870e49ac94 100644 --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -151,7 +151,7 @@ nonstatic_field(Array, _length, int) \ nonstatic_field(Array, _data[0], Klass*) \ \ - volatile_nonstatic_field(BasicLock, _displaced_header, markWord) \ + volatile_nonstatic_field(BasicLock, _metadata, uintptr_t) \ \ static_field(CodeCache, _low_bound, address) \ static_field(CodeCache, _high_bound, address) \ @@ -241,6 +241,7 @@ nonstatic_field(JavaThread, _stack_overflow_state._reserved_stack_activation, address) \ nonstatic_field(JavaThread, _held_monitor_count, intx) \ nonstatic_field(JavaThread, _lock_stack, LockStack) \ + nonstatic_field(JavaThread, _om_cache, OMCache) \ JVMTI_ONLY(nonstatic_field(JavaThread, _is_in_VTMS_transition, bool)) \ JVMTI_ONLY(nonstatic_field(JavaThread, _is_in_tmp_VTMS_transition, bool)) \ JVMTI_ONLY(nonstatic_field(JavaThread, _is_disable_suspend, bool)) \ @@ -531,6 +532,8 @@ \ declare_constant_with_value("CardTable::dirty_card", CardTable::dirty_card_val()) \ declare_constant_with_value("LockStack::_end_offset", LockStack::end_offset()) \ + declare_constant_with_value("OMCache::oop_to_oop_difference", OMCache::oop_to_oop_difference()) \ + declare_constant_with_value("OMCache::oop_to_monitor_difference", OMCache::oop_to_monitor_difference()) \ \ declare_constant(CodeInstaller::VERIFIED_ENTRY) \ declare_constant(CodeInstaller::UNVERIFIED_ENTRY) \ diff --git a/src/hotspot/share/logging/logTag.hpp b/src/hotspot/share/logging/logTag.hpp index b64097311a4..b53a015b0f2 100644 --- a/src/hotspot/share/logging/logTag.hpp +++ b/src/hotspot/share/logging/logTag.hpp @@ -130,6 +130,7 @@ class outputStream; LOG_TAG(module) \ LOG_TAG(monitorinflation) \ LOG_TAG(monitormismatch) \ + LOG_TAG(monitortable) \ LOG_TAG(native) \ LOG_TAG(nestmates) \ LOG_TAG(nmethod) \ diff --git a/src/hotspot/share/oops/markWord.cpp b/src/hotspot/share/oops/markWord.cpp index e861ab87182..2bbec570fa8 100644 --- a/src/hotspot/share/oops/markWord.cpp +++ b/src/hotspot/share/oops/markWord.cpp @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "oops/markWord.hpp" +#include "runtime/basicLock.inline.hpp" #include "runtime/javaThread.hpp" #include "runtime/objectMonitor.inline.hpp" #include "utilities/ostream.hpp" @@ -67,7 +68,7 @@ void markWord::print_on(outputStream* st, bool print_monitor_info) const { } else if (has_monitor()) { // last bits = 10 // have to check has_monitor() before is_locked() st->print(" monitor(" INTPTR_FORMAT ")=", value()); - if (print_monitor_info) { + if (print_monitor_info && !UseObjectMonitorTable) { ObjectMonitor* mon = monitor(); if (mon == nullptr) { st->print("null (this should never be seen!)"); diff --git a/src/hotspot/share/oops/markWord.hpp b/src/hotspot/share/oops/markWord.hpp index 12d6ee73acf..92577a8b40b 100644 --- a/src/hotspot/share/oops/markWord.hpp +++ b/src/hotspot/share/oops/markWord.hpp @@ -197,13 +197,17 @@ class markWord { } ObjectMonitor* monitor() const { assert(has_monitor(), "check"); + assert(!UseObjectMonitorTable, "Lightweight locking with OM table does not use markWord for monitors"); // Use xor instead of &~ to provide one extra tag-bit check. 
return (ObjectMonitor*) (value() ^ monitor_value); } bool has_displaced_mark_helper() const { intptr_t lockbits = value() & lock_mask_in_place; - return LockingMode == LM_LIGHTWEIGHT ? lockbits == monitor_value // monitor? - : (lockbits & unlocked_value) == 0; // monitor | stack-locked? + if (LockingMode == LM_LIGHTWEIGHT) { + return !UseObjectMonitorTable && lockbits == monitor_value; + } + // monitor (0b10) | stack-locked (0b00)? + return (lockbits & unlocked_value) == 0; } markWord displaced_mark_helper() const; void set_displaced_mark_helper(markWord m) const; @@ -223,10 +227,15 @@ class markWord { return from_pointer(lock); } static markWord encode(ObjectMonitor* monitor) { + assert(!UseObjectMonitorTable, "Lightweight locking with OM table does not use markWord for monitors"); uintptr_t tmp = (uintptr_t) monitor; return markWord(tmp | monitor_value); } + markWord set_has_monitor() const { + return markWord((value() & ~lock_mask_in_place) | monitor_value); + } + // used to encode pointers during GC markWord clear_lock_bits() const { return markWord(value() & ~lock_mask_in_place); } diff --git a/src/hotspot/share/opto/c2_CodeStubs.hpp b/src/hotspot/share/opto/c2_CodeStubs.hpp index 1316fa68ed4..5db7596e072 100644 --- a/src/hotspot/share/opto/c2_CodeStubs.hpp +++ b/src/hotspot/share/opto/c2_CodeStubs.hpp @@ -103,6 +103,7 @@ class C2FastUnlockLightweightStub : public C2CodeStub { Register _mark; Register _t; Register _thread; + Label _slow_path; Label _push_and_slow_path; Label _check_successor; Label _unlocked_continuation; @@ -111,6 +112,7 @@ class C2FastUnlockLightweightStub : public C2CodeStub { _obj(obj), _mark(mark), _t(t), _thread(thread) {} int max_size() const; void emit(C2_MacroAssembler& masm); + Label& slow_path() { return _slow_path; } Label& push_and_slow_path() { return _push_and_slow_path; } Label& check_successor() { return _check_successor; } Label& unlocked_continuation() { return _unlocked_continuation; } diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp index 9660413dd19..542514b1f7e 100644 --- a/src/hotspot/share/opto/library_call.cpp +++ b/src/hotspot/share/opto/library_call.cpp @@ -4604,21 +4604,23 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) { Node* no_ctrl = nullptr; Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); - // Test the header to see if it is safe to read w.r.t. locking. - Node *lock_mask = _gvn.MakeConX(markWord::lock_mask_in_place); - Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask)); - if (LockingMode == LM_LIGHTWEIGHT) { - Node *monitor_val = _gvn.MakeConX(markWord::monitor_value); - Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val)); - Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq)); - - generate_slow_guard(test_monitor, slow_region); - } else { - Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value); - Node *chk_unlocked = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val)); - Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne)); + if (!UseObjectMonitorTable) { + // Test the header to see if it is safe to read w.r.t. locking. 
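+    // With UseObjectMonitorTable the ObjectMonitor* lives in a side table, so the hash
+    // bits stay in the mark word and this lock-state guard is only needed otherwise.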
+ Node *lock_mask = _gvn.MakeConX(markWord::lock_mask_in_place); + Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask)); + if (LockingMode == LM_LIGHTWEIGHT) { + Node *monitor_val = _gvn.MakeConX(markWord::monitor_value); + Node *chk_monitor = _gvn.transform(new CmpXNode(lmasked_header, monitor_val)); + Node *test_monitor = _gvn.transform(new BoolNode(chk_monitor, BoolTest::eq)); + + generate_slow_guard(test_monitor, slow_region); + } else { + Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value); + Node *chk_unlocked = _gvn.transform(new CmpXNode(lmasked_header, unlocked_val)); + Node *test_not_unlocked = _gvn.transform(new BoolNode(chk_unlocked, BoolTest::ne)); - generate_slow_guard(test_not_unlocked, slow_region); + generate_slow_guard(test_not_unlocked, slow_region); + } } // Get the hash value and check to see that it has been properly assigned. diff --git a/src/hotspot/share/prims/jvmtiEnvBase.cpp b/src/hotspot/share/prims/jvmtiEnvBase.cpp index 1a6aec4e438..a05d6998093 100644 --- a/src/hotspot/share/prims/jvmtiEnvBase.cpp +++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp @@ -56,6 +56,7 @@ #include "runtime/osThread.hpp" #include "runtime/signature.hpp" #include "runtime/stackWatermarkSet.inline.hpp" +#include "runtime/synchronizer.inline.hpp" #include "runtime/threads.hpp" #include "runtime/threadSMR.inline.hpp" #include "runtime/vframe.inline.hpp" @@ -1465,7 +1466,6 @@ JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject objec ThreadsListHandle tlh(current_thread); JavaThread *owning_thread = nullptr; - ObjectMonitor *mon = nullptr; jvmtiMonitorUsage ret = { nullptr, 0, 0, nullptr, 0, nullptr }; @@ -1495,9 +1495,11 @@ JvmtiEnvBase::get_object_monitor_usage(JavaThread* calling_thread, jobject objec ResourceMark rm(current_thread); GrowableArray* wantList = nullptr; - if (mark.has_monitor()) { - mon = mark.monitor(); - assert(mon != nullptr, "must have monitor"); + ObjectMonitor* mon = mark.has_monitor() + ? ObjectSynchronizer::read_monitor(current_thread, hobj(), mark) + : nullptr; + + if (mon != nullptr) { // this object has a heavyweight monitor nWant = mon->contentions(); // # of threads contending for monitor entry, but not re-entry nWait = mon->waiters(); // # of threads waiting for notification, diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp index 9086a5f6c71..81b40e76a31 100644 --- a/src/hotspot/share/runtime/arguments.cpp +++ b/src/hotspot/share/runtime/arguments.cpp @@ -1816,8 +1816,18 @@ bool Arguments::check_vm_args_consistency() { FLAG_SET_CMDLINE(LockingMode, LM_LEGACY); warning("New lightweight locking not supported on this platform"); } + if (UseObjectMonitorTable) { + FLAG_SET_CMDLINE(UseObjectMonitorTable, false); + warning("UseObjectMonitorTable not supported on this platform"); + } #endif + if (UseObjectMonitorTable && LockingMode != LM_LIGHTWEIGHT) { + // ObjectMonitorTable requires lightweight locking. 
+ FLAG_SET_CMDLINE(UseObjectMonitorTable, false); + warning("UseObjectMonitorTable requires LM_LIGHTWEIGHT"); + } + #if !defined(X86) && !defined(AARCH64) && !defined(PPC64) && !defined(RISCV64) && !defined(S390) if (LockingMode == LM_MONITOR) { jio_fprintf(defaultStream::error_stream(), diff --git a/src/hotspot/share/runtime/basicLock.cpp b/src/hotspot/share/runtime/basicLock.cpp index 704bea85576..ec2e66a7d5d 100644 --- a/src/hotspot/share/runtime/basicLock.cpp +++ b/src/hotspot/share/runtime/basicLock.cpp @@ -24,16 +24,24 @@ #include "precompiled.hpp" #include "oops/oop.inline.hpp" -#include "runtime/basicLock.hpp" +#include "runtime/basicLock.inline.hpp" +#include "runtime/objectMonitor.hpp" #include "runtime/synchronizer.hpp" void BasicLock::print_on(outputStream* st, oop owner) const { st->print("monitor"); - markWord mark_word = displaced_header(); - if (mark_word.value() != 0) { - // Print monitor info if there's an owning oop and it refers to this BasicLock. - bool print_monitor_info = (owner != nullptr) && (owner->mark() == markWord::from_pointer((void*)this)); - mark_word.print_on(st, print_monitor_info); + if (UseObjectMonitorTable) { + ObjectMonitor* mon = object_monitor_cache(); + if (mon != nullptr) { + mon->print_on(st); + } + } else if (LockingMode == LM_LEGACY) { + markWord mark_word = displaced_header(); + if (mark_word.value() != 0) { + // Print monitor info if there's an owning oop and it refers to this BasicLock. + bool print_monitor_info = (owner != nullptr) && (owner->mark() == markWord::from_pointer((void*)this)); + mark_word.print_on(st, print_monitor_info); + } } } @@ -82,10 +90,15 @@ void BasicLock::move_to(oop obj, BasicLock* dest) { // we can find any flavor mark in the displaced mark. } dest->set_displaced_header(displaced_header()); + } else if (UseObjectMonitorTable) { + // Preserve the ObjectMonitor*, the cache is cleared when a box is reused + // and only read while the lock is held, so no stale ObjectMonitor* is + // encountered. + dest->set_object_monitor_cache(object_monitor_cache()); } #ifdef ASSERT else { - dest->set_displaced_header(markWord(badDispHeaderDeopt)); + dest->set_bad_metadata_deopt(); } #endif } diff --git a/src/hotspot/share/runtime/basicLock.hpp b/src/hotspot/share/runtime/basicLock.hpp index c348fa7f9a2..193aa63cf8f 100644 --- a/src/hotspot/share/runtime/basicLock.hpp +++ b/src/hotspot/share/runtime/basicLock.hpp @@ -28,30 +28,46 @@ #include "oops/markWord.hpp" #include "runtime/atomic.hpp" #include "runtime/handles.hpp" +#include "utilities/globalDefinitions.hpp" #include "utilities/sizes.hpp" class BasicLock { friend class VMStructs; friend class JVMCIVMStructs; private: + // * For LM_MONITOR + // Unused. + // * For LM_LEGACY // This is either the actual displaced header from a locked object, or // a sentinel zero value indicating a recursive stack-lock. - volatile markWord _displaced_header; + // * For LM_LIGHTWEIGHT + // Used as a cache of the ObjectMonitor* used when locking. Must either + // be nullptr or the ObjectMonitor* used when locking. 
+ volatile uintptr_t _metadata; + + uintptr_t get_metadata() const { return Atomic::load(&_metadata); } + void set_metadata(uintptr_t value) { Atomic::store(&_metadata, value); } + static int metadata_offset_in_bytes() { return (int)offset_of(BasicLock, _metadata); } + public: - markWord displaced_header() const { - return Atomic::load(&_displaced_header); - } + // LM_MONITOR + void set_bad_metadata_deopt() { set_metadata(badDispHeaderDeopt); } - void set_displaced_header(markWord header) { - Atomic::store(&_displaced_header, header); - } + // LM_LEGACY + inline markWord displaced_header() const; + inline void set_displaced_header(markWord header); + static int displaced_header_offset_in_bytes() { return metadata_offset_in_bytes(); } + + // LM_LIGHTWEIGHT + inline ObjectMonitor* object_monitor_cache() const; + inline void clear_object_monitor_cache(); + inline void set_object_monitor_cache(ObjectMonitor* mon); + static int object_monitor_cache_offset_in_bytes() { return metadata_offset_in_bytes(); } void print_on(outputStream* st, oop owner) const; // move a basic lock (used during deoptimization) void move_to(oop obj, BasicLock* dest); - - static int displaced_header_offset_in_bytes() { return (int)offset_of(BasicLock, _displaced_header); } }; // A BasicObjectLock associates a specific Java object with a BasicLock. diff --git a/src/hotspot/share/runtime/basicLock.inline.hpp b/src/hotspot/share/runtime/basicLock.inline.hpp new file mode 100644 index 00000000000..fb1cee8de8f --- /dev/null +++ b/src/hotspot/share/runtime/basicLock.inline.hpp @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_RUNTIME_BASICLOCK_INLINE_HPP +#define SHARE_RUNTIME_BASICLOCK_INLINE_HPP + +#include "runtime/basicLock.hpp" + +inline markWord BasicLock::displaced_header() const { + assert(LockingMode == LM_LEGACY, "must be"); + return markWord(get_metadata()); +} + +inline void BasicLock::set_displaced_header(markWord header) { + assert(LockingMode == LM_LEGACY, "must be"); + Atomic::store(&_metadata, header.value()); +} + +inline ObjectMonitor* BasicLock::object_monitor_cache() const { + assert(UseObjectMonitorTable, "must be"); +#if defined(X86) || defined(AARCH64) + return reinterpret_cast(get_metadata()); +#else + // Other platforms do not make use of the cache yet, + // and are not as careful with maintaining the invariant + // that the metadata either is nullptr or ObjectMonitor*. 
+ return nullptr; +#endif +} + +inline void BasicLock::clear_object_monitor_cache() { + assert(UseObjectMonitorTable, "must be"); + set_metadata(0); +} + +inline void BasicLock::set_object_monitor_cache(ObjectMonitor* mon) { + assert(UseObjectMonitorTable, "must be"); + set_metadata(reinterpret_cast(mon)); +} + +#endif // SHARE_RUNTIME_BASICLOCK_INLINE_HPP diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp index cf82ad7c027..7961e56598f 100644 --- a/src/hotspot/share/runtime/deoptimization.cpp +++ b/src/hotspot/share/runtime/deoptimization.cpp @@ -63,6 +63,7 @@ #include "prims/methodHandles.hpp" #include "prims/vectorSupport.hpp" #include "runtime/atomic.hpp" +#include "runtime/basicLock.inline.hpp" #include "runtime/continuation.hpp" #include "runtime/continuationEntry.inline.hpp" #include "runtime/deoptimization.hpp" @@ -75,6 +76,8 @@ #include "runtime/javaThread.hpp" #include "runtime/jniHandles.inline.hpp" #include "runtime/keepStackGCProcessed.hpp" +#include "runtime/lightweightSynchronizer.hpp" +#include "runtime/lockStack.inline.hpp" #include "runtime/objectMonitor.inline.hpp" #include "runtime/osThread.hpp" #include "runtime/safepointVerifiers.hpp" @@ -84,7 +87,7 @@ #include "runtime/stackValue.hpp" #include "runtime/stackWatermarkSet.hpp" #include "runtime/stubRoutines.hpp" -#include "runtime/synchronizer.hpp" +#include "runtime/synchronizer.inline.hpp" #include "runtime/threadSMR.hpp" #include "runtime/threadWXSetters.inline.hpp" #include "runtime/vframe.hpp" @@ -1634,22 +1637,37 @@ bool Deoptimization::relock_objects(JavaThread* thread, GrowableArraycurrent_waiting_monitor(); if (waiting_monitor != nullptr && waiting_monitor->object() == obj()) { assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization"); - mon_info->lock()->set_displaced_header(markWord::unused_mark()); + if (LockingMode == LM_LEGACY) { + mon_info->lock()->set_displaced_header(markWord::unused_mark()); + } else if (UseObjectMonitorTable) { + mon_info->lock()->clear_object_monitor_cache(); + } +#ifdef ASSERT + else { + assert(LockingMode == LM_MONITOR || !UseObjectMonitorTable, "must be"); + mon_info->lock()->set_bad_metadata_deopt(); + } +#endif JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread); continue; } } } + BasicLock* lock = mon_info->lock(); if (LockingMode == LM_LIGHTWEIGHT) { // We have lost information about the correct state of the lock stack. - // Inflate the locks instead. Enter then inflate to avoid races with - // deflation. - ObjectSynchronizer::enter_for(obj, nullptr, deoptee_thread); + // Entering may create an invalid lock stack. Inflate the lock if it + // was fast_locked to restore the valid lock stack. 
+ ObjectSynchronizer::enter_for(obj, lock, deoptee_thread); + if (deoptee_thread->lock_stack().contains(obj())) { + LightweightSynchronizer::inflate_fast_locked_object(obj(), ObjectSynchronizer::InflateCause::inflate_cause_vm_internal, + deoptee_thread, thread); + } assert(mon_info->owner()->is_locked(), "object must be locked now"); - ObjectMonitor* mon = ObjectSynchronizer::inflate_for(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal); - assert(mon->owner() == deoptee_thread, "must be"); + assert(obj->mark().has_monitor(), "must be"); + assert(!deoptee_thread->lock_stack().contains(obj()), "must be"); + assert(ObjectSynchronizer::read_monitor(thread, obj(), obj->mark())->owner() == deoptee_thread, "must be"); } else { - BasicLock* lock = mon_info->lock(); ObjectSynchronizer::enter_for(obj, lock, deoptee_thread); assert(mon_info->owner()->is_locked(), "object must be locked now"); } diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp index 61efc0b9376..d442894798b 100644 --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -1956,6 +1956,17 @@ const int ObjectAlignmentInBytes = 8; "2: monitors & new lightweight locking (LM_LIGHTWEIGHT, default)") \ range(0, 2) \ \ + product(bool, UseObjectMonitorTable, false, DIAGNOSTIC, \ + "With Lightweight Locking mode, use a table to record inflated " \ + "monitors rather than the first word of the object.") \ + \ + product(int, LightweightFastLockingSpins, 13, DIAGNOSTIC, \ + "Specifies the number of times lightweight fast locking will " \ + "attempt to CAS the markWord before inflating. Between each " \ + "CAS it will spin for exponentially more time, resulting in " \ + "a total number of spins on the order of O(2^value)") \ + range(1, 30) \ + \ product(uint, TrimNativeHeapInterval, 0, \ "Interval, in ms, at which the JVM will trim the native heap if " \ "the platform supports that. 
Lower values will reclaim memory " \ diff --git a/src/hotspot/share/runtime/javaThread.cpp b/src/hotspot/share/runtime/javaThread.cpp index b69ab708afa..0c00ef75c92 100644 --- a/src/hotspot/share/runtime/javaThread.cpp +++ b/src/hotspot/share/runtime/javaThread.cpp @@ -504,7 +504,8 @@ JavaThread::JavaThread(MEMFLAGS flags) : _SleepEvent(ParkEvent::Allocate(this)), - _lock_stack(this) { + _lock_stack(this), + _om_cache(this) { set_jni_functions(jni_functions()); #if INCLUDE_JVMCI @@ -803,6 +804,8 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) { elapsedTimer _timer_exit_phase3; elapsedTimer _timer_exit_phase4; + om_clear_monitor_cache(); + if (log_is_enabled(Debug, os, thread, timer)) { _timer_exit_phase1.start(); } diff --git a/src/hotspot/share/runtime/javaThread.hpp b/src/hotspot/share/runtime/javaThread.hpp index 755a8268864..54a11dad9b8 100644 --- a/src/hotspot/share/runtime/javaThread.hpp +++ b/src/hotspot/share/runtime/javaThread.hpp @@ -61,6 +61,7 @@ class JvmtiSampledObjectAllocEventCollector; class JvmtiThreadState; class Metadata; +class ObjectMonitor; class OopHandleList; class OopStorage; class OSThread; @@ -1165,6 +1166,7 @@ class JavaThread: public Thread { private: LockStack _lock_stack; + OMCache _om_cache; public: LockStack& lock_stack() { return _lock_stack; } @@ -1176,6 +1178,13 @@ class JavaThread: public Thread { static ByteSize lock_stack_top_offset() { return lock_stack_offset() + LockStack::top_offset(); } static ByteSize lock_stack_base_offset() { return lock_stack_offset() + LockStack::base_offset(); } + static ByteSize om_cache_offset() { return byte_offset_of(JavaThread, _om_cache); } + static ByteSize om_cache_oops_offset() { return om_cache_offset() + OMCache::entries_offset(); } + + void om_set_monitor_cache(ObjectMonitor* monitor); + void om_clear_monitor_cache(); + ObjectMonitor* om_get_from_monitor_cache(oop obj); + static OopStorage* thread_oop_storage(); static void verify_cross_modify_fence_failure(JavaThread *thread) PRODUCT_RETURN; diff --git a/src/hotspot/share/runtime/javaThread.inline.hpp b/src/hotspot/share/runtime/javaThread.inline.hpp index a51a30ae577..fabb589c2b5 100644 --- a/src/hotspot/share/runtime/javaThread.inline.hpp +++ b/src/hotspot/share/runtime/javaThread.inline.hpp @@ -36,7 +36,9 @@ #include "runtime/atomic.hpp" #include "runtime/continuation.hpp" #include "runtime/continuationEntry.inline.hpp" +#include "runtime/lockStack.inline.hpp" #include "runtime/nonJavaThread.hpp" +#include "runtime/objectMonitor.inline.hpp" #include "runtime/orderAccess.hpp" #include "runtime/safepoint.hpp" @@ -239,4 +241,25 @@ inline InstanceKlass* JavaThread::class_to_be_initialized() const { return _class_to_be_initialized; } +inline void JavaThread::om_set_monitor_cache(ObjectMonitor* monitor) { + assert(UseObjectMonitorTable, "must be"); + assert(monitor != nullptr, "use om_clear_monitor_cache to clear"); + assert(this == current() || monitor->owner_raw() == this, "only add owned monitors for other threads"); + assert(this == current() || is_obj_deopt_suspend(), "thread must not run concurrently"); + + _om_cache.set_monitor(monitor); +} + +inline void JavaThread::om_clear_monitor_cache() { + if (UseObjectMonitorTable) { + _om_cache.clear(); + } +} + +inline ObjectMonitor* JavaThread::om_get_from_monitor_cache(oop obj) { + assert(obj != nullptr, "do not look for null objects"); + assert(this == current(), "only get own thread locals"); + return _om_cache.get_monitor(obj); +} + #endif // SHARE_RUNTIME_JAVATHREAD_INLINE_HPP diff --git 
a/src/hotspot/share/runtime/lightweightSynchronizer.cpp b/src/hotspot/share/runtime/lightweightSynchronizer.cpp new file mode 100644 index 00000000000..0e360dba97b --- /dev/null +++ b/src/hotspot/share/runtime/lightweightSynchronizer.cpp @@ -0,0 +1,1223 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "classfile/vmSymbols.hpp" +#include "jfrfiles/jfrEventClasses.hpp" +#include "logging/log.hpp" +#include "memory/allStatic.hpp" +#include "memory/resourceArea.hpp" +#include "nmt/memflags.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/atomic.hpp" +#include "runtime/basicLock.inline.hpp" +#include "runtime/globals_extension.hpp" +#include "runtime/interfaceSupport.inline.hpp" +#include "runtime/javaThread.inline.hpp" +#include "runtime/lightweightSynchronizer.hpp" +#include "runtime/lockStack.inline.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/objectMonitor.inline.hpp" +#include "runtime/os.hpp" +#include "runtime/perfData.inline.hpp" +#include "runtime/safepointMechanism.inline.hpp" +#include "runtime/safepointVerifiers.hpp" +#include "runtime/synchronizer.inline.hpp" +#include "runtime/timerTrace.hpp" +#include "runtime/trimNativeHeap.hpp" +#include "utilities/concurrentHashTable.inline.hpp" +#include "utilities/concurrentHashTableTasks.inline.hpp" +#include "utilities/globalDefinitions.hpp" + +// ConcurrentHashTable storing links from objects to ObjectMonitors +class ObjectMonitorTable : AllStatic { + struct Config { + using Value = ObjectMonitor*; + static uintx get_hash(Value const& value, bool* is_dead) { + return (uintx)value->hash(); + } + static void* allocate_node(void* context, size_t size, Value const& value) { + ObjectMonitorTable::inc_items_count(); + return AllocateHeap(size, MEMFLAGS::mtObjectMonitor); + }; + static void free_node(void* context, void* memory, Value const& value) { + ObjectMonitorTable::dec_items_count(); + FreeHeap(memory); + } + }; + using ConcurrentTable = ConcurrentHashTable; + + static ConcurrentTable* _table; + static volatile size_t _items_count; + static size_t _table_size; + static volatile bool _resize; + + class Lookup : public StackObj { + oop _obj; + + public: + explicit Lookup(oop obj) : _obj(obj) {} + + uintx get_hash() const { + uintx hash = _obj->mark().hash(); + assert(hash != 0, "should have a hash"); + return hash; + } + + bool equals(ObjectMonitor** value) { + assert(*value != nullptr, "must be"); + return (*value)->object_refers_to(_obj); + } + + bool 
is_dead(ObjectMonitor** value) { + assert(*value != nullptr, "must be"); + return false; + } + }; + + class LookupMonitor : public StackObj { + ObjectMonitor* _monitor; + + public: + explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {} + + uintx get_hash() const { + return _monitor->hash(); + } + + bool equals(ObjectMonitor** value) { + return (*value) == _monitor; + } + + bool is_dead(ObjectMonitor** value) { + assert(*value != nullptr, "must be"); + return (*value)->object_is_dead(); + } + }; + + static void inc_items_count() { + Atomic::inc(&_items_count); + } + + static void dec_items_count() { + Atomic::dec(&_items_count); + } + + static double get_load_factor() { + return (double)_items_count / (double)_table_size; + } + + static size_t table_size(Thread* current = Thread::current()) { + return ((size_t)1) << _table->get_size_log2(current); + } + + static size_t max_log_size() { + // TODO[OMTable]: Evaluate the max size. + // TODO[OMTable]: Need to fix init order to use Universe::heap()->max_capacity(); + // Using MaxHeapSize directly this early may be wrong, and there + // are definitely rounding errors (alignment). + const size_t max_capacity = MaxHeapSize; + const size_t min_object_size = CollectedHeap::min_dummy_object_size() * HeapWordSize; + const size_t max_objects = max_capacity / MAX2(MinObjAlignmentInBytes, checked_cast(min_object_size)); + const size_t log_max_objects = log2i_graceful(max_objects); + + return MAX2(MIN2(SIZE_BIG_LOG2, log_max_objects), min_log_size()); + } + + static size_t min_log_size() { + // ~= log(AvgMonitorsPerThreadEstimate default) + return 10; + } + + template + static size_t clamp_log_size(V log_size) { + return MAX2(MIN2(log_size, checked_cast(max_log_size())), checked_cast(min_log_size())); + } + + static size_t initial_log_size() { + const size_t estimate = log2i(MAX2(os::processor_count(), 1)) + log2i(MAX2(AvgMonitorsPerThreadEstimate, size_t(1))); + return clamp_log_size(estimate); + } + + static size_t grow_hint () { + return ConcurrentTable::DEFAULT_GROW_HINT; + } + + public: + static void create() { + _table = new ConcurrentTable(initial_log_size(), max_log_size(), grow_hint()); + _items_count = 0; + _table_size = table_size(); + _resize = false; + } + + static void verify_monitor_get_result(oop obj, ObjectMonitor* monitor) { +#ifdef ASSERT + if (SafepointSynchronize::is_at_safepoint()) { + bool has_monitor = obj->mark().has_monitor(); + assert(has_monitor == (monitor != nullptr), + "Inconsistency between markWord and ObjectMonitorTable has_monitor: %s monitor: " PTR_FORMAT, + BOOL_TO_STR(has_monitor), p2i(monitor)); + } +#endif + } + + static ObjectMonitor* monitor_get(Thread* current, oop obj) { + ObjectMonitor* result = nullptr; + Lookup lookup_f(obj); + auto found_f = [&](ObjectMonitor** found) { + assert((*found)->object_peek() == obj, "must be"); + result = *found; + }; + _table->get(current, lookup_f, found_f); + verify_monitor_get_result(obj, result); + return result; + } + + static void try_notify_grow() { + if (!_table->is_max_size_reached() && !Atomic::load(&_resize)) { + Atomic::store(&_resize, true); + if (Service_lock->try_lock()) { + Service_lock->notify(); + Service_lock->unlock(); + } + } + } + + static bool should_shrink() { + // Not implemented; + return false; + } + + static constexpr double GROW_LOAD_FACTOR = 0.75; + + static bool should_grow() { + return get_load_factor() > GROW_LOAD_FACTOR && !_table->is_max_size_reached(); + } + + static bool should_resize() { + return should_grow() || 
should_shrink() || Atomic::load(&_resize); + } + + template + static bool run_task(JavaThread* current, Task& task, const char* task_name, Args&... args) { + if (task.prepare(current)) { + log_trace(monitortable)("Started to %s", task_name); + TraceTime timer(task_name, TRACETIME_LOG(Debug, monitortable, perf)); + while (task.do_task(current, args...)) { + task.pause(current); + { + ThreadBlockInVM tbivm(current); + } + task.cont(current); + } + task.done(current); + return true; + } + return false; + } + + static bool grow(JavaThread* current) { + ConcurrentTable::GrowTask grow_task(_table); + if (run_task(current, grow_task, "Grow")) { + _table_size = table_size(current); + log_info(monitortable)("Grown to size: %zu", _table_size); + return true; + } + return false; + } + + static bool clean(JavaThread* current) { + ConcurrentTable::BulkDeleteTask clean_task(_table); + auto is_dead = [&](ObjectMonitor** monitor) { + return (*monitor)->object_is_dead(); + }; + auto do_nothing = [&](ObjectMonitor** monitor) {}; + NativeHeapTrimmer::SuspendMark sm("ObjectMonitorTable"); + return run_task(current, clean_task, "Clean", is_dead, do_nothing); + } + + static bool resize(JavaThread* current) { + LogTarget(Info, monitortable) lt; + bool success = false; + + if (should_grow()) { + lt.print("Start growing with load factor %f", get_load_factor()); + success = grow(current); + } else { + if (!_table->is_max_size_reached() && Atomic::load(&_resize)) { + lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor()); + } + lt.print("Start cleaning with load factor %f", get_load_factor()); + success = clean(current); + } + + Atomic::store(&_resize, false); + + return success; + } + + static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) { + // Enter the monitor into the concurrent hashtable. 
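+    // insert_get either installs 'monitor' or hands back an existing mapping for 'obj';
+    // the caller compares the result against its speculative monitor and deletes it on a lost race.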
+ ObjectMonitor* result = monitor; + Lookup lookup_f(obj); + auto found_f = [&](ObjectMonitor** found) { + assert((*found)->object_peek() == obj, "must be"); + result = *found; + }; + bool grow; + _table->insert_get(current, lookup_f, monitor, found_f, &grow); + verify_monitor_get_result(obj, result); + if (grow) { + try_notify_grow(); + } + return result; + } + + static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) { + LookupMonitor lookup_f(monitor); + return _table->remove(current, lookup_f); + } + + static bool contains_monitor(Thread* current, ObjectMonitor* monitor) { + LookupMonitor lookup_f(monitor); + bool result = false; + auto found_f = [&](ObjectMonitor** found) { + result = true; + }; + _table->get(current, lookup_f, found_f); + return result; + } + + static void print_on(outputStream* st) { + auto printer = [&] (ObjectMonitor** entry) { + ObjectMonitor* om = *entry; + oop obj = om->object_peek(); + st->print("monitor=" PTR_FORMAT ", ", p2i(om)); + st->print("object=" PTR_FORMAT, p2i(obj)); + assert(obj->mark().hash() == om->hash(), "hash must match"); + st->cr(); + return true; + }; + if (SafepointSynchronize::is_at_safepoint()) { + _table->do_safepoint_scan(printer); + } else { + _table->do_scan(Thread::current(), printer); + } + } +}; + +ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr; +volatile size_t ObjectMonitorTable::_items_count = 0; +size_t ObjectMonitorTable::_table_size = 0; +volatile bool ObjectMonitorTable::_resize = false; + +ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) { + assert(LockingMode == LM_LIGHTWEIGHT, "must be"); + + ObjectMonitor* monitor = get_monitor_from_table(current, object); + if (monitor != nullptr) { + *inserted = false; + return monitor; + } + + ObjectMonitor* alloced_monitor = new ObjectMonitor(object); + alloced_monitor->set_owner_anonymous(); + + // Try insert monitor + monitor = add_monitor(current, alloced_monitor, object); + + *inserted = alloced_monitor == monitor; + if (!*inserted) { + delete alloced_monitor; + } + + return monitor; +} + +static void log_inflate(Thread* current, oop object, ObjectSynchronizer::InflateCause cause) { + if (log_is_enabled(Trace, monitorinflation)) { + ResourceMark rm(current); + log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark=" + INTPTR_FORMAT ", type='%s' cause=%s", p2i(object), + object->mark().value(), object->klass()->external_name(), + ObjectSynchronizer::inflate_cause_name(cause)); + } +} + +static void post_monitor_inflate_event(EventJavaMonitorInflate* event, + const oop obj, + ObjectSynchronizer::InflateCause cause) { + assert(event != nullptr, "invariant"); + event->set_monitorClass(obj->klass()); + event->set_address((uintptr_t)(void*)obj); + event->set_cause((u1)cause); + event->commit(); +} + +ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause) { + assert(UseObjectMonitorTable, "must be"); + + EventJavaMonitorInflate event; + + bool inserted; + ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted); + + if (inserted) { + // Hopefully the performance counters are allocated on distinct + // cache lines to avoid false sharing on MP systems ... 
+ OM_PERFDATA_OP(Inflations, inc()); + log_inflate(current, object, cause); + if (event.should_commit()) { + post_monitor_inflate_event(&event, object, cause); + } + + // The monitor has an anonymous owner so it is safe from async deflation. + ObjectSynchronizer::_in_use_list.add(monitor); + } + + return monitor; +} + +// Add the hashcode to the monitor to match the object and put it in the hashtable. +ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) { + assert(UseObjectMonitorTable, "must be"); + assert(obj == monitor->object(), "must be"); + + intptr_t hash = obj->mark().hash(); + assert(hash != 0, "must be set when claiming the object monitor"); + monitor->set_hash(hash); + + return ObjectMonitorTable::monitor_put_get(current, monitor, obj); +} + +bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) { + assert(UseObjectMonitorTable, "must be"); + assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead"); + + return ObjectMonitorTable::remove_monitor_entry(current, monitor); +} + +void LightweightSynchronizer::deflate_mark_word(oop obj) { + assert(UseObjectMonitorTable, "must be"); + + markWord mark = obj->mark_acquire(); + assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash"); + + while (mark.has_monitor()) { + const markWord new_mark = mark.clear_lock_bits().set_unlocked(); + mark = obj->cas_set_mark(new_mark, mark); + } +} + +void LightweightSynchronizer::initialize() { + if (!UseObjectMonitorTable) { + return; + } + ObjectMonitorTable::create(); +} + +bool LightweightSynchronizer::needs_resize() { + if (!UseObjectMonitorTable) { + return false; + } + return ObjectMonitorTable::should_resize(); +} + +bool LightweightSynchronizer::resize_table(JavaThread* current) { + if (!UseObjectMonitorTable) { + return true; + } + return ObjectMonitorTable::resize(current); +} + +class LightweightSynchronizer::LockStackInflateContendedLocks : private OopClosure { + private: + oop _contended_oops[LockStack::CAPACITY]; + int _length; + + void do_oop(oop* o) final { + oop obj = *o; + if (obj->mark_acquire().has_monitor()) { + if (_length > 0 && _contended_oops[_length - 1] == obj) { + // Recursive + return; + } + _contended_oops[_length++] = obj; + } + } + + void do_oop(narrowOop* o) final { + ShouldNotReachHere(); + } + + public: + LockStackInflateContendedLocks() : + _contended_oops(), + _length(0) {}; + + void inflate(JavaThread* current) { + assert(current == JavaThread::current(), "must be"); + current->lock_stack().oops_do(this); + for (int i = 0; i < _length; i++) { + LightweightSynchronizer:: + inflate_fast_locked_object(_contended_oops[i], ObjectSynchronizer::inflate_cause_vm_internal, current, current); + } + } +}; + +void LightweightSynchronizer::ensure_lock_stack_space(JavaThread* current) { + assert(current == JavaThread::current(), "must be"); + LockStack& lock_stack = current->lock_stack(); + + // Make room on lock_stack + if (lock_stack.is_full()) { + // Inflate contended objects + LockStackInflateContendedLocks().inflate(current); + if (lock_stack.is_full()) { + // Inflate the oldest object + inflate_fast_locked_object(lock_stack.bottom(), ObjectSynchronizer::inflate_cause_vm_internal, current, current); + } + } +} + +class LightweightSynchronizer::CacheSetter : StackObj { + JavaThread* const _thread; + BasicLock* const _lock; + ObjectMonitor* _monitor; + + NONCOPYABLE(CacheSetter); + + public: + CacheSetter(JavaThread* thread, 
BasicLock* lock) : + _thread(thread), + _lock(lock), + _monitor(nullptr) {} + + ~CacheSetter() { + // Only use the cache if using the table. + if (UseObjectMonitorTable) { + if (_monitor != nullptr) { + _thread->om_set_monitor_cache(_monitor); + _lock->set_object_monitor_cache(_monitor); + } else { + _lock->clear_object_monitor_cache(); + } + } + } + + void set_monitor(ObjectMonitor* monitor) { + assert(_monitor == nullptr, "only set once"); + _monitor = monitor; + } + +}; + +class LightweightSynchronizer::VerifyThreadState { + bool _no_safepoint; + + public: + VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) { + assert(current == Thread::current(), "must be"); + assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently"); + if (_no_safepoint) { + DEBUG_ONLY(JavaThread::current()->inc_no_safepoint_count();) + } + } + ~VerifyThreadState() { + if (_no_safepoint){ + DEBUG_ONLY(JavaThread::current()->dec_no_safepoint_count();) + } + } +}; + +inline bool LightweightSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) { + markWord mark = obj->mark(); + while (mark.is_unlocked()) { + ensure_lock_stack_space(current); + assert(!lock_stack.is_full(), "must have made room on the lock stack"); + assert(!lock_stack.contains(obj), "thread must not already hold the lock"); + // Try to swing into 'fast-locked' state. + markWord locked_mark = mark.set_fast_locked(); + markWord old_mark = mark; + mark = obj->cas_set_mark(locked_mark, old_mark); + if (old_mark == mark) { + // Successfully fast-locked, push object to lock-stack and return. + lock_stack.push(obj); + return true; + } + } + return false; +} + +bool LightweightSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) { + assert(UseObjectMonitorTable, "must be"); + // Will spin with exponential backoff with an accumulative O(2^spin_limit) spins. + const int log_spin_limit = os::is_MP() ? LightweightFastLockingSpins : 1; + const int log_min_safepoint_check_interval = 10; + + markWord mark = obj->mark(); + const auto should_spin = [&]() { + if (!mark.has_monitor()) { + // Spin while not inflated. + return true; + } else if (observed_deflation) { + // Spin while monitor is being deflated. + ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark); + return monitor == nullptr || monitor->is_being_async_deflated(); + } + // Else stop spinning. + return false; + }; + // Always attempt to lock once even when safepoint synchronizing. + bool should_process = false; + for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) { + // Spin with exponential backoff. + const int total_spin_count = 1 << i; + const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count); + const int outer_spin_count = total_spin_count / inner_spin_count; + for (int outer = 0; outer < outer_spin_count; outer++) { + should_process = SafepointMechanism::should_process(current); + if (should_process) { + // Stop spinning for safepoint. 
+ break; + } + for (int inner = 1; inner < inner_spin_count; inner++) { + SpinPause(); + } + } + + if (fast_lock_try_enter(obj, lock_stack, current)) return true; + } + return false; +} + +void LightweightSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) { + assert(LockingMode == LM_LIGHTWEIGHT, "must be"); + JavaThread* current = JavaThread::current(); + VerifyThreadState vts(locking_thread, current); + + if (obj->klass()->is_value_based()) { + ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread); + } + + locking_thread->inc_held_monitor_count(); + + CacheSetter cache_setter(locking_thread, lock); + + LockStack& lock_stack = locking_thread->lock_stack(); + + ObjectMonitor* monitor = nullptr; + if (lock_stack.contains(obj())) { + monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current); + bool entered = monitor->enter_for(locking_thread); + assert(entered, "recursive ObjectMonitor::enter_for must succeed"); + } else { + // It is assumed that enter_for must enter on an object without contention. + monitor = inflate_and_enter(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current); + } + + assert(monitor != nullptr, "LightweightSynchronizer::enter_for must succeed"); + cache_setter.set_monitor(monitor); +} + +void LightweightSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) { + assert(LockingMode == LM_LIGHTWEIGHT, "must be"); + assert(current == JavaThread::current(), "must be"); + + if (obj->klass()->is_value_based()) { + ObjectSynchronizer::handle_sync_on_value_based_class(obj, current); + } + + current->inc_held_monitor_count(); + + CacheSetter cache_setter(current, lock); + + // Used when deflation is observed. Progress here requires progress + // from the deflator. After observing that the deflator is not + // making progress (after two yields), switch to sleeping. + SpinYield spin_yield(0, 2); + bool observed_deflation = false; + + LockStack& lock_stack = current->lock_stack(); + + if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) { + // Recursively fast locked + return; + } + + if (lock_stack.contains(obj())) { + ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current); + bool entered = monitor->enter(current); + assert(entered, "recursive ObjectMonitor::enter must succeed"); + cache_setter.set_monitor(monitor); + return; + } + + while (true) { + // Fast-locking does not use the 'lock' argument. + // Fast-lock spinning to avoid inflating for short critical sections. + // The goal is to only inflate when the extra cost of using ObjectMonitors + // is worth it. + // If deflation has been observed we also spin while deflation is ongoing. + if (fast_lock_try_enter(obj(), lock_stack, current)) { + return; + } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) { + return; + } + + if (observed_deflation) { + spin_yield.wait(); + } + + ObjectMonitor* monitor = inflate_and_enter(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current); + if (monitor != nullptr) { + cache_setter.set_monitor(monitor); + return; + } + + // If inflate_and_enter returns nullptr it is because a deflated monitor + // was encountered. Fallback to fast locking. The deflater is responsible + // for clearing out the monitor and transitioning the markWord back to + // fast locking. 
+ observed_deflation = true; + } +} + +void LightweightSynchronizer::exit(oop object, JavaThread* current) { + assert(LockingMode == LM_LIGHTWEIGHT, "must be"); + assert(current == Thread::current(), "must be"); + + markWord mark = object->mark(); + assert(!mark.is_unlocked(), "must be"); + + LockStack& lock_stack = current->lock_stack(); + if (mark.is_fast_locked()) { + if (lock_stack.try_recursive_exit(object)) { + // This is a recursive exit which succeeded + return; + } + if (lock_stack.is_recursive(object)) { + // Must inflate recursive locks if try_recursive_exit fails + // This happens for un-structured unlocks, could potentially + // fix try_recursive_exit to handle these. + inflate_fast_locked_object(object, ObjectSynchronizer::inflate_cause_vm_internal, current, current); + } + } + + while (mark.is_fast_locked()) { + markWord unlocked_mark = mark.set_unlocked(); + markWord old_mark = mark; + mark = object->cas_set_mark(unlocked_mark, old_mark); + if (old_mark == mark) { + // CAS successful, remove from lock_stack + size_t recursion = lock_stack.remove(object) - 1; + assert(recursion == 0, "Should not have unlocked here"); + return; + } + } + + assert(mark.has_monitor(), "must be"); + // The monitor exists + ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, object, mark); + if (monitor->is_owner_anonymous()) { + assert(current->lock_stack().contains(object), "current must have object on its lock stack"); + monitor->set_owner_from_anonymous(current); + monitor->set_recursions(current->lock_stack().remove(object) - 1); + } + + monitor->exit(current); +} + +// LightweightSynchronizer::inflate_locked_or_imse is used to to get an inflated +// ObjectMonitor* with LM_LIGHTWEIGHT. It is used from contexts which require +// an inflated ObjectMonitor* for a monitor, and expects to throw a +// java.lang.IllegalMonitorStateException if it is not held by the current +// thread. Such as notify/wait and jni_exit. LM_LIGHTWEIGHT keeps it invariant +// that it only inflates if it is already locked by the current thread or the +// current thread is in the process of entering. To maintain this invariant we +// need to throw a java.lang.IllegalMonitorStateException before inflating if +// the current thread is not the owner. +// LightweightSynchronizer::inflate_locked_or_imse facilitates this. +ObjectMonitor* LightweightSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) { + assert(LockingMode == LM_LIGHTWEIGHT, "must be"); + JavaThread* current = THREAD; + + for (;;) { + markWord mark = obj->mark_acquire(); + if (mark.is_unlocked()) { + // No lock, IMSE. + THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(), + "current thread is not owner", nullptr); + } + + if (mark.is_fast_locked()) { + if (!current->lock_stack().contains(obj)) { + // Fast locked by other thread, IMSE. + THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(), + "current thread is not owner", nullptr); + } else { + // Current thread owns the lock, must inflate + return inflate_fast_locked_object(obj, cause, current, current); + } + } + + assert(mark.has_monitor(), "must be"); + ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark); + if (monitor != nullptr) { + if (monitor->is_owner_anonymous()) { + LockStack& lock_stack = current->lock_stack(); + if (lock_stack.contains(obj)) { + // Current thread owns the lock but someone else inflated it. + // Fix owner and pop lock stack. 
+ monitor->set_owner_from_anonymous(current); + monitor->set_recursions(lock_stack.remove(obj) - 1); + } else { + // Fast locked (and inflated) by other thread, or deflation in progress, IMSE. + THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(), + "current thread is not owner", nullptr); + } + } + return monitor; + } + } +} + +ObjectMonitor* LightweightSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* inflating_thread, Thread* current) { + + // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires + // that the inflating_thread == Thread::current() or is suspended throughout the call by + // some other mechanism. + // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a non + // JavaThread. (As may still be the case from FastHashCode). However it is only + // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread + // is set when called from ObjectSynchronizer::enter from the owning thread, + // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit. + EventJavaMonitorInflate event; + + for (;;) { + const markWord mark = object->mark_acquire(); + + // The mark can be in one of the following states: + // * inflated - Just return if using stack-locking. + // If using fast-locking and the ObjectMonitor owner + // is anonymous and the inflating_thread owns the + // object lock, then we make the inflating_thread + // the ObjectMonitor owner and remove the lock from + // the inflating_thread's lock stack. + // * fast-locked - Coerce it to inflated from fast-locked. + // * unlocked - Aggressively inflate the object. + + // CASE: inflated + if (mark.has_monitor()) { + ObjectMonitor* inf = mark.monitor(); + markWord dmw = inf->header(); + assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value()); + if (inf->is_owner_anonymous() && + inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) { + inf->set_owner_from_anonymous(inflating_thread); + size_t removed = inflating_thread->lock_stack().remove(object); + inf->set_recursions(removed - 1); + } + return inf; + } + + // CASE: fast-locked + // Could be fast-locked either by the inflating_thread or by some other thread. + // + // Note that we allocate the ObjectMonitor speculatively, _before_ + // attempting to set the object's mark to the new ObjectMonitor. If + // the inflating_thread owns the monitor, then we set the ObjectMonitor's + // owner to the inflating_thread. Otherwise, we set the ObjectMonitor's owner + // to anonymous. If we lose the race to set the object's mark to the + // new ObjectMonitor, then we just delete it and loop around again. + // + if (mark.is_fast_locked()) { + ObjectMonitor* monitor = new ObjectMonitor(object); + monitor->set_header(mark.set_unlocked()); + bool own = inflating_thread != nullptr && inflating_thread->lock_stack().contains(object); + if (own) { + // Owned by inflating_thread. + monitor->set_owner_from(nullptr, inflating_thread); + } else { + // Owned by somebody else. + monitor->set_owner_anonymous(); + } + markWord monitor_mark = markWord::encode(monitor); + markWord old_mark = object->cas_set_mark(monitor_mark, mark); + if (old_mark == mark) { + // Success! Return inflated monitor. 
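+        // The inflating thread's lock-stack entries for this object become monitor recursions.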
+ if (own) { + size_t removed = inflating_thread->lock_stack().remove(object); + monitor->set_recursions(removed - 1); + } + // Once the ObjectMonitor is configured and object is associated + // with the ObjectMonitor, it is safe to allow async deflation: + ObjectSynchronizer::_in_use_list.add(monitor); + + // Hopefully the performance counters are allocated on distinct + // cache lines to avoid false sharing on MP systems ... + OM_PERFDATA_OP(Inflations, inc()); + log_inflate(current, object, cause); + if (event.should_commit()) { + post_monitor_inflate_event(&event, object, cause); + } + return monitor; + } else { + delete monitor; + continue; // Interference -- just retry + } + } + + // CASE: unlocked + // TODO-FIXME: for entry we currently inflate and then try to CAS _owner. + // If we know we're inflating for entry it's better to inflate by swinging a + // pre-locked ObjectMonitor pointer into the object header. A successful + // CAS inflates the object *and* confers ownership to the inflating thread. + // In the current implementation we use a 2-step mechanism where we CAS() + // to inflate and then CAS() again to try to swing _owner from null to current. + // An inflateTry() method that we could call from enter() would be useful. + + assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value()); + ObjectMonitor* m = new ObjectMonitor(object); + // prepare m for installation - set monitor to initial state + m->set_header(mark); + + if (object->cas_set_mark(markWord::encode(m), mark) != mark) { + delete m; + m = nullptr; + continue; + // interference - the markword changed - just retry. + // The state-transitions are one-way, so there's no chance of + // live-lock -- "Inflated" is an absorbing state. + } + + // Once the ObjectMonitor is configured and object is associated + // with the ObjectMonitor, it is safe to allow async deflation: + ObjectSynchronizer::_in_use_list.add(m); + + // Hopefully the performance counters are allocated on distinct + // cache lines to avoid false sharing on MP systems ... + OM_PERFDATA_OP(Inflations, inc()); + log_inflate(current, object, cause); + if (event.should_commit()) { + post_monitor_inflate_event(&event, object, cause); + } + return m; + } +} + +ObjectMonitor* LightweightSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) { + assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight"); + VerifyThreadState vts(locking_thread, current); + assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack"); + + ObjectMonitor* monitor; + + if (!UseObjectMonitorTable) { + return inflate_into_object_header(object, cause, locking_thread, current); + } + + // Inflating requires a hash code + ObjectSynchronizer::FastHashCode(current, object); + + markWord mark = object->mark_acquire(); + assert(!mark.is_unlocked(), "Cannot be unlocked"); + + for (;;) { + // Fetch the monitor from the table + monitor = get_or_insert_monitor(object, current, cause); + + // ObjectMonitors are always inserted as anonymously owned, this thread is + // the current holder of the monitor. So unless the entry is stale and + // contains a deflating monitor it must be anonymously owned. 
+ if (monitor->is_owner_anonymous()) { + // The monitor must be anonymously owned if it was added + assert(monitor == get_monitor_from_table(current, object), "The monitor must be found"); + // New fresh monitor + break; + } + + // If the monitor was not anonymously owned then we got a deflating monitor + // from the table. We need to let the deflator make progress and remove this + // entry before we are allowed to add a new one. + os::naked_yield(); + assert(monitor->is_being_async_deflated(), "Should be the reason"); + } + + // Set the mark word; loop to handle concurrent updates to other parts of the mark word + while (mark.is_fast_locked()) { + mark = object->cas_set_mark(mark.set_has_monitor(), mark); + } + + // Indicate that the monitor now has a known owner + monitor->set_owner_from_anonymous(locking_thread); + + // Remove the entry from the thread's lock stack + monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1); + + if (locking_thread == current) { + // Only change the thread local state of the current thread. + locking_thread->om_set_monitor_cache(monitor); + } + + return monitor; +} + +ObjectMonitor* LightweightSynchronizer::inflate_and_enter(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) { + assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight"); + VerifyThreadState vts(locking_thread, current); + + // Note: In some paths (deoptimization) the 'current' thread inflates and + // enters the lock on behalf of the 'locking_thread' thread. + + ObjectMonitor* monitor = nullptr; + + if (!UseObjectMonitorTable) { + // Do the old inflate and enter. + monitor = inflate_into_object_header(object, cause, locking_thread, current); + + bool entered; + if (locking_thread == current) { + entered = monitor->enter(locking_thread); + } else { + entered = monitor->enter_for(locking_thread); + } + + // enter returns false for deflation found. + return entered ? monitor : nullptr; + } + + NoSafepointVerifier nsv; + + // Lightweight monitors require that hash codes are installed first + ObjectSynchronizer::FastHashCode(locking_thread, object); + + // Try to get the monitor from the thread-local cache. + // There's no need to use the cache if we are locking + // on behalf of another thread. + if (current == locking_thread) { + monitor = current->om_get_from_monitor_cache(object); + } + + // Get or create the monitor + if (monitor == nullptr) { + monitor = get_or_insert_monitor(object, current, cause); + } + + if (monitor->try_enter(locking_thread)) { + return monitor; + } + + // Holds is_being_async_deflated() stable throughout this function. + ObjectMonitorContentionMark contention_mark(monitor); + + /// First handle the case where the monitor from the table is deflated + if (monitor->is_being_async_deflated()) { + // The MonitorDeflation thread is deflating the monitor. The locking thread + // must spin until further progress has been made. + + const markWord mark = object->mark_acquire(); + + if (mark.has_monitor()) { + // Waiting on the deflation thread to remove the deflated monitor from the table. + os::naked_yield(); + + } else if (mark.is_fast_locked()) { + // Some other thread managed to fast-lock the lock, or this is a + // recursive lock from the same thread; yield for the deflation + // thread to remove the deflated monitor from the table. 
+ os::naked_yield(); + + } else { + assert(mark.is_unlocked(), "Implied"); + // Retry immediately + } + + // Retry + return nullptr; + } + + for (;;) { + const markWord mark = object->mark_acquire(); + // The mark can be in one of the following states: + // * inflated - If the ObjectMonitor owner is anonymous + // and the locking_thread owns the object + // lock, then we make the locking_thread + // the ObjectMonitor owner and remove the + // lock from the locking_thread's lock stack. + // * fast-locked - Coerce it to inflated from fast-locked. + // * neutral - Inflate the object. Successful CAS is locked + + // CASE: inflated + if (mark.has_monitor()) { + LockStack& lock_stack = locking_thread->lock_stack(); + if (monitor->is_owner_anonymous() && lock_stack.contains(object)) { + // The lock is fast-locked by the locking thread, + // convert it to a held monitor with a known owner. + monitor->set_owner_from_anonymous(locking_thread); + monitor->set_recursions(lock_stack.remove(object) - 1); + } + + break; // Success + } + + // CASE: fast-locked + // Could be fast-locked either by locking_thread or by some other thread. + // + if (mark.is_fast_locked()) { + markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark); + if (old_mark != mark) { + // CAS failed + continue; + } + + // Success! Return inflated monitor. + LockStack& lock_stack = locking_thread->lock_stack(); + if (lock_stack.contains(object)) { + // The lock is fast-locked by the locking thread, + // convert it to a held monitor with a known owner. + monitor->set_owner_from_anonymous(locking_thread); + monitor->set_recursions(lock_stack.remove(object) - 1); + } + + break; // Success + } + + // CASE: neutral (unlocked) + + // Catch if the object's header is not neutral (not locked and + // not marked is what we care about here). + assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value()); + markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark); + if (old_mark != mark) { + // CAS failed + continue; + } + + // Transitioned from unlocked to monitor means locking_thread owns the lock. + monitor->set_owner_from_anonymous(locking_thread); + + return monitor; + } + + if (current == locking_thread) { + // One round of spinning + if (monitor->spin_enter(locking_thread)) { + return monitor; + } + + // Monitor is contended, take the time before entering to fix the lock stack. 
+ LockStackInflateContendedLocks().inflate(current); + } + + // enter can block for safepoints; clear the unhandled object oop + PauseNoSafepointVerifier pnsv(&nsv); + object = nullptr; + + if (current == locking_thread) { + monitor->enter_with_contention_mark(locking_thread, contention_mark); + } else { + monitor->enter_for_with_contention_mark(locking_thread, contention_mark); + } + + return monitor; +} + +void LightweightSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) { + if (obj != nullptr) { + deflate_mark_word(obj); + } + bool removed = remove_monitor(current, monitor, obj); + if (obj != nullptr) { + assert(removed, "Should have removed the entry if obj was alive"); + } +} + +ObjectMonitor* LightweightSynchronizer::get_monitor_from_table(Thread* current, oop obj) { + assert(UseObjectMonitorTable, "must be"); + return ObjectMonitorTable::monitor_get(current, obj); +} + +bool LightweightSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) { + assert(UseObjectMonitorTable, "must be"); + return ObjectMonitorTable::contains_monitor(current, monitor); +} + +bool LightweightSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) { + assert(current->thread_state() == _thread_in_Java, "must be"); + assert(obj != nullptr, "must be"); + NoSafepointVerifier nsv; + + // If quick_enter succeeds with entering, the cache should be in a valid initialized state. + CacheSetter cache_setter(current, lock); + + LockStack& lock_stack = current->lock_stack(); + if (lock_stack.is_full()) { + // Always go into runtime if the lock stack is full. + return false; + } + + const markWord mark = obj->mark(); + +#ifndef _LP64 + // Only for 32bit which has limited support for fast locking outside the runtime. + if (lock_stack.try_recursive_enter(obj)) { + // Recursive lock successful. + current->inc_held_monitor_count(); + return true; + } + + if (mark.is_unlocked()) { + markWord locked_mark = mark.set_fast_locked(); + if (obj->cas_set_mark(locked_mark, mark) == mark) { + // Successfully fast-locked, push object to lock-stack and return. + lock_stack.push(obj); + current->inc_held_monitor_count(); + return true; + } + } +#endif + + if (mark.has_monitor()) { + ObjectMonitor* const monitor = UseObjectMonitorTable ? current->om_get_from_monitor_cache(obj) : + ObjectSynchronizer::read_monitor(mark); + + if (monitor == nullptr) { + // Take the slow-path on a cache miss. + return false; + } + + if (monitor->try_enter(current)) { + // ObjectMonitor enter successful. + cache_setter.set_monitor(monitor); + current->inc_held_monitor_count(); + return true; + } + } + + // Slow-path. + return false; +} diff --git a/src/hotspot/share/runtime/lightweightSynchronizer.hpp b/src/hotspot/share/runtime/lightweightSynchronizer.hpp new file mode 100644 index 00000000000..c546988f778 --- /dev/null +++ b/src/hotspot/share/runtime/lightweightSynchronizer.hpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_RUNTIME_LIGHTWEIGHTSYNCHRONIZER_HPP +#define SHARE_RUNTIME_LIGHTWEIGHTSYNCHRONIZER_HPP + +#include "memory/allStatic.hpp" +#include "runtime/javaThread.hpp" +#include "runtime/objectMonitor.hpp" +#include "runtime/synchronizer.hpp" + +class ObjectMonitorTable; + +class LightweightSynchronizer : AllStatic { + private: + static ObjectMonitor* get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted); + static ObjectMonitor* get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause); + + static ObjectMonitor* add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj); + static bool remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj); + + static void deflate_mark_word(oop object); + + static void ensure_lock_stack_space(JavaThread* current); + + class CacheSetter; + class LockStackInflateContendedLocks; + class VerifyThreadState; + + public: + static void initialize(); + + static bool needs_resize(); + static bool resize_table(JavaThread* current); + + private: + static inline bool fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current); + static bool fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation); + + public: + static void enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread); + static void enter(Handle obj, BasicLock* lock, JavaThread* current); + static void exit(oop object, JavaThread* current); + + static ObjectMonitor* inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* inflating_thread, Thread* current); + static ObjectMonitor* inflate_locked_or_imse(oop object, ObjectSynchronizer::InflateCause cause, TRAPS); + static ObjectMonitor* inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current); + static ObjectMonitor* inflate_and_enter(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current); + + static void deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor); + + static ObjectMonitor* get_monitor_from_table(Thread* current, oop obj); + + static bool contains_monitor(Thread* current, ObjectMonitor* monitor); + + static bool quick_enter(oop obj, BasicLock* Lock, JavaThread* current); +}; + +#endif // SHARE_RUNTIME_LIGHTWEIGHTSYNCHRONIZER_HPP diff --git a/src/hotspot/share/runtime/lockStack.cpp b/src/hotspot/share/runtime/lockStack.cpp index 2f783c97c99..185fc931b67 100644 --- a/src/hotspot/share/runtime/lockStack.cpp +++ b/src/hotspot/share/runtime/lockStack.cpp @@ -29,17 +29,20 @@ #include "oops/markWord.hpp" #include "oops/oop.inline.hpp" #include "runtime/globals.hpp" +#include "runtime/javaThread.inline.hpp" #include "runtime/lockStack.inline.hpp" #include "runtime/objectMonitor.inline.hpp" #include "runtime/safepoint.hpp" #include "runtime/stackWatermark.hpp" #include "runtime/stackWatermarkSet.inline.hpp" +#include 
"runtime/synchronizer.inline.hpp" #include "runtime/thread.hpp" #include "utilities/copy.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/growableArray.hpp" #include "utilities/ostream.hpp" +#include "utilities/sizes.hpp" #include @@ -114,3 +117,11 @@ void LockStack::print_on(outputStream* st) { } } } + +OMCache::OMCache(JavaThread* jt) : _entries() { + STATIC_ASSERT(std::is_standard_layout::value); + STATIC_ASSERT(std::is_standard_layout::value); + STATIC_ASSERT(offsetof(OMCache, _null_sentinel) == offsetof(OMCache, _entries) + + offsetof(OMCache::OMCacheEntry, _oop) + + OMCache::CAPACITY * in_bytes(oop_to_oop_difference())); +} diff --git a/src/hotspot/share/runtime/lockStack.hpp b/src/hotspot/share/runtime/lockStack.hpp index 064f70a37fb..de32eb41259 100644 --- a/src/hotspot/share/runtime/lockStack.hpp +++ b/src/hotspot/share/runtime/lockStack.hpp @@ -32,18 +32,20 @@ #include "utilities/sizes.hpp" class JavaThread; +class ObjectMonitor; class OopClosure; class outputStream; template class GrowableArray; +class Thread; class LockStack { friend class LockStackTest; friend class VMStructs; JVMCI_ONLY(friend class JVMCIVMStructs;) -public: + public: static const int CAPACITY = 8; -private: + private: // TODO: It would be very useful if JavaThread::lock_stack_offset() and friends were constexpr, // but this is currently not the case because we're using offset_of() which is non-constexpr, @@ -73,7 +75,7 @@ class LockStack { // Given an offset (in bytes) calculate the index into the lock-stack. static inline int to_index(uint32_t offset); -public: + public: static ByteSize top_offset() { return byte_offset_of(LockStack, _top); } static ByteSize base_offset() { return byte_offset_of(LockStack, _base); } @@ -123,4 +125,29 @@ class LockStack { void print_on(outputStream* st); }; +class OMCache { + friend class VMStructs; + public: + static constexpr int CAPACITY = 8; + + private: + struct OMCacheEntry { + oop _oop = nullptr; + ObjectMonitor* _monitor = nullptr; + } _entries[CAPACITY]; + const oop _null_sentinel = nullptr; + + public: + static ByteSize entries_offset() { return byte_offset_of(OMCache, _entries); } + static constexpr ByteSize oop_to_oop_difference() { return in_ByteSize(sizeof(OMCacheEntry)); } + static constexpr ByteSize oop_to_monitor_difference() { return in_ByteSize(sizeof(oop)); } + + explicit OMCache(JavaThread* jt); + + inline ObjectMonitor* get_monitor(oop o); + inline void set_monitor(ObjectMonitor* monitor); + inline void clear(); + +}; + #endif // SHARE_RUNTIME_LOCKSTACK_HPP diff --git a/src/hotspot/share/runtime/lockStack.inline.hpp b/src/hotspot/share/runtime/lockStack.inline.hpp index 7a9874a9291..515ca94c741 100644 --- a/src/hotspot/share/runtime/lockStack.inline.hpp +++ b/src/hotspot/share/runtime/lockStack.inline.hpp @@ -31,6 +31,8 @@ #include "memory/iterator.hpp" #include "runtime/javaThread.hpp" +#include "runtime/lightweightSynchronizer.hpp" +#include "runtime/objectMonitor.inline.hpp" #include "runtime/safepoint.hpp" #include "runtime/stackWatermark.hpp" #include "runtime/stackWatermarkSet.inline.hpp" @@ -222,4 +224,54 @@ inline void LockStack::oops_do(OopClosure* cl) { verify("post-oops-do"); } +inline void OMCache::set_monitor(ObjectMonitor *monitor) { + const int end = OMCache::CAPACITY - 1; + + oop obj = monitor->object_peek(); + assert(obj != nullptr, "must be alive"); + assert(monitor == LightweightSynchronizer::get_monitor_from_table(JavaThread::current(), obj), "must exist in table"); + + OMCacheEntry 
to_insert = {obj, monitor}; + + for (int i = 0; i < end; ++i) { + if (_entries[i]._oop == obj || + _entries[i]._monitor == nullptr || + _entries[i]._monitor->is_being_async_deflated()) { + // Use stale slot. + _entries[i] = to_insert; + return; + } + // Swap with the most recent value. + ::swap(to_insert, _entries[i]); + } + _entries[end] = to_insert; +} + +inline ObjectMonitor* OMCache::get_monitor(oop o) { + for (int i = 0; i < CAPACITY; ++i) { + if (_entries[i]._oop == o) { + assert(_entries[i]._monitor != nullptr, "monitor must exist"); + if (_entries[i]._monitor->is_being_async_deflated()) { + // Bad monitor + // Shift down rest + for (; i < CAPACITY - 1; ++i) { + _entries[i] = _entries[i + 1]; + } + // Clear end + _entries[i] = {}; + return nullptr; + } + return _entries[i]._monitor; + } + } + return nullptr; +} + +inline void OMCache::clear() { + for (size_t i = 0; i < CAPACITY; ++i) { + // Clear + _entries[i] = {}; + } +} + #endif // SHARE_RUNTIME_LOCKSTACK_INLINE_HPP diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp index c509ed691cd..367d79a5283 100644 --- a/src/hotspot/share/runtime/objectMonitor.cpp +++ b/src/hotspot/share/runtime/objectMonitor.cpp @@ -43,6 +43,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaThread.inline.hpp" +#include "runtime/lightweightSynchronizer.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/objectMonitor.inline.hpp" @@ -53,6 +54,7 @@ #include "runtime/safepointMechanism.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "services/threadService.hpp" +#include "utilities/debug.hpp" #include "utilities/dtrace.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" @@ -246,7 +248,7 @@ static void check_object_context() { } ObjectMonitor::ObjectMonitor(oop object) : - _header(markWord::zero()), + _metadata(0), _object(_oop_storage, object), _owner(nullptr), _previous_owner_tid(0), @@ -272,10 +274,6 @@ oop ObjectMonitor::object() const { return _object.resolve(); } -oop ObjectMonitor::object_peek() const { - return _object.peek(); -} - void ObjectMonitor::ExitOnSuspend::operator()(JavaThread* current) { if (current->is_suspended()) { _om->_recursions = 0; @@ -297,109 +295,147 @@ void ObjectMonitor::ClearSuccOnSuspend::operator()(JavaThread* current) { } } +#define assert_mark_word_consistency() \ + assert(UseObjectMonitorTable || object()->mark() == markWord::encode(this), \ + "object mark must match encoded this: mark=" INTPTR_FORMAT \ + ", encoded this=" INTPTR_FORMAT, object()->mark().value(), \ + markWord::encode(this).value()); + // ----------------------------------------------------------------------------- // Enter support -bool ObjectMonitor::enter_for(JavaThread* locking_thread) { +bool ObjectMonitor::enter_is_async_deflating() { + if (is_being_async_deflated()) { + if (!UseObjectMonitorTable) { + const oop l_object = object(); + if (l_object != nullptr) { + // Attempt to restore the header/dmw to the object's header so that + // we only retry once if the deflater thread happens to be slow. + install_displaced_markword_in_object(l_object); + } + } + return true; + } + + return false; +} + +void ObjectMonitor::enter_for_with_contention_mark(JavaThread* locking_thread, ObjectMonitorContentionMark& contention_mark) { // Used by ObjectSynchronizer::enter_for to enter for another thread. 
// The monitor is private to or already owned by locking_thread which must be suspended. // So this code may only contend with deflation. assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be"); + assert(contention_mark._monitor == this, "must be"); + assert(!is_being_async_deflated(), "must be"); - // Block out deflation as soon as possible. - add_to_contentions(1); + + void* prev_owner = try_set_owner_from(nullptr, locking_thread); bool success = false; - if (!is_being_async_deflated()) { - void* prev_owner = try_set_owner_from(nullptr, locking_thread); - if (prev_owner == nullptr) { - assert(_recursions == 0, "invariant"); - success = true; - } else if (prev_owner == locking_thread) { - _recursions++; - success = true; - } else if (prev_owner == DEFLATER_MARKER) { - // Racing with deflation. - prev_owner = try_set_owner_from(DEFLATER_MARKER, locking_thread); - if (prev_owner == DEFLATER_MARKER) { - // Cancelled deflation. Increment contentions as part of the deflation protocol. - add_to_contentions(1); - success = true; - } else if (prev_owner == nullptr) { - // At this point we cannot race with deflation as we have both incremented - // contentions, seen contention > 0 and seen a DEFLATER_MARKER. - // success will only be false if this races with something other than - // deflation. - prev_owner = try_set_owner_from(nullptr, locking_thread); - success = prev_owner == nullptr; - } - } else if (LockingMode == LM_LEGACY && locking_thread->is_lock_owned((address)prev_owner)) { - assert(_recursions == 0, "must be"); - _recursions = 1; - set_owner_from_BasicLock(prev_owner, locking_thread); + if (prev_owner == nullptr) { + assert(_recursions == 0, "invariant"); + success = true; + } else if (prev_owner == locking_thread) { + _recursions++; + success = true; + } else if (prev_owner == DEFLATER_MARKER) { + // Racing with deflation. + prev_owner = try_set_owner_from(DEFLATER_MARKER, locking_thread); + if (prev_owner == DEFLATER_MARKER) { + // Cancelled deflation. Increment contentions as part of the deflation protocol. + add_to_contentions(1); success = true; + } else if (prev_owner == nullptr) { + // At this point we cannot race with deflation as we have both incremented + // contentions, seen contention > 0 and seen a DEFLATER_MARKER. + // success will only be false if this races with something other than + // deflation. + prev_owner = try_set_owner_from(nullptr, locking_thread); + success = prev_owner == nullptr; } - assert(success, "Failed to enter_for: locking_thread=" INTPTR_FORMAT - ", this=" INTPTR_FORMAT "{owner=" INTPTR_FORMAT "}, observed owner: " INTPTR_FORMAT, - p2i(locking_thread), p2i(this), p2i(owner_raw()), p2i(prev_owner)); - } else { - // Async deflation is in progress and our contentions increment - // above lost the race to async deflation. Undo the work and - // force the caller to retry. - const oop l_object = object(); - if (l_object != nullptr) { - // Attempt to restore the header/dmw to the object's header so that - // we only retry once if the deflater thread happens to be slow. 
- install_displaced_markword_in_object(l_object); - } + } else if (LockingMode == LM_LEGACY && locking_thread->is_lock_owned((address)prev_owner)) { + assert(_recursions == 0, "must be"); + _recursions = 1; + set_owner_from_BasicLock(prev_owner, locking_thread); + success = true; } + assert(success, "Failed to enter_for: locking_thread=" INTPTR_FORMAT + ", this=" INTPTR_FORMAT "{owner=" INTPTR_FORMAT "}, observed owner: " INTPTR_FORMAT, + p2i(locking_thread), p2i(this), p2i(owner_raw()), p2i(prev_owner)); +} + +bool ObjectMonitor::enter_for(JavaThread* locking_thread) { - add_to_contentions(-1); + // Block out deflation as soon as possible. + ObjectMonitorContentionMark contention_mark(this); - assert(!success || owner_raw() == locking_thread, "must be"); + // Check for deflation. + if (enter_is_async_deflating()) { + return false; + } - return success; + enter_for_with_contention_mark(locking_thread, contention_mark); + assert(owner_raw() == locking_thread, "must be"); + return true; } -bool ObjectMonitor::enter(JavaThread* current) { - assert(current == JavaThread::current(), "must be"); - // The following code is ordered to check the most common cases first - // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. - - void* cur = try_set_owner_from(nullptr, current); - if (cur == nullptr) { +bool ObjectMonitor::try_enter(JavaThread* current) { + // TryLock avoids the CAS + TryLockResult r = TryLock(current); + if (r == TryLockResult::Success) { assert(_recursions == 0, "invariant"); return true; } - if (cur == current) { - // TODO-FIXME: check for integer overflow! BUGID 6557169. + if (r == TryLockResult::HasOwner && owner() == current) { _recursions++; return true; } - if (LockingMode != LM_LIGHTWEIGHT && current->is_lock_owned((address)cur)) { + void* cur = owner_raw(); + if (LockingMode == LM_LEGACY && current->is_lock_owned((address)cur)) { assert(_recursions == 0, "internal state error"); _recursions = 1; set_owner_from_BasicLock(cur, current); // Convert from BasicLock* to Thread*. return true; } + return false; +} + +bool ObjectMonitor::spin_enter(JavaThread* current) { + assert(current == JavaThread::current(), "must be"); + + // Check for recursion. + if (try_enter(current)) { + return true; + } + + // Check for deflation. + if (enter_is_async_deflating()) { + return false; + } + // We've encountered genuine contention. - // Try one round of spinning *before* enqueueing current - // and before going through the awkward and expensive state - // transitions. The following spin is strictly optional ... + // Do one round of spinning. // Note that if we acquire the monitor from an initial spin // we forgo posting JVMTI events and firing DTRACE probes. 
if (TrySpin(current)) { assert(owner_raw() == current, "must be current: owner=" INTPTR_FORMAT, p2i(owner_raw())); assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions); - assert(object()->mark() == markWord::encode(this), - "object mark must match encoded this: mark=" INTPTR_FORMAT - ", encoded this=" INTPTR_FORMAT, object()->mark().value(), - markWord::encode(this).value()); + assert_mark_word_consistency(); + return true; + } + + return false; +} + +bool ObjectMonitor::enter(JavaThread* current) { + assert(current == JavaThread::current(), "must be"); + + if (spin_enter(current)) { return true; } @@ -408,22 +444,25 @@ bool ObjectMonitor::enter(JavaThread* current) { assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); assert(current->thread_state() != _thread_blocked, "invariant"); - // Keep track of contention for JVM/TI and M&M queries. - add_to_contentions(1); - if (is_being_async_deflated()) { - // Async deflation is in progress and our contentions increment - // above lost the race to async deflation. Undo the work and - // force the caller to retry. - const oop l_object = object(); - if (l_object != nullptr) { - // Attempt to restore the header/dmw to the object's header so that - // we only retry once if the deflater thread happens to be slow. - install_displaced_markword_in_object(l_object); - } - add_to_contentions(-1); + // Keep is_being_async_deflated stable across the rest of enter + ObjectMonitorContentionMark contention_mark(this); + + // Check for deflation. + if (enter_is_async_deflating()) { return false; } + // At this point this ObjectMonitor cannot be deflated, finish contended enter + enter_with_contention_mark(current, contention_mark); + return true; +} + +void ObjectMonitor::enter_with_contention_mark(JavaThread *current, ObjectMonitorContentionMark &cm) { + assert(current == JavaThread::current(), "must be"); + assert(owner_raw() != current, "must be"); + assert(cm._monitor == this, "must be"); + assert(!is_being_async_deflated(), "must be"); + JFR_ONLY(JfrConditionalFlush flush(current);) EventJavaMonitorEnter event; if (event.is_started()) { @@ -480,14 +519,13 @@ bool ObjectMonitor::enter(JavaThread* current) { // the monitor free and clear. } - add_to_contentions(-1); assert(contentions() >= 0, "must not be negative: contentions=%d", contentions()); // Must either set _recursions = 0 or ASSERT _recursions == 0. assert(_recursions == 0, "invariant"); assert(owner_raw() == current, "invariant"); assert(_succ != current, "invariant"); - assert(object()->mark() == markWord::encode(this), "invariant"); + assert_mark_word_consistency(); // The thread -- now the owner -- is back in vm mode. // Report the glorious news via TI,DTrace and jvmstat. @@ -516,7 +554,6 @@ bool ObjectMonitor::enter(JavaThread* current) { event.commit(); } OM_PERFDATA_OP(ContendedLockAttempts, inc()); - return true; } // Caveat: TryLock() is not necessarily serializing if it returns failure. @@ -549,7 +586,7 @@ ObjectMonitor::TryLockResult ObjectMonitor::TryLock(JavaThread* current) { // (contentions < 0) // Contending threads that see that condition know to retry their operation. // -bool ObjectMonitor::deflate_monitor() { +bool ObjectMonitor::deflate_monitor(Thread* current) { if (is_busy()) { // Easy checks are first - the ObjectMonitor is busy so no deflation. 
return false; @@ -620,7 +657,11 @@ bool ObjectMonitor::deflate_monitor() { p2i(obj), obj->mark().value(), obj->klass()->external_name()); } + } + if (UseObjectMonitorTable) { + LightweightSynchronizer::deflate_monitor(current, obj, this); + } else if (obj != nullptr) { // Install the old mark word if nobody else has already done it. install_displaced_markword_in_object(obj); } @@ -636,6 +677,7 @@ bool ObjectMonitor::deflate_monitor() { // monitor and by other threads that have detected a race with the // deflation process. void ObjectMonitor::install_displaced_markword_in_object(const oop obj) { + assert(!UseObjectMonitorTable, "ObjectMonitorTable has no dmw"); // This function must only be called when (owner == DEFLATER_MARKER // && contentions <= 0), but we can't guarantee that here because // those values could change when the ObjectMonitor gets moved from @@ -972,12 +1014,11 @@ void ObjectMonitor::EnterI(JavaThread* current) { void ObjectMonitor::ReenterI(JavaThread* current, ObjectWaiter* currentNode) { assert(current != nullptr, "invariant"); + assert(current->thread_state() != _thread_blocked, "invariant"); assert(currentNode != nullptr, "invariant"); assert(currentNode->_thread == current, "invariant"); assert(_waiters > 0, "invariant"); - assert(object()->mark() == markWord::encode(this), "invariant"); - - assert(current->thread_state() != _thread_blocked, "invariant"); + assert_mark_word_consistency(); for (;;) { ObjectWaiter::TStates v = currentNode->TState; @@ -1042,7 +1083,7 @@ void ObjectMonitor::ReenterI(JavaThread* current, ObjectWaiter* currentNode) { // In addition, current.TState is stable. assert(owner_raw() == current, "invariant"); - assert(object()->mark() == markWord::encode(this), "invariant"); + assert_mark_word_consistency(); UnlinkAfterAcquire(current, currentNode); if (_succ == current) _succ = nullptr; assert(_succ != current, "invariant"); @@ -1668,7 +1709,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { // Verify a few postconditions assert(owner_raw() == current, "invariant"); assert(_succ != current, "invariant"); - assert(object()->mark() == markWord::encode(this), "invariant"); + assert_mark_word_consistency(); // check if the notification happened if (!WasNotified) { @@ -2185,7 +2226,7 @@ void ObjectMonitor::print() const { print_on(tty); } // Print the ObjectMonitor like a debugger would: // // (ObjectMonitor) 0x00007fdfb6012e40 = { -// _header = 0x0000000000000001 +// _metadata = 0x0000000000000001 // _object = 0x000000070ff45fd0 // _pad_buf0 = { // [0] = '\0' @@ -2214,7 +2255,7 @@ void ObjectMonitor::print() const { print_on(tty); } // void ObjectMonitor::print_debug_style_on(outputStream* st) const { st->print_cr("(ObjectMonitor*) " INTPTR_FORMAT " = {", p2i(this)); - st->print_cr(" _header = " INTPTR_FORMAT, header().value()); + st->print_cr(" _metadata = " INTPTR_FORMAT, _metadata); st->print_cr(" _object = " INTPTR_FORMAT, p2i(object_peek())); st->print_cr(" _pad_buf0 = {"); st->print_cr(" [0] = '\\0'"); diff --git a/src/hotspot/share/runtime/objectMonitor.hpp b/src/hotspot/share/runtime/objectMonitor.hpp index fdc482a3555..ef85559c2b6 100644 --- a/src/hotspot/share/runtime/objectMonitor.hpp +++ b/src/hotspot/share/runtime/objectMonitor.hpp @@ -33,6 +33,7 @@ #include "utilities/checkedCast.hpp" class ObjectMonitor; +class ObjectMonitorContentionMark; class ParkEvent; // ObjectWaiter serves as a "proxy" or surrogate thread. 
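
The deflate_monitor() change in the objectMonitor.cpp hunks above makes the clean-up step mode dependent: with UseObjectMonitorTable the oop-to-monitor table entry is removed, otherwise the displaced mark word is reinstalled in the object. Below is a minimal standalone sketch of that dispatch only; the map, flag and struct names are illustrative stand-ins, not HotSpot types, and the real code additionally fixes up the mark word bits, which is omitted here.

#include <cstdint>
#include <mutex>
#include <unordered_map>

// Illustrative stand-ins; not HotSpot types.
struct Obj     { uint64_t mark; };
struct Monitor { Obj* obj; uint64_t displaced_mark; };

static std::mutex table_lock;
static std::unordered_map<Obj*, Monitor*> monitor_table;  // models the side table
static bool use_monitor_table = true;                      // models UseObjectMonitorTable

// Two deflation clean-up strategies: drop the table entry (table mode),
// or put the displaced header back into the object (header mode).
void deflate_cleanup(Monitor* m) {
  if (use_monitor_table) {
    std::lock_guard<std::mutex> g(table_lock);
    monitor_table.erase(m->obj);        // remove the oop -> monitor mapping
  } else if (m->obj != nullptr) {
    m->obj->mark = m->displaced_mark;   // reinstall the displaced mark word
  }
  delete m;
}
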
@@ -69,20 +70,21 @@ class ObjectWaiter : public StackObj { // // ObjectMonitor Layout Overview/Highlights/Restrictions: // -// - The _header field must be at offset 0 because the displaced header +// - The _metadata field must be at offset 0 because the displaced header // from markWord is stored there. We do not want markWord.hpp to include // ObjectMonitor.hpp to avoid exposing ObjectMonitor everywhere. This // means that ObjectMonitor cannot inherit from any other class nor can // it use any virtual member functions. This restriction is critical to // the proper functioning of the VM. -// - The _header and _owner fields should be separated by enough space +// - The _metadata and _owner fields should be separated by enough space // to avoid false sharing due to parallel access by different threads. // This is an advisory recommendation. // - The general layout of the fields in ObjectMonitor is: -// _header +// _metadata // // // _owner +// // // - The VM assumes write ordering and machine word alignment with // respect to the _owner field and the that can @@ -106,20 +108,19 @@ class ObjectWaiter : public StackObj { // in synchronizer.cpp. Also see TEST_VM(SynchronizerTest, sanity) gtest. // // Futures notes: -// - Separating _owner from the by enough space to -// avoid false sharing might be profitable. Given -// http://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate -// we know that the CAS in monitorenter will invalidate the line -// underlying _owner. We want to avoid an L1 data cache miss on that -// same line for monitorexit. Putting these : -// _recursions, _EntryList, _cxq, and _succ, all of which may be -// fetched in the inflated unlock path, on a different cache line -// would make them immune to CAS-based invalidation from the _owner -// field. +// - Separating _owner from the by enough space to +// avoid false sharing might be profitable. Given that the CAS in +// monitorenter will invalidate the line underlying _owner. We want +// to avoid an L1 data cache miss on that same line for monitorexit. +// Putting these : +// _recursions, _EntryList, _cxq, and _succ, all of which may be +// fetched in the inflated unlock path, on a different cache line +// would make them immune to CAS-based invalidation from the _owner +// field. // -// - The _recursions field should be of type int, or int32_t but not -// intptr_t. There's no reason to use a 64-bit type for this field -// in a 64-bit JVM. +// - The _recursions field should be of type int, or int32_t but not +// intptr_t. There's no reason to use a 64-bit type for this field +// in a 64-bit JVM. #define OM_CACHE_LINE_SIZE DEFAULT_CACHE_LINE_SIZE @@ -131,15 +132,19 @@ class ObjectMonitor : public CHeapObj { static OopStorage* _oop_storage; - // The sync code expects the header field to be at offset zero (0). - // Enforced by the assert() in header_addr(). - volatile markWord _header; // displaced object header word - mark + // The sync code expects the metadata field to be at offset zero (0). + // Enforced by the assert() in metadata_addr(). + // * LM_LIGHTWEIGHT with UseObjectMonitorTable: + // Contains the _object's hashCode. + // * LM_LEGACY, LM_MONITOR, LM_LIGHTWEIGHT without UseObjectMonitorTable: + // Contains the displaced object header word - mark + volatile uintptr_t _metadata; // metadata WeakHandle _object; // backward object pointer - // Separate _header and _owner on different cache lines since both can - // have busy multi-threaded access. 
_header and _object are set at initial + // Separate _metadata and _owner on different cache lines since both can + // have busy multi-threaded access. _metadata and _object are set at initial // inflation. The _object does not change, so it is a good choice to share - // its cache line with _header. - DEFINE_PAD_MINUS_SIZE(0, OM_CACHE_LINE_SIZE, sizeof(volatile markWord) + + // its cache line with _metadata. + DEFINE_PAD_MINUS_SIZE(0, OM_CACHE_LINE_SIZE, sizeof(_metadata) + sizeof(WeakHandle)); // Used by async deflation as a marker in the _owner field. // Note that the choice of the two markers is peculiar: @@ -149,12 +154,13 @@ class ObjectMonitor : public CHeapObj { // and small values encode much better. // - We test for anonymous owner by testing for the lowest bit, therefore // DEFLATER_MARKER must *not* have that bit set. - #define DEFLATER_MARKER reinterpret_cast(2) -public: + static const uintptr_t DEFLATER_MARKER_VALUE = 2; + #define DEFLATER_MARKER reinterpret_cast(DEFLATER_MARKER_VALUE) + public: // NOTE: Typed as uintptr_t so that we can pick it up in SA, via vmStructs. static const uintptr_t ANONYMOUS_OWNER = 1; -private: + private: static void* anon_owner_ptr() { return reinterpret_cast(ANONYMOUS_OWNER); } void* volatile _owner; // pointer to owning thread OR BasicLock @@ -181,10 +187,9 @@ class ObjectMonitor : public CHeapObj { // along with other fields to determine if an ObjectMonitor can be // deflated. It is also used by the async deflation protocol. See // ObjectMonitor::deflate_monitor(). - protected: + ObjectWaiter* volatile _WaitSet; // LL of threads wait()ing on the monitor volatile int _waiters; // number of waiting threads - private: volatile int _WaitSetLock; // protects Wait Queue - simple spinlock public: @@ -213,6 +218,7 @@ class ObjectMonitor : public CHeapObj { static int Knob_SpinLimit; + static ByteSize metadata_offset() { return byte_offset_of(ObjectMonitor, _metadata); } static ByteSize owner_offset() { return byte_offset_of(ObjectMonitor, _owner); } static ByteSize recursions_offset() { return byte_offset_of(ObjectMonitor, _recursions); } static ByteSize cxq_offset() { return byte_offset_of(ObjectMonitor, _cxq); } @@ -233,9 +239,15 @@ class ObjectMonitor : public CHeapObj { #define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \ ((in_bytes(ObjectMonitor::f ## _offset())) - checked_cast(markWord::monitor_value)) - markWord header() const; - volatile markWord* header_addr(); - void set_header(markWord hdr); + uintptr_t metadata() const; + void set_metadata(uintptr_t value); + volatile uintptr_t* metadata_addr(); + + markWord header() const; + void set_header(markWord hdr); + + intptr_t hash() const; + void set_hash(intptr_t hash); bool is_busy() const { // TODO-FIXME: assert _owner == null implies _recursions = 0 @@ -306,6 +318,8 @@ class ObjectMonitor : public CHeapObj { oop object() const; oop object_peek() const; + bool object_is_dead() const; + bool object_refers_to(oop obj) const; // Returns true if the specified thread owns the ObjectMonitor. Otherwise // returns false and throws IllegalMonitorStateException (IMSE). 
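
The objectMonitor.hpp hunk above renames _header to _metadata and gives that word two interpretations: the displaced mark word in the header-based locking modes, and the object's identity hash when UseObjectMonitorTable is enabled. The following standalone sketch shows just that mode-checked accessor pattern; the class, the bool flag and the plain asserts are stand-ins for the real ObjectMonitor and its runtime checks, not HotSpot code.

#include <atomic>
#include <cassert>
#include <cstdint>

// Illustrative only; mirrors the accessor pattern, not the real ObjectMonitor.
class MonitorMetadata {
  std::atomic<uintptr_t> _metadata{0};   // displaced header OR hash, depending on mode
  static bool use_table;                 // stands in for UseObjectMonitorTable

 public:
  uintptr_t metadata() const       { return _metadata.load(std::memory_order_relaxed); }
  void set_metadata(uintptr_t v)   { _metadata.store(v, std::memory_order_relaxed); }

  // Header-based modes: the word holds the displaced mark word.
  uintptr_t header() const         { assert(!use_table); return metadata(); }
  void set_header(uintptr_t mark)  { assert(!use_table); set_metadata(mark); }

  // Table mode: the word holds the identity hash.
  intptr_t hash() const            { assert(use_table); return (intptr_t)metadata(); }
  void set_hash(intptr_t h)        { assert(use_table); set_metadata((uintptr_t)h); }
};
bool MonitorMetadata::use_table = true;
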
@@ -328,9 +342,15 @@ class ObjectMonitor : public CHeapObj { ClearSuccOnSuspend(ObjectMonitor* om) : _om(om) {} void operator()(JavaThread* current); }; + + bool enter_is_async_deflating(); public: + void enter_for_with_contention_mark(JavaThread* locking_thread, ObjectMonitorContentionMark& contention_mark); bool enter_for(JavaThread* locking_thread); bool enter(JavaThread* current); + bool try_enter(JavaThread* current); + bool spin_enter(JavaThread* current); + void enter_with_contention_mark(JavaThread* current, ObjectMonitorContentionMark& contention_mark); void exit(JavaThread* current, bool not_suspended = true); void wait(jlong millis, bool interruptible, TRAPS); void notify(TRAPS); @@ -364,8 +384,23 @@ class ObjectMonitor : public CHeapObj { void ExitEpilog(JavaThread* current, ObjectWaiter* Wakee); // Deflation support - bool deflate_monitor(); + bool deflate_monitor(Thread* current); + private: void install_displaced_markword_in_object(const oop obj); }; +// RAII object to ensure that ObjectMonitor::is_being_async_deflated() is +// stable within the context of this mark. +class ObjectMonitorContentionMark : StackObj { + DEBUG_ONLY(friend class ObjectMonitor;) + + ObjectMonitor* _monitor; + + NONCOPYABLE(ObjectMonitorContentionMark); + + public: + explicit ObjectMonitorContentionMark(ObjectMonitor* monitor); + ~ObjectMonitorContentionMark(); +}; + #endif // SHARE_RUNTIME_OBJECTMONITOR_HPP diff --git a/src/hotspot/share/runtime/objectMonitor.inline.hpp b/src/hotspot/share/runtime/objectMonitor.inline.hpp index b371663eeb3..d26c459b1b4 100644 --- a/src/hotspot/share/runtime/objectMonitor.inline.hpp +++ b/src/hotspot/share/runtime/objectMonitor.inline.hpp @@ -29,9 +29,13 @@ #include "logging/log.hpp" #include "oops/access.inline.hpp" +#include "oops/markWord.hpp" #include "runtime/atomic.hpp" +#include "runtime/globals.hpp" #include "runtime/lockStack.inline.hpp" #include "runtime/synchronizer.hpp" +#include "utilities/checkedCast.hpp" +#include "utilities/globalDefinitions.hpp" inline bool ObjectMonitor::is_entered(JavaThread* current) const { if (LockingMode == LM_LIGHTWEIGHT) { @@ -49,16 +53,38 @@ inline bool ObjectMonitor::is_entered(JavaThread* current) const { return false; } -inline markWord ObjectMonitor::header() const { - return Atomic::load(&_header); +inline uintptr_t ObjectMonitor::metadata() const { + return Atomic::load(&_metadata); +} + +inline void ObjectMonitor::set_metadata(uintptr_t value) { + Atomic::store(&_metadata, value); } -inline volatile markWord* ObjectMonitor::header_addr() { - return &_header; +inline volatile uintptr_t* ObjectMonitor::metadata_addr() { + STATIC_ASSERT(std::is_standard_layout::value); + STATIC_ASSERT(offsetof(ObjectMonitor, _metadata) == 0); + return &_metadata; +} + +inline markWord ObjectMonitor::header() const { + assert(!UseObjectMonitorTable, "Lightweight locking with OM table does not use header"); + return markWord(metadata()); } inline void ObjectMonitor::set_header(markWord hdr) { - Atomic::store(&_header, hdr); + assert(!UseObjectMonitorTable, "Lightweight locking with OM table does not use header"); + set_metadata(hdr.value()); +} + +inline intptr_t ObjectMonitor::hash() const { + assert(UseObjectMonitorTable, "Only used by lightweight locking with OM table"); + return metadata(); +} + +inline void ObjectMonitor::set_hash(intptr_t hash) { + assert(UseObjectMonitorTable, "Only used by lightweight locking with OM table"); + set_metadata(hash); } inline int ObjectMonitor::waiters() const { @@ -180,4 +206,31 @@ inline void 
ObjectMonitor::set_next_om(ObjectMonitor* new_value) { Atomic::store(&_next_om, new_value); } +inline ObjectMonitorContentionMark::ObjectMonitorContentionMark(ObjectMonitor* monitor) + : _monitor(monitor) { + _monitor->add_to_contentions(1); +} + +inline ObjectMonitorContentionMark::~ObjectMonitorContentionMark() { + _monitor->add_to_contentions(-1); +} + +inline oop ObjectMonitor::object_peek() const { + if (_object.is_null()) { + return nullptr; + } + return _object.peek(); +} + +inline bool ObjectMonitor::object_is_dead() const { + return object_peek() == nullptr; +} + +inline bool ObjectMonitor::object_refers_to(oop obj) const { + if (_object.is_null()) { + return false; + } + return _object.peek() == obj; +} + #endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP diff --git a/src/hotspot/share/runtime/safepoint.cpp b/src/hotspot/share/runtime/safepoint.cpp index 05f59de4a17..bea0b68354e 100644 --- a/src/hotspot/share/runtime/safepoint.cpp +++ b/src/hotspot/share/runtime/safepoint.cpp @@ -731,6 +731,11 @@ void ThreadSafepointState::account_safe_thread() { DEBUG_ONLY(_thread->set_visited_for_critical_count(SafepointSynchronize::safepoint_counter());) assert(!_safepoint_safe, "Must be unsafe before safe"); _safepoint_safe = true; + + // The oops in the monitor cache are cleared to prevent stale cache entries + // from keeping dead objects alive. Because these oops are always cleared + // before safepoint operations they are not visited in JavaThread::oops_do. + _thread->om_clear_monitor_cache(); } void ThreadSafepointState::restart() { diff --git a/src/hotspot/share/runtime/serviceThread.cpp b/src/hotspot/share/runtime/serviceThread.cpp index a81285ac97c..f5653ccd5f4 100644 --- a/src/hotspot/share/runtime/serviceThread.cpp +++ b/src/hotspot/share/runtime/serviceThread.cpp @@ -32,20 +32,21 @@ #include "classfile/vmClasses.hpp" #include "gc/shared/oopStorage.hpp" #include "gc/shared/oopStorageSet.hpp" -#include "memory/universe.hpp" #include "interpreter/oopMapCache.hpp" +#include "memory/universe.hpp" #include "oops/oopHandle.inline.hpp" +#include "prims/jvmtiImpl.hpp" +#include "prims/jvmtiTagMap.hpp" +#include "prims/resolvedMethodTable.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" #include "runtime/jniHandles.hpp" -#include "runtime/serviceThread.hpp" +#include "runtime/lightweightSynchronizer.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" -#include "prims/jvmtiImpl.hpp" -#include "prims/jvmtiTagMap.hpp" -#include "prims/resolvedMethodTable.hpp" +#include "runtime/serviceThread.hpp" #include "services/diagnosticArgument.hpp" #include "services/diagnosticFramework.hpp" #include "services/finalizerService.hpp" @@ -94,6 +95,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) { bool cldg_cleanup_work = false; bool jvmti_tagmap_work = false; bool oopmap_cache_work = false; + bool object_monitor_table_work = false; { // Need state transition ThreadBlockInVM so that this thread // will be handled by safepoint correctly when this thread is @@ -121,7 +123,8 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) { (oop_handles_to_release = JavaThread::has_oop_handles_to_release()) | (cldg_cleanup_work = ClassLoaderDataGraph::should_clean_metaspaces_and_reset()) | (jvmti_tagmap_work = JvmtiTagMap::has_object_free_events_and_reset()) | - (oopmap_cache_work = OopMapCache::has_cleanup_work()) + (oopmap_cache_work = OopMapCache::has_cleanup_work()) | + 
(object_monitor_table_work = LightweightSynchronizer::needs_resize()) ) == 0) { // Wait until notified that there is some work to do or timer expires. // Some cleanup requests don't notify the ServiceThread so work needs to be done at periodic intervals. @@ -183,6 +186,10 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) { if (oopmap_cache_work) { OopMapCache::cleanup(); } + + if (object_monitor_table_work) { + LightweightSynchronizer::resize_table(jt); + } } } diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp index 0eec7e4c34c..0587032ec5c 100644 --- a/src/hotspot/share/runtime/sharedRuntime.cpp +++ b/src/hotspot/share/runtime/sharedRuntime.cpp @@ -58,6 +58,7 @@ #include "prims/nativeLookup.hpp" #include "runtime/arguments.hpp" #include "runtime/atomic.hpp" +#include "runtime/basicLock.inline.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" @@ -69,13 +70,14 @@ #include "runtime/sharedRuntime.hpp" #include "runtime/stackWatermarkSet.hpp" #include "runtime/stubRoutines.hpp" -#include "runtime/synchronizer.hpp" +#include "runtime/synchronizer.inline.hpp" #include "runtime/vframe.inline.hpp" #include "runtime/vframeArray.hpp" #include "runtime/vm_version.hpp" #include "utilities/copy.hpp" #include "utilities/dtrace.hpp" #include "utilities/events.hpp" +#include "utilities/globalDefinitions.hpp" #include "utilities/resourceHash.hpp" #include "utilities/macros.hpp" #include "utilities/xmlstream.hpp" @@ -1883,7 +1885,7 @@ void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThre if (!SafepointSynchronize::is_synchronizing()) { // Only try quick_enter() if we're not trying to reach a safepoint // so that the calling thread reaches the safepoint more quickly. - if (ObjectSynchronizer::quick_enter(obj, current, lock)) { + if (ObjectSynchronizer::quick_enter(obj, lock, current)) { return; } } @@ -2945,6 +2947,8 @@ JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) ) // Now the displaced header is free to move because the // object's header no longer refers to it. buf[i] = (intptr_t)lock->displaced_header().value(); + } else if (UseObjectMonitorTable) { + buf[i] = (intptr_t)lock->object_monitor_cache(); } #ifdef ASSERT else { diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp index b4ff263d455..d8fb7a4734a 100644 --- a/src/hotspot/share/runtime/synchronizer.cpp +++ b/src/hotspot/share/runtime/synchronizer.cpp @@ -35,12 +35,14 @@ #include "oops/markWord.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" +#include "runtime/basicLock.inline.hpp" #include "runtime/frame.inline.hpp" #include "runtime/globals.hpp" #include "runtime/handles.inline.hpp" #include "runtime/handshake.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaThread.hpp" +#include "runtime/lightweightSynchronizer.hpp" #include "runtime/lockStack.inline.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.hpp" @@ -52,7 +54,7 @@ #include "runtime/safepointVerifiers.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" -#include "runtime/synchronizer.hpp" +#include "runtime/synchronizer.inline.hpp" #include "runtime/threads.hpp" #include "runtime/timer.hpp" #include "runtime/trimNativeHeap.hpp" @@ -276,6 +278,10 @@ void ObjectSynchronizer::initialize() { // Start the timer for deflations, so it does not trigger immediately. 
_last_async_deflation_time_ns = os::javaTimeNanos(); + + if (LockingMode == LM_LIGHTWEIGHT) { + LightweightSynchronizer::initialize(); + } } MonitorList ObjectSynchronizer::_in_use_list; @@ -349,7 +355,11 @@ bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool al } if (mark.has_monitor()) { - ObjectMonitor* const mon = mark.monitor(); + ObjectMonitor* const mon = read_monitor(current, obj, mark); + if (LockingMode == LM_LIGHTWEIGHT && mon == nullptr) { + // Racing with inflation/deflation go slow path + return false; + } assert(mon->object() == oop(obj), "invariant"); if (mon->owner() != current) return false; // slow-path for IMS exception @@ -376,6 +386,13 @@ bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool al return false; } +static bool useHeavyMonitors() { +#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390) + return LockingMode == LM_MONITOR; +#else + return false; +#endif +} // The LockNode emitted directly at the synchronization site would have // been too big if it were to have included support for the cases of inflated @@ -383,33 +400,24 @@ bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool al // Note that we can't safely call AsyncPrintJavaStack() from within // quick_enter() as our thread state remains _in_Java. -bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current, - BasicLock * lock) { +bool ObjectSynchronizer::quick_enter_legacy(oop obj, BasicLock* lock, JavaThread* current) { assert(current->thread_state() == _thread_in_Java, "invariant"); - NoSafepointVerifier nsv; - if (obj == nullptr) return false; // Need to throw NPE - if (obj->klass()->is_value_based()) { - return false; + if (useHeavyMonitors()) { + return false; // Slow path } if (LockingMode == LM_LIGHTWEIGHT) { - LockStack& lock_stack = current->lock_stack(); - if (lock_stack.is_full()) { - // Always go into runtime if the lock stack is full. - return false; - } - if (lock_stack.try_recursive_enter(obj)) { - // Recursive lock successful. - current->inc_held_monitor_count(); - return true; - } + return LightweightSynchronizer::quick_enter(obj, lock, current); } + assert(LockingMode == LM_LEGACY, "legacy mode below"); + const markWord mark = obj->mark(); if (mark.has_monitor()) { - ObjectMonitor* const m = mark.monitor(); + + ObjectMonitor* const m = read_monitor(mark); // An async deflation or GC can race us before we manage to make // the ObjectMonitor busy by setting the owner below. If we detect // that race we just bail out to the slow-path here. @@ -429,18 +437,16 @@ bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current, return true; } - if (LockingMode != LM_LIGHTWEIGHT) { - // This Java Monitor is inflated so obj's header will never be - // displaced to this thread's BasicLock. Make the displaced header - // non-null so this BasicLock is not seen as recursive nor as - // being locked. We do this unconditionally so that this thread's - // BasicLock cannot be mis-interpreted by any stack walkers. For - // performance reasons, stack walkers generally first check for - // stack-locking in the object's header, the second check is for - // recursive stack-locking in the displaced header in the BasicLock, - // and last are the inflated Java Monitor (ObjectMonitor) checks. - lock->set_displaced_header(markWord::unused_mark()); - } + // This Java Monitor is inflated so obj's header will never be + // displaced to this thread's BasicLock. 
Make the displaced header + // non-null so this BasicLock is not seen as recursive nor as + // being locked. We do this unconditionally so that this thread's + // BasicLock cannot be mis-interpreted by any stack walkers. For + // performance reasons, stack walkers generally first check for + // stack-locking in the object's header, the second check is for + // recursive stack-locking in the displaced header in the BasicLock, + // and last are the inflated Java Monitor (ObjectMonitor) checks. + lock->set_displaced_header(markWord::unused_mark()); if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) { assert(m->_recursions == 0, "invariant"); @@ -508,14 +514,6 @@ void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread } } -static bool useHeavyMonitors() { -#if defined(X86) || defined(AARCH64) || defined(PPC64) || defined(RISCV64) || defined(S390) - return LockingMode == LM_MONITOR; -#else - return false; -#endif -} - // ----------------------------------------------------------------------------- // Monitor Enter/Exit @@ -524,6 +522,11 @@ void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* lock // the locking_thread with respect to the current thread. Currently only used when // deoptimizing and re-locking locks. See Deoptimization::relock_objects assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be"); + + if (LockingMode == LM_LIGHTWEIGHT) { + return LightweightSynchronizer::enter_for(obj, lock, locking_thread); + } + if (!enter_fast_impl(obj, lock, locking_thread)) { // Inflated ObjectMonitor::enter_for is required @@ -540,8 +543,7 @@ void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* lock } } -void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) { - assert(current == Thread::current(), "must be"); +void ObjectSynchronizer::enter_legacy(Handle obj, BasicLock* lock, JavaThread* current) { if (!enter_fast_impl(obj, lock, current)) { // Inflated ObjectMonitor::enter is required @@ -561,6 +563,7 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) // of this algorithm. Make sure to update that code if the following function is // changed. The implementation is extremely sensitive to race condition. Be careful. bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) { + assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer"); if (obj->klass()->is_value_based()) { handle_sync_on_value_based_class(obj, locking_thread); @@ -569,61 +572,7 @@ bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread locking_thread->inc_held_monitor_count(); if (!useHeavyMonitors()) { - if (LockingMode == LM_LIGHTWEIGHT) { - // Fast-locking does not use the 'lock' argument. - LockStack& lock_stack = locking_thread->lock_stack(); - if (lock_stack.is_full()) { - // We unconditionally make room on the lock stack by inflating - // the least recently locked object on the lock stack. - - // About the choice to inflate least recently locked object. - // First we must chose to inflate a lock, either some lock on - // the lock-stack or the lock that is currently being entered - // (which may or may not be on the lock-stack). - // Second the best lock to inflate is a lock which is entered - // in a control flow where there are only a very few locks being - // used, as the costly part of inflated locking is inflation, - // not locking. 
But this property is entirely program dependent. - // Third inflating the lock currently being entered on when it - // is not present on the lock-stack will result in a still full - // lock-stack. This creates a scenario where every deeper nested - // monitorenter must call into the runtime. - // The rational here is as follows: - // Because we cannot (currently) figure out the second, and want - // to avoid the third, we inflate a lock on the lock-stack. - // The least recently locked lock is chosen as it is the lock - // with the longest critical section. - - log_info(monitorinflation)("LockStack capacity exceeded, inflating."); - ObjectMonitor* monitor = inflate_for(locking_thread, lock_stack.bottom(), inflate_cause_vm_internal); - assert(monitor->owner() == locking_thread, "must be owner=" PTR_FORMAT " locking_thread=" PTR_FORMAT " mark=" PTR_FORMAT, - p2i(monitor->owner()), p2i(locking_thread), monitor->object()->mark_acquire().value()); - assert(!lock_stack.is_full(), "must have made room here"); - } - - markWord mark = obj()->mark_acquire(); - while (mark.is_unlocked()) { - // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications. - // Try to swing into 'fast-locked' state. - assert(!lock_stack.contains(obj()), "thread must not already hold the lock"); - const markWord locked_mark = mark.set_fast_locked(); - const markWord old_mark = obj()->cas_set_mark(locked_mark, mark); - if (old_mark == mark) { - // Successfully fast-locked, push object to lock-stack and return. - lock_stack.push(obj()); - return true; - } - mark = old_mark; - } - - if (mark.is_fast_locked() && lock_stack.try_recursive_enter(obj())) { - // Recursive lock successful. - return true; - } - - // Failed to fast lock. - return false; - } else if (LockingMode == LM_LEGACY) { + if (LockingMode == LM_LEGACY) { markWord mark = obj->mark(); if (mark.is_unlocked()) { // Anticipate successful CAS -- the ST of the displaced mark must @@ -656,37 +605,12 @@ bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread return false; } -void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) { - current->dec_held_monitor_count(); +void ObjectSynchronizer::exit_legacy(oop object, BasicLock* lock, JavaThread* current) { + assert(LockingMode != LM_LIGHTWEIGHT, "Use LightweightSynchronizer"); if (!useHeavyMonitors()) { markWord mark = object->mark(); - if (LockingMode == LM_LIGHTWEIGHT) { - // Fast-locking does not use the 'lock' argument. - LockStack& lock_stack = current->lock_stack(); - if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) { - // Recursively unlocked. - return; - } - - if (mark.is_fast_locked() && lock_stack.is_recursive(object)) { - // This lock is recursive but is not at the top of the lock stack so we're - // doing an unbalanced exit. We have to fall thru to inflation below and - // let ObjectMonitor::exit() do the unlock. - } else { - while (mark.is_fast_locked()) { - // Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications. 
- const markWord unlocked_mark = mark.set_unlocked(); - const markWord old_mark = object->cas_set_mark(unlocked_mark, mark); - if (old_mark == mark) { - size_t recursions = lock_stack.remove(object) - 1; - assert(recursions == 0, "must not be recursive here"); - return; - } - mark = old_mark; - } - } - } else if (LockingMode == LM_LEGACY) { + if (LockingMode == LM_LEGACY) { markWord dhw = lock->displaced_header(); if (dhw.value() == 0) { // If the displaced header is null, then this exit matches up with @@ -708,7 +632,7 @@ void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) // Monitor owner's stack and update the BasicLocks because a // Java Monitor can be asynchronously inflated by a thread that // does not own the Java Monitor. - ObjectMonitor* m = mark.monitor(); + ObjectMonitor* m = read_monitor(mark); assert(m->object()->mark() == mark, "invariant"); assert(m->is_entered(current), "invariant"); } @@ -752,8 +676,16 @@ void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) { // enter() can make the ObjectMonitor busy. enter() returns false if // we have lost the race to async deflation and we simply try again. while (true) { - ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter); - if (monitor->enter(current)) { + ObjectMonitor* monitor; + bool entered; + if (LockingMode == LM_LIGHTWEIGHT) { + entered = LightweightSynchronizer::inflate_and_enter(obj(), inflate_cause_jni_enter, current, current) != nullptr; + } else { + monitor = inflate(current, obj(), inflate_cause_jni_enter); + entered = monitor->enter(current); + } + + if (entered) { current->inc_held_monitor_count(1, true); break; } @@ -765,9 +697,14 @@ void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) { void ObjectSynchronizer::jni_exit(oop obj, TRAPS) { JavaThread* current = THREAD; - // The ObjectMonitor* can't be async deflated until ownership is - // dropped inside exit() and the ObjectMonitor* must be !is_busy(). - ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit); + ObjectMonitor* monitor; + if (LockingMode == LM_LIGHTWEIGHT) { + monitor = LightweightSynchronizer::inflate_locked_or_imse(obj, inflate_cause_jni_exit, CHECK); + } else { + // The ObjectMonitor* can't be async deflated until ownership is + // dropped inside exit() and the ObjectMonitor* must be !is_busy(). + monitor = inflate(current, obj, inflate_cause_jni_exit); + } // If this thread has locked the object, exit the monitor. We // intentionally do not use CHECK on check_owner because we must exit the // monitor even if an exception was already pending. @@ -800,15 +737,22 @@ ObjectLocker::~ObjectLocker() { // ----------------------------------------------------------------------------- // Wait/Notify/NotifyAll // NOTE: must use heavy weight monitor to handle wait() + int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) { JavaThread* current = THREAD; if (millis < 0) { THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); } - // The ObjectMonitor* can't be async deflated because the _waiters - // field is incremented before ownership is dropped and decremented - // after ownership is regained. 
- ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait); + + ObjectMonitor* monitor; + if (LockingMode == LM_LIGHTWEIGHT) { + monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK_0); + } else { + // The ObjectMonitor* can't be async deflated because the _waiters + // field is incremented before ownership is dropped and decremented + // after ownership is regained. + monitor = inflate(current, obj(), inflate_cause_wait); + } DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis); monitor->wait(millis, true, THREAD); // Not CHECK as we need following code @@ -825,9 +769,14 @@ void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) { if (millis < 0) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); } - ObjectSynchronizer::inflate(THREAD, - obj(), - inflate_cause_wait)->wait(millis, false, THREAD); + + ObjectMonitor* monitor; + if (LockingMode == LM_LIGHTWEIGHT) { + monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_wait, CHECK); + } else { + monitor = inflate(THREAD, obj(), inflate_cause_wait); + } + monitor->wait(millis, false, THREAD); } @@ -846,9 +795,15 @@ void ObjectSynchronizer::notify(Handle obj, TRAPS) { return; } } - // The ObjectMonitor* can't be async deflated until ownership is - // dropped by the calling thread. - ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify); + + ObjectMonitor* monitor; + if (LockingMode == LM_LIGHTWEIGHT) { + monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK); + } else { + // The ObjectMonitor* can't be async deflated until ownership is + // dropped by the calling thread. + monitor = inflate(current, obj(), inflate_cause_notify); + } monitor->notify(CHECK); } @@ -868,9 +823,15 @@ void ObjectSynchronizer::notifyall(Handle obj, TRAPS) { return; } } - // The ObjectMonitor* can't be async deflated until ownership is - // dropped by the calling thread. - ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify); + + ObjectMonitor* monitor; + if (LockingMode == LM_LIGHTWEIGHT) { + monitor = LightweightSynchronizer::inflate_locked_or_imse(obj(), inflate_cause_notify, CHECK); + } else { + // The ObjectMonitor* can't be async deflated until ownership is + // dropped by the calling thread. + monitor = inflate(current, obj(), inflate_cause_notify); + } monitor->notifyAll(CHECK); } @@ -968,7 +929,7 @@ static markWord read_stable_mark(oop obj) { // There are simple ways to "diffuse" the middle address bits over the // generated hashCode values: -static inline intptr_t get_next_hash(Thread* current, oop obj) { +static intptr_t get_next_hash(Thread* current, oop obj) { intptr_t value = 0; if (hashCode == 0) { // This form uses global Park-Miller RNG. 
@@ -1008,7 +969,33 @@ static inline intptr_t get_next_hash(Thread* current, oop obj) { return value; } +static intptr_t install_hash_code(Thread* current, oop obj) { + assert(UseObjectMonitorTable && LockingMode == LM_LIGHTWEIGHT, "must be"); + + markWord mark = obj->mark_acquire(); + for (;;) { + intptr_t hash = mark.hash(); + if (hash != 0) { + return hash; + } + + hash = get_next_hash(current, obj); + const markWord old_mark = mark; + const markWord new_mark = old_mark.copy_set_hash(hash); + + mark = obj->cas_set_mark(new_mark, old_mark); + if (old_mark == mark) { + return hash; + } + } +} + intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) { + if (UseObjectMonitorTable) { + // Since the monitor isn't in the object header, the hash can simply be + // installed in the object header. + return install_hash_code(current, obj); + } while (true) { ObjectMonitor* monitor = nullptr; @@ -1102,7 +1089,7 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) { hash = get_next_hash(current, obj); // get a new hash temp = mark.copy_set_hash(hash) ; // merge the hash into header assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value()); - uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value()); + uintptr_t v = Atomic::cmpxchg(monitor->metadata_addr(), mark.value(), temp.value()); test = markWord(v); if (test != mark) { // The attempt to update the ObjectMonitor's header/dmw field @@ -1114,7 +1101,7 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) { assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value()); assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash"); } - if (monitor->is_being_async_deflated()) { + if (monitor->is_being_async_deflated() && !UseObjectMonitorTable) { // If we detect that async deflation has occurred, then we // attempt to restore the header/dmw to the object's header // so that we only retry once if the deflater thread happens @@ -1145,11 +1132,25 @@ bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current, return current->lock_stack().contains(h_obj()); } - if (mark.has_monitor()) { + while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) { + ObjectMonitor* monitor = read_monitor(current, obj, mark); + if (monitor != nullptr) { + return monitor->is_entered(current) != 0; + } + // Racing with inflation/deflation, retry + mark = obj->mark_acquire(); + + if (mark.is_fast_locked()) { + // Some other thread fast_locked, current could not have held the lock + return false; + } + } + + if (LockingMode != LM_LIGHTWEIGHT && mark.has_monitor()) { // Inflated monitor so header points to ObjectMonitor (tagged pointer). // The first stage of async deflation does not affect any field // used by this comparison so the ObjectMonitor* is usable here. 
- ObjectMonitor* monitor = mark.monitor(); + ObjectMonitor* monitor = read_monitor(mark); return monitor->is_entered(current) != 0; } // Unlocked case, header in place @@ -1173,11 +1174,25 @@ JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_ob return Threads::owning_thread_from_object(t_list, h_obj()); } - if (mark.has_monitor()) { + while (LockingMode == LM_LIGHTWEIGHT && mark.has_monitor()) { + ObjectMonitor* monitor = read_monitor(Thread::current(), obj, mark); + if (monitor != nullptr) { + return Threads::owning_thread_from_monitor(t_list, monitor); + } + // Racing with inflation/deflation, retry + mark = obj->mark_acquire(); + + if (mark.is_fast_locked()) { + // Some other thread fast_locked + return Threads::owning_thread_from_object(t_list, h_obj()); + } + } + + if (LockingMode != LM_LIGHTWEIGHT && mark.has_monitor()) { // Inflated monitor so header points to ObjectMonitor (tagged pointer). // The first stage of async deflation does not affect any field // used by this comparison so the ObjectMonitor* is usable here. - ObjectMonitor* monitor = mark.monitor(); + ObjectMonitor* monitor = read_monitor(mark); assert(monitor != nullptr, "monitor should be non-null"); // owning_thread_from_monitor() may also return null here: return Threads::owning_thread_from_monitor(t_list, monitor); @@ -1389,9 +1404,10 @@ static void post_monitor_inflate_event(EventJavaMonitorInflate* event, // Fast path code shared by multiple functions void ObjectSynchronizer::inflate_helper(oop obj) { + assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter"); markWord mark = obj->mark_acquire(); if (mark.has_monitor()) { - ObjectMonitor* monitor = mark.monitor(); + ObjectMonitor* monitor = read_monitor(mark); markWord dmw = monitor->header(); assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value()); return; @@ -1401,39 +1417,25 @@ void ObjectSynchronizer::inflate_helper(oop obj) { ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) { assert(current == Thread::current(), "must be"); - if (LockingMode == LM_LIGHTWEIGHT && current->is_Java_thread()) { - return inflate_impl(JavaThread::cast(current), obj, cause); - } - return inflate_impl(nullptr, obj, cause); + assert(LockingMode != LM_LIGHTWEIGHT, "only inflate through enter"); + return inflate_impl(obj, cause); } ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) { assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be"); - return inflate_impl(thread, obj, cause); -} - -ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) { - // The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires - // that the inflating_thread == Thread::current() or is suspended throughout the call by - // some other mechanism. - // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a non - // JavaThread. (As may still be the case from FastHashCode). However it is only - // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread - // is set when called from ObjectSynchronizer::enter from the owning thread, - // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit. 
+ assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_for"); + return inflate_impl(obj, cause); +} + +ObjectMonitor* ObjectSynchronizer::inflate_impl(oop object, const InflateCause cause) { + assert(LockingMode != LM_LIGHTWEIGHT, "LM_LIGHTWEIGHT cannot use inflate_impl"); EventJavaMonitorInflate event; for (;;) { const markWord mark = object->mark_acquire(); // The mark can be in one of the following states: - // * inflated - Just return if using stack-locking. - // If using fast-locking and the ObjectMonitor owner - // is anonymous and the inflating_thread owns the - // object lock, then we make the inflating_thread - // the ObjectMonitor owner and remove the lock from - // the inflating_thread's lock stack. - // * fast-locked - Coerce it to inflated from fast-locked. + // * inflated - Just return it. // * stack-locked - Coerce it to inflated from stack-locked. // * INFLATING - Busy wait for conversion from stack-locked to // inflated. @@ -1444,80 +1446,18 @@ ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oo ObjectMonitor* inf = mark.monitor(); markWord dmw = inf->header(); assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value()); - if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() && - inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) { - inf->set_owner_from_anonymous(inflating_thread); - size_t removed = inflating_thread->lock_stack().remove(object); - inf->set_recursions(removed - 1); - } return inf; } - if (LockingMode != LM_LIGHTWEIGHT) { - // New lightweight locking does not use INFLATING. - // CASE: inflation in progress - inflating over a stack-lock. - // Some other thread is converting from stack-locked to inflated. - // Only that thread can complete inflation -- other threads must wait. - // The INFLATING value is transient. - // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish. - // We could always eliminate polling by parking the thread on some auxiliary list. - if (mark == markWord::INFLATING()) { - read_stable_mark(object); - continue; - } - } - - // CASE: fast-locked - // Could be fast-locked either by the inflating_thread or by some other thread. - // - // Note that we allocate the ObjectMonitor speculatively, _before_ - // attempting to set the object's mark to the new ObjectMonitor. If - // the inflating_thread owns the monitor, then we set the ObjectMonitor's - // owner to the inflating_thread. Otherwise, we set the ObjectMonitor's owner - // to anonymous. If we lose the race to set the object's mark to the - // new ObjectMonitor, then we just delete it and loop around again. - // - LogStreamHandle(Trace, monitorinflation) lsh; - if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) { - ObjectMonitor* monitor = new ObjectMonitor(object); - monitor->set_header(mark.set_unlocked()); - bool own = inflating_thread != nullptr && inflating_thread->lock_stack().contains(object); - if (own) { - // Owned by inflating_thread. - monitor->set_owner_from(nullptr, inflating_thread); - } else { - // Owned by somebody else. - monitor->set_owner_anonymous(); - } - markWord monitor_mark = markWord::encode(monitor); - markWord old_mark = object->cas_set_mark(monitor_mark, mark); - if (old_mark == mark) { - // Success! Return inflated monitor. 
- if (own) { - size_t removed = inflating_thread->lock_stack().remove(object); - monitor->set_recursions(removed - 1); - } - // Once the ObjectMonitor is configured and object is associated - // with the ObjectMonitor, it is safe to allow async deflation: - _in_use_list.add(monitor); - - // Hopefully the performance counters are allocated on distinct - // cache lines to avoid false sharing on MP systems ... - OM_PERFDATA_OP(Inflations, inc()); - if (log_is_enabled(Trace, monitorinflation)) { - ResourceMark rm; - lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark=" - INTPTR_FORMAT ", type='%s'", p2i(object), - object->mark().value(), object->klass()->external_name()); - } - if (event.should_commit()) { - post_monitor_inflate_event(&event, object, cause); - } - return monitor; - } else { - delete monitor; - continue; // Interference -- just retry - } + // CASE: inflation in progress - inflating over a stack-lock. + // Some other thread is converting from stack-locked to inflated. + // Only that thread can complete inflation -- other threads must wait. + // The INFLATING value is transient. + // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish. + // We could always eliminate polling by parking the thread on some auxiliary list. + if (mark == markWord::INFLATING()) { + read_stable_mark(object); + continue; } // CASE: stack-locked @@ -1531,8 +1471,8 @@ ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oo // the odds of inflation contention. If we lose the race to set INFLATING, // then we just delete the ObjectMonitor and loop around again. // + LogStreamHandle(Trace, monitorinflation) lsh; if (LockingMode == LM_LEGACY && mark.has_locker()) { - assert(LockingMode != LM_LIGHTWEIGHT, "cannot happen with new lightweight locking"); ObjectMonitor* m = new ObjectMonitor(object); // Optimistically prepare the ObjectMonitor - anticipate successful CAS // We do this before the CAS in order to minimize the length of time @@ -1664,13 +1604,14 @@ ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oo size_t ObjectSynchronizer::deflate_monitor_list(ObjectMonitorDeflationSafepointer* safepointer) { MonitorList::Iterator iter = _in_use_list.iterator(); size_t deflated_count = 0; + Thread* current = Thread::current(); while (iter.has_next()) { if (deflated_count >= (size_t)MonitorDeflationMax) { break; } ObjectMonitor* mid = iter.next(); - if (mid->deflate_monitor()) { + if (mid->deflate_monitor(current)) { deflated_count++; } @@ -1688,6 +1629,11 @@ class HandshakeForDeflation : public HandshakeClosure { void do_thread(Thread* thread) { log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread=" INTPTR_FORMAT, p2i(thread)); + if (thread->is_Java_thread()) { + // Clear OM cache + JavaThread* jt = JavaThread::cast(thread); + jt->om_clear_monitor_cache(); + } } }; @@ -1834,6 +1780,14 @@ size_t ObjectSynchronizer::deflate_idle_monitors() { GrowableArray delete_list((int)deflated_count); unlinked_count = _in_use_list.unlink_deflated(deflated_count, &delete_list, &safepointer); +#ifdef ASSERT + if (UseObjectMonitorTable) { + for (ObjectMonitor* monitor : delete_list) { + assert(!LightweightSynchronizer::contains_monitor(current, monitor), "Should have been removed"); + } + } +#endif + log.before_handshake(unlinked_count); // A JavaThread needs to handshake in order to safely free the @@ -2042,29 +1996,35 @@ void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out, return; } - if 
(n->header().value() == 0) { + + if (n->metadata() == 0) { out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must " - "have non-null _header field.", p2i(n)); + "have non-null _metadata (header/hash) field.", p2i(n)); *error_cnt_p = *error_cnt_p + 1; } + const oop obj = n->object_peek(); - if (obj != nullptr) { - const markWord mark = obj->mark(); - if (!mark.has_monitor()) { - out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's " - "object does not think it has a monitor: obj=" - INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n), - p2i(obj), mark.value()); - *error_cnt_p = *error_cnt_p + 1; - } - ObjectMonitor* const obj_mon = mark.monitor(); - if (n != obj_mon) { - out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's " - "object does not refer to the same monitor: obj=" - INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon=" - INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon)); - *error_cnt_p = *error_cnt_p + 1; - } + if (obj == nullptr) { + return; + } + + const markWord mark = obj->mark(); + if (!mark.has_monitor()) { + out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's " + "object does not think it has a monitor: obj=" + INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n), + p2i(obj), mark.value()); + *error_cnt_p = *error_cnt_p + 1; + return; + } + + ObjectMonitor* const obj_mon = read_monitor(Thread::current(), obj, mark); + if (n != obj_mon) { + out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's " + "object does not refer to the same monitor: obj=" + INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon=" + INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon)); + *error_cnt_p = *error_cnt_p + 1; } } @@ -2087,10 +2047,10 @@ void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out, bool log_ monitors_iterate([&](ObjectMonitor* monitor) { if (is_interesting(monitor)) { const oop obj = monitor->object_peek(); - const markWord mark = monitor->header(); + const intptr_t hash = UseObjectMonitorTable ? monitor->hash() : monitor->header().hash(); ResourceMark rm; out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(monitor), - monitor->is_busy(), mark.hash() != 0, monitor->owner() != nullptr, + monitor->is_busy(), hash != 0, monitor->owner() != nullptr, p2i(obj), obj == nullptr ? "" : obj->klass()->external_name()); if (monitor->is_busy()) { out->print(" (%s)", monitor->is_busy_to_string(&ss)); diff --git a/src/hotspot/share/runtime/synchronizer.hpp b/src/hotspot/share/runtime/synchronizer.hpp index 493303df661..5b171560ee1 100644 --- a/src/hotspot/share/runtime/synchronizer.hpp +++ b/src/hotspot/share/runtime/synchronizer.hpp @@ -29,6 +29,7 @@ #include "oops/markWord.hpp" #include "runtime/basicLock.hpp" #include "runtime/handles.hpp" +#include "runtime/javaThread.hpp" #include "utilities/resourceHash.hpp" template class GrowableArray; @@ -93,8 +94,9 @@ class ObjectSynchronizer : AllStatic { // deoptimization at monitor exit. Hence, it does not take a Handle argument. // This is the "slow path" version of monitor enter and exit. - static void enter(Handle obj, BasicLock* lock, JavaThread* current); - static void exit(oop obj, BasicLock* lock, JavaThread* current); + static inline void enter(Handle obj, BasicLock* lock, JavaThread* current); + static inline void exit(oop obj, BasicLock* lock, JavaThread* current); + // Used to enter a monitor for another thread. This requires that the // locking_thread is suspended, and that entering on a potential // inflated monitor may only contend with deflation. 
That is the obj being @@ -106,6 +108,9 @@ class ObjectSynchronizer : AllStatic { // inflated monitor enter. static bool enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread); + static bool quick_enter_legacy(oop obj, BasicLock* Lock, JavaThread* current); + static void enter_legacy(Handle obj, BasicLock* Lock, JavaThread* current); + static void exit_legacy(oop obj, BasicLock* lock, JavaThread* current); public: // Used only to handle jni locks or other unmatched monitor enter/exit // Internally they will use heavy weight monitor. @@ -118,7 +123,7 @@ class ObjectSynchronizer : AllStatic { static void notifyall(Handle obj, TRAPS); static bool quick_notify(oopDesc* obj, JavaThread* current, bool All); - static bool quick_enter(oop obj, JavaThread* current, BasicLock* Lock); + static inline bool quick_enter(oop obj, BasicLock* Lock, JavaThread* current); // Special internal-use-only method for use by JVM infrastructure // that needs to wait() on a java-level object but that can't risk @@ -132,13 +137,16 @@ class ObjectSynchronizer : AllStatic { private: // Shared implementation between the different LockingMode. - static ObjectMonitor* inflate_impl(JavaThread* thread, oop obj, const InflateCause cause); + static ObjectMonitor* inflate_impl(oop obj, const InflateCause cause); public: // This version is only for internal use static void inflate_helper(oop obj); static const char* inflate_cause_name(const InflateCause cause); + inline static ObjectMonitor* read_monitor(markWord mark); + inline static ObjectMonitor* read_monitor(Thread* current, oop obj, markWord mark); + // Returns the identity hash value for an oop // NOTE: It may cause monitor inflation static intptr_t FastHashCode(Thread* current, oop obj); @@ -200,6 +208,7 @@ class ObjectSynchronizer : AllStatic { private: friend class SynchronizerTest; + friend class LightweightSynchronizer; static MonitorList _in_use_list; static volatile bool _is_async_deflation_requested; diff --git a/src/hotspot/share/runtime/synchronizer.inline.hpp b/src/hotspot/share/runtime/synchronizer.inline.hpp new file mode 100644 index 00000000000..53a9f99a39e --- /dev/null +++ b/src/hotspot/share/runtime/synchronizer.inline.hpp @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_RUNTIME_SYNCHRONIZER_INLINE_HPP +#define SHARE_RUNTIME_SYNCHRONIZER_INLINE_HPP + +#include "runtime/synchronizer.hpp" + +#include "runtime/lightweightSynchronizer.hpp" +#include "runtime/safepointVerifiers.hpp" + +inline ObjectMonitor* ObjectSynchronizer::read_monitor(markWord mark) { + return mark.monitor(); +} + +inline ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj, markWord mark) { + if (!UseObjectMonitorTable) { + return read_monitor(mark); + } else { + return LightweightSynchronizer::get_monitor_from_table(current, obj); + } +} + +inline void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) { + assert(current == Thread::current(), "must be"); + + if (LockingMode == LM_LIGHTWEIGHT) { + LightweightSynchronizer::enter(obj, lock, current); + } else { + enter_legacy(obj, lock, current); + } +} + +inline bool ObjectSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) { + assert(current->thread_state() == _thread_in_Java, "invariant"); + NoSafepointVerifier nsv; + if (obj == nullptr) return false; // Need to throw NPE + + if (obj->klass()->is_value_based()) { + return false; + } + + if (LockingMode == LM_LIGHTWEIGHT) { + return LightweightSynchronizer::quick_enter(obj, lock, current); + } else { + return quick_enter_legacy(obj, lock, current); + } +} + +inline void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) { + current->dec_held_monitor_count(); + + if (LockingMode == LM_LIGHTWEIGHT) { + LightweightSynchronizer::exit(object, current); + } else { + exit_legacy(object, lock, current); + } +} + +#endif // SHARE_RUNTIME_SYNCHRONIZER_INLINE_HPP diff --git a/src/hotspot/share/runtime/vframe.cpp b/src/hotspot/share/runtime/vframe.cpp index ebd5af5b74e..58eead692c6 100644 --- a/src/hotspot/share/runtime/vframe.cpp +++ b/src/hotspot/share/runtime/vframe.cpp @@ -50,7 +50,7 @@ #include "runtime/signature.hpp" #include "runtime/stackFrameStream.inline.hpp" #include "runtime/stubRoutines.hpp" -#include "runtime/synchronizer.hpp" +#include "runtime/synchronizer.inline.hpp" #include "runtime/vframe.inline.hpp" #include "runtime/vframeArray.hpp" #include "runtime/vframe_hp.hpp" @@ -247,13 +247,16 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) { markWord mark = monitor->owner()->mark(); // The first stage of async deflation does not affect any field // used by this comparison so the ObjectMonitor* is usable here. 
- if (mark.has_monitor() && - ( // we have marked ourself as pending on this monitor - mark.monitor() == thread()->current_pending_monitor() || + if (mark.has_monitor()) { + ObjectMonitor* mon = ObjectSynchronizer::read_monitor(current, monitor->owner(), mark); + if (// if the monitor is null we must be in the process of locking + mon == nullptr || + // we have marked ourself as pending on this monitor + mon == thread()->current_pending_monitor() || // we are not the owner of this monitor - !mark.monitor()->is_entered(thread()) - )) { - lock_state = "waiting to lock"; + !mon->is_entered(thread())) { + lock_state = "waiting to lock"; + } } } print_locked_object_class_name(st, Handle(current, monitor->owner()), lock_state); diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp index 913f988e48b..fe9620586be 100644 --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -777,11 +777,11 @@ /* Monitors */ \ /************/ \ \ - volatile_nonstatic_field(ObjectMonitor, _header, markWord) \ + volatile_nonstatic_field(ObjectMonitor, _metadata, uintptr_t) \ unchecked_nonstatic_field(ObjectMonitor, _object, sizeof(void *)) /* NOTE: no type */ \ unchecked_nonstatic_field(ObjectMonitor, _owner, sizeof(void *)) /* NOTE: no type */ \ volatile_nonstatic_field(ObjectMonitor, _next_om, ObjectMonitor*) \ - volatile_nonstatic_field(BasicLock, _displaced_header, markWord) \ + volatile_nonstatic_field(BasicLock, _metadata, uintptr_t) \ nonstatic_field(ObjectMonitor, _contentions, int) \ volatile_nonstatic_field(ObjectMonitor, _waiters, int) \ volatile_nonstatic_field(ObjectMonitor, _recursions, intx) \ diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java index 7512257a197..a3a06ec73f5 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java @@ -156,6 +156,16 @@ public ObjectMonitor monitor() { if (Assert.ASSERTS_ENABLED) { Assert.that(hasMonitor(), "check"); } + if (VM.getVM().getCommandLineFlag("UseObjectMonitorTable").getBool()) { + Iterator it = ObjectSynchronizer.objectMonitorIterator(); + while (it != null && it.hasNext()) { + ObjectMonitor mon = (ObjectMonitor)it.next(); + if (getAddress().equals(mon.object())) { + return mon; + } + } + return null; + } // Use xor instead of &~ to provide one extra tag-bit check. 
Address monAddr = valueAsAddress().xorWithMask(monitorValue); return new ObjectMonitor(monAddr); diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/BasicLock.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/BasicLock.java index 55e5d0e4598..4028bae3f5b 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/BasicLock.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/BasicLock.java @@ -43,7 +43,7 @@ public void update(Observable o, Object data) { private static synchronized void initialize(TypeDataBase db) throws WrongTypeException { Type type = db.lookupType("BasicLock"); - displacedHeaderField = type.getCIntegerField("_displaced_header"); + displacedHeaderField = type.getCIntegerField("_metadata"); } private static CIntegerField displacedHeaderField; diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ObjectMonitor.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ObjectMonitor.java index 3f3c67fbf26..e16982ce434 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ObjectMonitor.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ObjectMonitor.java @@ -45,8 +45,8 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeExc heap = VM.getVM().getObjectHeap(); Type type = db.lookupType("ObjectMonitor"); - sun.jvm.hotspot.types.Field f = type.getField("_header"); - headerFieldOffset = f.getOffset(); + sun.jvm.hotspot.types.Field f = type.getField("_metadata"); + metadataFieldOffset = f.getOffset(); f = type.getField("_object"); objectFieldOffset = f.getOffset(); f = type.getField("_owner"); @@ -65,7 +65,7 @@ public ObjectMonitor(Address addr) { } public Mark header() { - return new Mark(addr.addOffsetTo(headerFieldOffset)); + return new Mark(addr.addOffsetTo(metadataFieldOffset)); } // FIXME @@ -114,7 +114,7 @@ public int contentions() { // vmStructs.cpp because they aren't strongly typed in the VM, or // would confuse the SA's type system. 
private static ObjectHeap heap; - private static long headerFieldOffset; + private static long metadataFieldOffset; private static long objectFieldOffset; private static long ownerFieldOffset; private static long nextOMFieldOffset; diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ObjectSynchronizer.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ObjectSynchronizer.java index f8e71aa3761..a9c97e06a44 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ObjectSynchronizer.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/ObjectSynchronizer.java @@ -55,6 +55,9 @@ public long identityHashValueFor(Oop obj) { // FIXME: can not generate marks in debugging system return mark.hash(); } else if (mark.hasMonitor()) { + if (VM.getVM().getCommandLineFlag("UseObjectMonitorTable").getBool()) { + return mark.hash(); + } ObjectMonitor monitor = mark.monitor(); Mark temp = monitor.header(); return temp.hash(); diff --git a/test/hotspot/gtest/runtime/test_objectMonitor.cpp b/test/hotspot/gtest/runtime/test_objectMonitor.cpp index 62eeba1587a..fcdb102fcf7 100644 --- a/test/hotspot/gtest/runtime/test_objectMonitor.cpp +++ b/test/hotspot/gtest/runtime/test_objectMonitor.cpp @@ -27,20 +27,20 @@ #include "unittest.hpp" TEST_VM(ObjectMonitor, sanity) { - uint cache_line_size = VM_Version::L1_data_cache_line_size(); + uint cache_line_size = VM_Version::L1_data_cache_line_size(); - if (cache_line_size != 0) { - // We were able to determine the L1 data cache line size so - // do some cache line specific sanity checks - EXPECT_EQ((size_t) 0, sizeof (PaddedEnd) % cache_line_size) - << "PaddedEnd size is not a " - << "multiple of a cache line which permits false sharing. " - << "sizeof(PaddedEnd) = " - << sizeof (PaddedEnd) - << "; cache_line_size = " << cache_line_size; + if (cache_line_size != 0) { - EXPECT_GE((size_t) in_bytes(ObjectMonitor::owner_offset()), cache_line_size) - << "the _header and _owner fields are closer " + EXPECT_EQ(in_bytes(ObjectMonitor::metadata_offset()), 0) + << "_metadata at a non 0 offset. metadata_offset = " + << in_bytes(ObjectMonitor::metadata_offset()); + + EXPECT_GE((size_t) in_bytes(ObjectMonitor::owner_offset()), cache_line_size) + << "the _metadata and _owner fields are closer " + << "than a cache line which permits false sharing."; + + EXPECT_GE((size_t) in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), cache_line_size) + << "the _owner and _recursions fields are closer " << "than a cache line which permits false sharing."; } } diff --git a/test/hotspot/jtreg/runtime/Monitor/UseObjectMonitorTableTest.java b/test/hotspot/jtreg/runtime/Monitor/UseObjectMonitorTableTest.java new file mode 100644 index 00000000000..6af1602e338 --- /dev/null +++ b/test/hotspot/jtreg/runtime/Monitor/UseObjectMonitorTableTest.java @@ -0,0 +1,244 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test id=NormalDeflation + * @summary A collection of small tests using synchronized, wait, notify to try + * and achieve good cheap coverage of UseObjectMonitorTable. + * @library /test/lib + * @run main/othervm -XX:+UnlockDiagnosticVMOptions + * -XX:+UseObjectMonitorTable + * UseObjectMonitorTableTest + */ + +/** + * @test id=ExtremeDeflation + * @summary Run the same tests but with deflation running constantly. + * @library /test/lib + * @run main/othervm -XX:+UnlockDiagnosticVMOptions + * -XX:GuaranteedAsyncDeflationInterval=1 + * -XX:+UseObjectMonitorTable + * UseObjectMonitorTableTest + */ + +import jdk.test.lib.Utils; + +import java.lang.Runnable; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import java.util.Random; +import java.util.stream.Stream; + +public class UseObjectMonitorTableTest { + static final ThreadFactory TF = Executors.defaultThreadFactory(); + + static class WaitNotifyTest implements Runnable { + static final int ITERATIONS = 10_000; + static final int THREADS = 10; + final WaitNotifySyncChannel startLatchChannel = new WaitNotifySyncChannel(); + final WaitNotifySyncChannel endLatchChannel = new WaitNotifySyncChannel(); + int count = 0; + + static class WaitNotifyCountDownLatch { + int latch; + WaitNotifyCountDownLatch(int count) { + latch = count; + } + synchronized void await() { + while (latch != 0) { + try { + wait(); + } catch (InterruptedException e) { + throw new RuntimeException("WaitNotifyTest: Unexpected interrupt", e); + } + } + } + synchronized void countDown() { + if (latch != 0) { + latch--; + if (latch == 0) { + notifyAll(); + } + } + } + } + static class WaitNotifySyncChannel extends WaitNotifyCountDownLatch { + WaitNotifyCountDownLatch object; + WaitNotifySyncChannel() { super(0); } + synchronized void send(WaitNotifyCountDownLatch object, int count) { + await(); + latch = count; + this.object = object; + notifyAll(); + } + synchronized WaitNotifyCountDownLatch receive() { + while (latch == 0) { + try { + wait(); + } catch (InterruptedException e) { + throw new RuntimeException("WaitNotifyTest: Unexpected interrupt", e); + } + } + countDown(); + return object; + } + } + synchronized int getCount() { + return count; + } + synchronized void increment() { + count++; + } + public void run() { + System.out.println("WaitNotifyTest started."); + for (int t = 0; t < THREADS; t++) { + TF.newThread(() -> { + for (int i = 0; i < ITERATIONS; i++) { + startLatchChannel.receive().await(); + increment(); + endLatchChannel.receive().countDown(); + } + }).start(); + } + for (int i = 0; i < ITERATIONS; i++) { + WaitNotifyCountDownLatch startLatch = new WaitNotifyCountDownLatch(1); + WaitNotifyCountDownLatch endLatch = new WaitNotifyCountDownLatch(THREADS); + int count = getCount(); + if (count != i * THREADS) { + throw new RuntimeException("WaitNotifyTest: Invalid Count " + count + + " 
pre-iteration " + i); + } + startLatchChannel.send(startLatch, 10); + startLatch.countDown(); + endLatchChannel.send(endLatch, 10); + endLatch.await(); + } + int count = getCount(); + if (count != ITERATIONS * THREADS) { + throw new RuntimeException("WaitNotifyTest: Invalid Count " + count); + } + System.out.println("WaitNotifyTest passed."); + } + } + + static class RandomDepthTest implements Runnable { + static final int THREADS = 10; + static final int ITERATIONS = 10_000; + static final int MAX_DEPTH = 20; + static final int MAX_RECURSION_COUNT = 10; + static final double RECURSION_CHANCE = .25; + final Random random = Utils.getRandomInstance(); + final Locker lockers[] = new Locker[MAX_DEPTH]; + final CyclicBarrier syncBarrier = new CyclicBarrier(THREADS + 1); + int count = 0; + + class Locker { + final int depth; + Locker(int depth) { + this.depth = depth; + } + synchronized int getCount() { + if (depth == MAX_DEPTH) { + return count; + } + return lockers[depth].getCount(); + } + synchronized void increment(int recursion_count) { + if (recursion_count != MAX_RECURSION_COUNT && + random.nextDouble() < RECURSION_CHANCE) { + this.increment(recursion_count + 1); + return; + } + if (depth == MAX_DEPTH) { + count++; + return; + } + lockers[depth + random.nextInt(MAX_DEPTH - depth)].increment(recursion_count); + } + synchronized Locker create() { + if (depth != MAX_DEPTH) { + lockers[depth] = (new Locker(depth + 1)).create(); + } + return this; + } + } + int getCount() { + return lockers[0].getCount(); + } + void increment() { + lockers[random.nextInt(MAX_DEPTH)].increment(0); + } + void create() { + lockers[0] = (new Locker(1)).create(); + } + void syncPoint() { + try { + syncBarrier.await(); + } catch (InterruptedException e) { + throw new RuntimeException("RandomDepthTest: Unexpected interrupt", e); + } catch (BrokenBarrierException e) { + throw new RuntimeException("RandomDepthTest: Unexpected broken barrier", e); + } + } + public void run() { + System.out.println("RandomDepthTest started."); + for (int t = 0; t < THREADS; t++) { + TF.newThread(() -> { + syncPoint(); + for (int i = 0; i < ITERATIONS; i++) { + increment(); + } + syncPoint(); + }).start(); + } + create(); + syncPoint(); + syncPoint(); + int count = getCount(); + if (count != THREADS * ITERATIONS) { + throw new RuntimeException("RandomDepthTest: Invalid Count " + count); + } + System.out.println("RandomDepthTest passed."); + } + } + + public static void main(String[] args) { + Stream.of( + TF.newThread(new WaitNotifyTest()), + TF.newThread(new RandomDepthTest()) + ).map(t -> { + t.start(); + return t; + }).forEach(t -> { + try { + t.join(); + } catch (InterruptedException e) { + throw new RuntimeException("UseObjectMonitorTableTest: Unexpected interrupt", e); + } + }); + + System.out.println("UseObjectMonitorTableTest passed."); + } +} diff --git a/test/hotspot/jtreg/runtime/logging/MonitorInflationTest.java b/test/hotspot/jtreg/runtime/logging/MonitorInflationTest.java index 737c64621e8..8666e2ee774 100644 --- a/test/hotspot/jtreg/runtime/logging/MonitorInflationTest.java +++ b/test/hotspot/jtreg/runtime/logging/MonitorInflationTest.java @@ -38,7 +38,7 @@ public class MonitorInflationTest { static void analyzeOutputOn(ProcessBuilder pb) throws Exception { OutputAnalyzer output = new OutputAnalyzer(pb.start()); - output.shouldContain("inflate(has_locker):"); + output.shouldContain("inflate:"); output.shouldContain("type='MonitorInflationTest$Waiter'"); output.shouldContain("I've been waiting."); 
output.shouldHaveExitValue(0); diff --git a/test/micro/org/openjdk/bench/vm/lang/LockUnlock.java b/test/micro/org/openjdk/bench/vm/lang/LockUnlock.java index 49181971768..3ed862e8218 100644 --- a/test/micro/org/openjdk/bench/vm/lang/LockUnlock.java +++ b/test/micro/org/openjdk/bench/vm/lang/LockUnlock.java @@ -54,6 +54,8 @@ public class LockUnlock { public Object lockObject1; public Object lockObject2; + public volatile Object lockObject3Inflated; + public volatile Object lockObject4Inflated; public int factorial; public int dummyInt1; public int dummyInt2; @@ -62,13 +64,28 @@ public class LockUnlock { public void setup() { lockObject1 = new Object(); lockObject2 = new Object(); + lockObject3Inflated = new Object(); + lockObject4Inflated = new Object(); + + // Inflate the lock to use an ObjectMonitor + try { + synchronized (lockObject3Inflated) { + lockObject3Inflated.wait(1); + } + synchronized (lockObject4Inflated) { + lockObject4Inflated.wait(1); + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + dummyInt1 = 47; dummyInt2 = 11; // anything } /** Perform a synchronized on a local object within a loop. */ @Benchmark - public void testSimpleLockUnlock() { + public void testBasicSimpleLockUnlockLocal() { Object localObject = lockObject1; for (int i = 0; i < innerCount; i++) { synchronized (localObject) { @@ -78,9 +95,43 @@ public void testSimpleLockUnlock() { } } + /** Perform a synchronized on an object within a loop. */ + @Benchmark + public void testBasicSimpleLockUnlock() { + for (int i = 0; i < innerCount; i++) { + synchronized (lockObject1) { + dummyInt1++; + dummyInt2++; + } + } + } + + /** Perform a synchronized on a local object within a loop. */ + @Benchmark + public void testInflatedSimpleLockUnlockLocal() { + Object localObject = lockObject3Inflated; + for (int i = 0; i < innerCount; i++) { + synchronized (localObject) { + dummyInt1++; + dummyInt2++; + } + } + } + + /** Perform a synchronized on an object within a loop. */ + @Benchmark + public void testInflatedSimpleLockUnlock() { + for (int i = 0; i < innerCount; i++) { + synchronized (lockObject3Inflated) { + dummyInt1++; + dummyInt2++; + } + } + } + /** Perform a recursive synchronized on a local object within a loop. */ @Benchmark - public void testRecursiveLockUnlock() { + public void testBasicRecursiveLockUnlockLocal() { Object localObject = lockObject1; for (int i = 0; i < innerCount; i++) { synchronized (localObject) { @@ -92,9 +143,22 @@ public void testRecursiveLockUnlock() { } } + /** Perform a recursive synchronized on an object within a loop. */ + @Benchmark + public void testBasicRecursiveLockUnlock() { + for (int i = 0; i < innerCount; i++) { + synchronized (lockObject1) { + synchronized (lockObject1) { + dummyInt1++; + dummyInt2++; + } + } + } + } + /** Perform two synchronized after each other on the same local object. */ @Benchmark - public void testSerialLockUnlock() { + public void testBasicSerialLockUnlockLocal() { Object localObject = lockObject1; for (int i = 0; i < innerCount; i++) { synchronized (localObject) { @@ -106,6 +170,126 @@ public void testSerialLockUnlock() { } } + /** Perform two synchronized after each other on the same object. */ + @Benchmark + public void testBasicSerialLockUnlock() { + for (int i = 0; i < innerCount; i++) { + synchronized (lockObject1) { + dummyInt1++; + } + synchronized (lockObject1) { + dummyInt2++; + } + } + } + + /** Perform two synchronized after each other on the same local object. 
*/ + @Benchmark + public void testInflatedSerialLockUnlockLocal() { + Object localObject = lockObject3Inflated; + for (int i = 0; i < innerCount; i++) { + synchronized (localObject) { + dummyInt1++; + } + synchronized (localObject) { + dummyInt2++; + } + } + } + + /** Perform two synchronized after each other on the same object. */ + @Benchmark + public void testInflatedSerialLockUnlock() { + for (int i = 0; i < innerCount; i++) { + synchronized (lockObject3Inflated) { + dummyInt1++; + } + synchronized (lockObject3Inflated) { + dummyInt2++; + } + } + } + + /** Perform two synchronized after each other on the same object. */ + @Benchmark + public void testInflatedMultipleSerialLockUnlock() { + for (int i = 0; i < innerCount; i++) { + synchronized (lockObject3Inflated) { + dummyInt1++; + } + synchronized (lockObject4Inflated) { + dummyInt2++; + } + } + } + + /** Perform two synchronized after each other on the same object. */ + @Benchmark + public void testInflatedMultipleRecursiveLockUnlock() { + for (int i = 0; i < innerCount; i++) { + synchronized (lockObject3Inflated) { + dummyInt1++; + synchronized (lockObject4Inflated) { + dummyInt2++; + } + } + } + } + + /** Perform a recursive-only synchronized on a local object within a loop. */ + @Benchmark + public void testInflatedRecursiveOnlyLockUnlockLocal() { + Object localObject = lockObject3Inflated; + synchronized (localObject) { + for (int i = 0; i < innerCount; i++) { + synchronized (localObject) { + dummyInt1++; + dummyInt2++; + } + } + } + } + + /** Perform a recursive-only synchronized on an object within a loop. */ + @Benchmark + public void testInflatedRecursiveOnlyLockUnlock() { + synchronized (lockObject3Inflated) { + for (int i = 0; i < innerCount; i++) { + synchronized (lockObject3Inflated) { + dummyInt1++; + dummyInt2++; + } + } + } + } + + /** Perform a recursive-only synchronized on a local object within a loop. */ + @Benchmark + public void testInflatedRecursiveLockUnlockLocal() { + Object localObject = lockObject3Inflated; + for (int i = 0; i < innerCount; i++) { + synchronized (localObject) { + synchronized (localObject) { + dummyInt1++; + dummyInt2++; + } + } + } + } + + /** Perform a recursive-only synchronized on an object within a loop. */ + @Benchmark + public void testInflatedRecursiveLockUnlock() { + for (int i = 0; i < innerCount; i++) { + synchronized (lockObject3Inflated) { + synchronized (lockObject3Inflated) { + dummyInt1++; + dummyInt2++; + } + } + } + } + /** * Performs recursive synchronizations on the same local object. *

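The testInflated* benchmarks above depend on the setup() trick of calling a timed wait() inside a synchronized block: wait() always requires a heavyweight ObjectMonitor, so the lock is inflated before measurement starts. A minimal standalone sketch of that technique (class name hypothetical; only standard java.lang.Object monitor semantics are assumed):

    public class InflateExample {
        public static void main(String[] args) throws InterruptedException {
            Object lock = new Object();
            synchronized (lock) {
                // A timed wait needs a full ObjectMonitor, so this inflates the lock.
                lock.wait(1);
            }
            synchronized (lock) {
                // Subsequent enters now take the inflated-monitor path that the
                // testInflated* benchmarks measure.
            }
        }
    }

To exercise the new locking paths, the benchmarks and tests can be run with -XX:+UnlockDiagnosticVMOptions -XX:+UseObjectMonitorTable, as the UseObjectMonitorTableTest jtreg test above does.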
From 60c9b5cd9f18830f0fb1aea6cb3dc43af3908cc5 Mon Sep 17 00:00:00 2001 From: Andrey Turbanov Date: Fri, 16 Aug 2024 08:49:18 +0000 Subject: [PATCH 18/67] 8337839: Make a few fields in MergeCollation static Reviewed-by: jpai, naoto --- src/java.base/share/classes/java/text/MergeCollation.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/java.base/share/classes/java/text/MergeCollation.java b/src/java.base/share/classes/java/text/MergeCollation.java index 1db779e003f..8cac7871b56 100644 --- a/src/java.base/share/classes/java/text/MergeCollation.java +++ b/src/java.base/share/classes/java/text/MergeCollation.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -220,9 +220,9 @@ public PatternEntry getItemAt(int index) { // Using BitSet would make this easier, but it's significantly slower. // private transient byte[] statusArray = new byte[8192]; - private final byte BITARRAYMASK = (byte)0x1; - private final int BYTEPOWER = 3; - private final int BYTEMASK = (1 << BYTEPOWER) - 1; + private static final byte BITARRAYMASK = (byte)0x1; + private static final int BYTEPOWER = 3; + private static final int BYTEMASK = (1 << BYTEPOWER) - 1; /* If the strength is RESET, then just change the lastEntry to From ddbc0b6a39148cb30a8fda80fa7290e90e2a77d6 Mon Sep 17 00:00:00 2001 From: Daniel Fuchs Date: Fri, 16 Aug 2024 11:24:40 +0000 Subject: [PATCH 19/67] 8338495: Revert "8336655: java/net/httpclient/DigestEchoClient.java IOException: HTTP/1.1 header parser received no bytes" Reviewed-by: jpai --- .../jdk/internal/net/http/ConnectionPool.java | 36 ++++--------------- .../jdk/internal/net/http/SocketTube.java | 23 ++---------- .../java/net/httpclient/DigestEchoClient.java | 4 +-- 3 files changed, 11 insertions(+), 52 deletions(-) diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/ConnectionPool.java b/src/java.net.http/share/classes/jdk/internal/net/http/ConnectionPool.java index edaf53a8a0d..0ad7b9d5992 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/ConnectionPool.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/ConnectionPool.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,7 +44,6 @@ import jdk.internal.net.http.common.Deadline; import jdk.internal.net.http.common.FlowTube; -import jdk.internal.net.http.common.Log; import jdk.internal.net.http.common.Logger; import jdk.internal.net.http.common.TimeLine; import jdk.internal.net.http.common.TimeSource; @@ -493,13 +492,13 @@ void clear() { // Remove a connection from the pool. // should only be called while holding the ConnectionPool stateLock. 
- private boolean removeFromPool(HttpConnection c) { + private void removeFromPool(HttpConnection c) { assert stateLock.isHeldByCurrentThread(); if (c instanceof PlainHttpConnection) { - return removeFromPool(c, plainPool); + removeFromPool(c, plainPool); } else { assert c.isSecure() : "connection " + c + " is not secure!"; - return removeFromPool(c, sslPool); + removeFromPool(c, sslPool); } } @@ -530,29 +529,13 @@ void cleanup(HttpConnection c, Throwable error) { debug.log("%s : ConnectionPool.cleanup(%s)", String.valueOf(c.getConnectionFlow()), error); stateLock.lock(); - boolean removed; try { - removed = removeFromPool(c); + removeFromPool(c); expiryList.remove(c); } finally { stateLock.unlock(); } - if (!removed) { - // this should not happen; the cleanup may have consumed - // some data that wasn't supposed to be consumed, so - // the only thing we can do is log it and close the - // connection. - if (Log.errors()) { - Log.logError("WARNING: CleanupTrigger triggered for" + - " a connection not found in the pool: closing {0}", c); - } else if (debug.on()) { - debug.log("WARNING: CleanupTrigger triggered for" + - " a connection not found in the pool: closing %s", c); - } - c.close(new IOException("Unexpected cleanup triggered for non pooled connection")); - } else { - c.close(); - } + c.close(); } /** @@ -566,7 +549,6 @@ private final class CleanupTrigger implements private final HttpConnection connection; private volatile boolean done; - private volatile boolean dropped; public CleanupTrigger(HttpConnection connection) { this.connection = connection; @@ -584,7 +566,6 @@ private void triggerCleanup(Throwable error) { @Override public void onSubscribe(Flow.Subscription subscription) { - if (dropped || done) return; subscription.request(1); } @Override @@ -605,10 +586,5 @@ public void subscribe(Flow.Subscriber> subscriber) { public String toString() { return "CleanupTrigger(" + connection.getConnectionFlow() + ")"; } - - @Override - public void dropSubscription() { - dropped = true; - } } } diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/SocketTube.java b/src/java.net.http/share/classes/jdk/internal/net/http/SocketTube.java index 9317bdf442a..cbdf6633576 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/SocketTube.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/SocketTube.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -573,8 +573,6 @@ public void subscribe(Flow.Subscriber> s) { debug.log("read publisher: dropping pending subscriber: " + previous.subscriber); previous.errorRef.compareAndSet(null, errorRef.get()); - // make sure no data will be routed to the old subscriber. 
- previous.stopReading(); previous.signalOnSubscribe(); if (subscriptionImpl.completed) { previous.signalCompletion(); @@ -608,7 +606,6 @@ final class ReadSubscription implements Flow.Subscription { volatile boolean subscribed; volatile boolean cancelled; volatile boolean completed; - volatile boolean stopped; public ReadSubscription(InternalReadSubscription impl, TubeSubscriber subscriber) { @@ -626,11 +623,11 @@ public void cancel() { @Override public void request(long n) { - if (!cancelled && !stopped) { + if (!cancelled) { impl.request(n); } else { if (debug.on()) - debug.log("subscription stopped or cancelled, ignoring request %d", n); + debug.log("subscription cancelled, ignoring request %d", n); } } @@ -664,20 +661,6 @@ void signalOnSubscribe() { signalCompletion(); } } - - /** - * Called when switching subscriber on the {@link InternalReadSubscription}. - * This subscriber is the old subscriber. Demand on the internal - * subscription will be reset and reading will be paused until the - * new subscriber is subscribed. - * This should ensure that no data is routed to this subscriber - * until the new subscriber is subscribed. - */ - void stopReading() { - stopped = true; - impl.demand.reset(); - impl.pauseReadEvent(); - } } final class InternalReadSubscription implements Flow.Subscription { diff --git a/test/jdk/java/net/httpclient/DigestEchoClient.java b/test/jdk/java/net/httpclient/DigestEchoClient.java index 1450bf09b2d..3b6d1a1773f 100644 --- a/test/jdk/java/net/httpclient/DigestEchoClient.java +++ b/test/jdk/java/net/httpclient/DigestEchoClient.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,7 +64,7 @@ * @test * @summary this test verifies that a client may provides authorization * headers directly when connecting with a server. - * @bug 8087112 8336655 + * @bug 8087112 * @library /test/lib /test/jdk/java/net/httpclient/lib * @build jdk.httpclient.test.lib.common.HttpServerAdapters jdk.test.lib.net.SimpleSSLContext * DigestEchoServer ReferenceTracker DigestEchoClient From 5022109b2a33a8cf2608eb829098b27641b731a4 Mon Sep 17 00:00:00 2001 From: Shaojin Wen Date: Fri, 16 Aug 2024 13:18:02 +0000 Subject: [PATCH 20/67] 8336856: Efficient hidden class-based string concatenation strategy Co-authored-by: Claes Redestad Reviewed-by: redestad, liach --- .../classes/java/lang/StringConcatHelper.java | 343 ++++++++ .../share/classes/java/lang/System.java | 4 + .../java/lang/invoke/StringConcatFactory.java | 762 +++++++++++++++--- .../jdk/internal/access/JavaLangAccess.java | 2 + .../jdk/internal/util/ClassFileDumper.java | 9 +- .../String/concat/HiddenClassUnloading.java | 69 ++ .../openjdk/bench/java/lang/StringConcat.java | 149 +++- .../bench/java/lang/StringConcatStartup.java | 111 ++- 8 files changed, 1316 insertions(+), 133 deletions(-) create mode 100644 test/jdk/java/lang/String/concat/HiddenClassUnloading.java diff --git a/src/java.base/share/classes/java/lang/StringConcatHelper.java b/src/java.base/share/classes/java/lang/StringConcatHelper.java index 486e115369e..ae2b9693409 100644 --- a/src/java.base/share/classes/java/lang/StringConcatHelper.java +++ b/src/java.base/share/classes/java/lang/StringConcatHelper.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2015, 2024, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2024, Alibaba Group Holding Limited. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +29,7 @@ import jdk.internal.misc.Unsafe; import jdk.internal.util.DecimalDigits; import jdk.internal.vm.annotation.ForceInline; +import jdk.internal.vm.annotation.Stable; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; @@ -39,6 +41,98 @@ * combinators there. */ final class StringConcatHelper { + static abstract class StringConcatBase { + @Stable + final String[] constants; + final int length; + final byte coder; + + StringConcatBase(String[] constants) { + int length = 0; + byte coder = String.LATIN1; + for (String c : constants) { + length += c.length(); + coder |= c.coder(); + } + this.constants = constants; + this.length = length; + this.coder = coder; + } + } + + static final class Concat1 extends StringConcatBase { + Concat1(String[] constants) { + super(constants); + } + + @ForceInline + String concat0(String value) { + int length = stringSize(this.length, value); + byte coder = (byte) (this.coder | value.coder()); + byte[] buf = newArray(length << coder); + String prefix = constants[0]; + prefix.getBytes(buf, 0, coder); + value.getBytes(buf, prefix.length(), coder); + constants[1].getBytes(buf, prefix.length() + value.length(), coder); + return new String(buf, coder); + } + + @ForceInline + String concat(boolean value) { + int length = stringSize(this.length, value); + String suffix = constants[1]; + length -= suffix.length(); + byte[] buf = newArrayWithSuffix(suffix, length, coder); + prepend(length, coder, buf, value, constants[0]); + return new String(buf, coder); + } + + @ForceInline + String concat(char value) { + int length = stringSize(this.length, value); + byte coder = (byte) (this.coder | stringCoder(value)); + String suffix = constants[1]; + length -= suffix.length(); + byte[] buf = newArrayWithSuffix(suffix, length, coder); + prepend(length, coder, buf, value, constants[0]); + return new String(buf, coder); + } + + @ForceInline + String concat(int value) { + int length = stringSize(this.length, value); + String suffix = constants[1]; + length -= suffix.length(); + byte[] buf = newArrayWithSuffix(suffix, length, coder); + prepend(length, coder, buf, value, constants[0]); + return new String(buf, coder); + } + + @ForceInline + String concat(long value) { + int length = stringSize(this.length, value); + String suffix = constants[1]; + length -= suffix.length(); + byte[] buf = newArrayWithSuffix(suffix, length, coder); + prepend(length, coder, buf, value, constants[0]); + return new String(buf, coder); + } + + @ForceInline + String concat(Object value) { + return concat0(stringOf(value)); + } + + @ForceInline + String concat(float value) { + return concat0(Float.toString(value)); + } + + @ForceInline + String concat(double value) { + return concat0(Double.toString(value)); + } + } private StringConcatHelper() { // no instantiation @@ -375,6 +469,64 @@ static String stringOf(Object value) { private static final Unsafe UNSAFE = Unsafe.getUnsafe(); + static String stringOf(float value) { + return Float.toString(value); + } + + static String stringOf(double value) { + return Double.toString(value); + } + + /** + * return add stringSize of value + * @param length length + * @param value value to add stringSize + * @return new length + */ + static int stringSize(int length, char value) { + return 
checkOverflow(length + 1); + } + + /** + * return add stringSize of value + * @param length length + * @param value value to add stringSize + * @return new length + */ + static int stringSize(int length, boolean value) { + return checkOverflow(length + (value ? 4 : 5)); + } + + /** + * return add stringSize of value + * @param length length + * @param value value + * @return new length + */ + static int stringSize(int length, int value) { + return checkOverflow(length + DecimalDigits.stringSize(value)); + } + + /** + * return add stringSize of value + * @param length length + * @param value value to add stringSize + * @return new length + */ + static int stringSize(int length, long value) { + return checkOverflow(length + DecimalDigits.stringSize(value)); + } + + /** + * return add stringSize of value + * @param length length + * @param value value to add stringSize + * @return new length + */ + static int stringSize(int length, String value) { + return checkOverflow(length + value.length()); + } + /** * Allocates an uninitialized byte array based on the length and coder * information, then prepends the given suffix string at the end of the @@ -440,4 +592,195 @@ static MethodHandle lookupStatic(String name, MethodType methodType) { } } + /** + * Allocates an uninitialized byte array based on the length and coder + * information, then prepends the given suffix string at the end of the + * byte array before returning it. The calling code must adjust the + * indexCoder so that it's taken the coder of the suffix into account, but + * subtracted the length of the suffix. + * + * @param suffix + * @param indexCoder + * @return the newly allocated byte array + */ + @ForceInline + static byte[] newArrayWithSuffix(String suffix, int index, byte coder) { + byte[] buf = newArray((index + suffix.length()) << coder); + if (coder == String.LATIN1) { + suffix.getBytes(buf, index, String.LATIN1); + } else { + suffix.getBytes(buf, index, String.UTF16); + } + return buf; + } + + /** + * Return the coder for the character. + * @param value character + * @return coder + */ + static byte stringCoder(char value) { + return StringLatin1.canEncode(value) ? String.LATIN1 : String.UTF16; + } + + /** + * Prepends constant and the stringly representation of value into buffer, + * given the coder and final index. Index is measured in chars, not in bytes! 
+ * + * @param index final char index in the buffer + * @param coder coder of the buffer + * @param buf buffer to append to + * @param value boolean value to encode + * @param prefix a constant to prepend before value + * @return updated index + */ + static int prepend(int index, byte coder, byte[] buf, boolean value, String prefix) { + if (coder == String.LATIN1) { + if (value) { + index -= 4; + buf[index] = 't'; + buf[index + 1] = 'r'; + buf[index + 2] = 'u'; + buf[index + 3] = 'e'; + } else { + index -= 5; + buf[index] = 'f'; + buf[index + 1] = 'a'; + buf[index + 2] = 'l'; + buf[index + 3] = 's'; + buf[index + 4] = 'e'; + } + index -= prefix.length(); + prefix.getBytes(buf, index, String.LATIN1); + } else { + if (value) { + index -= 4; + StringUTF16.putChar(buf, index, 't'); + StringUTF16.putChar(buf, index + 1, 'r'); + StringUTF16.putChar(buf, index + 2, 'u'); + StringUTF16.putChar(buf, index + 3, 'e'); + } else { + index -= 5; + StringUTF16.putChar(buf, index, 'f'); + StringUTF16.putChar(buf, index + 1, 'a'); + StringUTF16.putChar(buf, index + 2, 'l'); + StringUTF16.putChar(buf, index + 3, 's'); + StringUTF16.putChar(buf, index + 4, 'e'); + } + index -= prefix.length(); + prefix.getBytes(buf, index, String.UTF16); + } + return index; + } + + /** + * Prepends constant and the stringly representation of value into buffer, + * given the coder and final index. Index is measured in chars, not in bytes! + * + * @param index final char index in the buffer + * @param coder coder of the buffer + * @param buf buffer to append to + * @param value char value to encode + * @param prefix a constant to prepend before value + * @return updated index + */ + static int prepend(int index, byte coder, byte[] buf, char value, String prefix) { + if (coder == String.LATIN1) { + buf[--index] = (byte) (value & 0xFF); + index -= prefix.length(); + prefix.getBytes(buf, index, String.LATIN1); + } else { + StringUTF16.putChar(buf, --index, value); + index -= prefix.length(); + prefix.getBytes(buf, index, String.UTF16); + } + return index; + } + + /** + * Prepends constant and the stringly representation of value into buffer, + * given the coder and final index. Index is measured in chars, not in bytes! + * + * @param index final char index in the buffer + * @param coder coder of the buffer + * @param buf buffer to append to + * @param value int value to encode + * @param prefix a constant to prepend before value + * @return updated index + */ + static int prepend(int index, byte coder, byte[] buf, int value, String prefix) { + if (coder == String.LATIN1) { + index = StringLatin1.getChars(value, index, buf); + index -= prefix.length(); + prefix.getBytes(buf, index, String.LATIN1); + } else { + index = StringUTF16.getChars(value, index, buf); + index -= prefix.length(); + prefix.getBytes(buf, index, String.UTF16); + } + return index; + } + + /** + * Prepends constant and the stringly representation of value into buffer, + * given the coder and final index. Index is measured in chars, not in bytes! 
+ * + * @param index final char index in the buffer + * @param coder coder of the buffer + * @param buf buffer to append to + * @param value long value to encode + * @param prefix a constant to prepend before value + * @return updated index + */ + static int prepend(int index, byte coder, byte[] buf, long value, String prefix) { + if (coder == String.LATIN1) { + index = StringLatin1.getChars(value, index, buf); + index -= prefix.length(); + prefix.getBytes(buf, index, String.LATIN1); + } else { + index = StringUTF16.getChars(value, index, buf); + index -= prefix.length(); + prefix.getBytes(buf, index, String.UTF16); + } + return index; + } + + /** + * Prepends constant and the stringly representation of value into buffer, + * given the coder and final index. Index is measured in chars, not in bytes! + * + * @param index final char index in the buffer + * @param coder coder of the buffer + * @param buf buffer to append to + * @param value boolean value to encode + * @param prefix a constant to prepend before value + * @return updated index + */ + static int prepend(int index, byte coder, byte[] buf, String value, String prefix) { + index -= value.length(); + if (coder == String.LATIN1) { + value.getBytes(buf, index, String.LATIN1); + index -= prefix.length(); + prefix.getBytes(buf, index, String.LATIN1); + } else { + value.getBytes(buf, index, String.UTF16); + index -= prefix.length(); + prefix.getBytes(buf, index, String.UTF16); + } + return index; + } + + /** + * Check for overflow, throw exception on overflow. + * + * @param value + * @return the given parameter value, if valid + */ + @ForceInline + static int checkOverflow(int value) { + if (value >= 0) { + return value; + } + throw new OutOfMemoryError("Overflow: String length out of range"); + } } diff --git a/src/java.base/share/classes/java/lang/System.java b/src/java.base/share/classes/java/lang/System.java index 0947da8ded7..5ff4796505b 100644 --- a/src/java.base/share/classes/java/lang/System.java +++ b/src/java.base/share/classes/java/lang/System.java @@ -2623,6 +2623,10 @@ public long stringConcatMix(long lengthCoder, char value) { return StringConcatHelper.mix(lengthCoder, value); } + public Object stringConcat1(String[] constants) { + return new StringConcatHelper.Concat1(constants); + } + public int getCharsLatin1(long i, int index, byte[] buf) { return StringLatin1.getChars(i, index, buf); } diff --git a/src/java.base/share/classes/java/lang/invoke/StringConcatFactory.java b/src/java.base/share/classes/java/lang/invoke/StringConcatFactory.java index cf552c434be..dd262193574 100644 --- a/src/java.base/share/classes/java/lang/invoke/StringConcatFactory.java +++ b/src/java.base/share/classes/java/lang/invoke/StringConcatFactory.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, Alibaba Group Holding Limited. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,23 +31,33 @@ import jdk.internal.constant.ConstantUtils; import jdk.internal.misc.VM; import jdk.internal.util.ClassFileDumper; +import jdk.internal.util.ReferenceKey; +import jdk.internal.util.ReferencedKeyMap; import jdk.internal.vm.annotation.Stable; import sun.invoke.util.Wrapper; +import java.lang.classfile.Annotation; import java.lang.classfile.ClassBuilder; import java.lang.classfile.ClassFile; import java.lang.classfile.CodeBuilder; +import java.lang.classfile.MethodBuilder; import java.lang.classfile.TypeKind; +import java.lang.classfile.attribute.RuntimeVisibleAnnotationsAttribute; import java.lang.constant.ClassDesc; -import java.lang.constant.ConstantDescs; import java.lang.constant.MethodTypeDesc; import java.lang.invoke.MethodHandles.Lookup; -import java.lang.reflect.AccessFlag; +import java.lang.ref.SoftReference; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.function.Consumer; +import java.util.function.Supplier; -import static java.lang.invoke.MethodHandles.Lookup.ClassOption.STRONG; +import static java.lang.classfile.ClassFile.*; +import static java.lang.constant.ConstantDescs.*; import static java.lang.invoke.MethodType.methodType; /** @@ -107,12 +118,15 @@ * @since 9 */ public final class StringConcatFactory { - private static final int HIGH_ARITY_THRESHOLD; + private static final int FORCE_INLINE_THRESHOLD; static { String highArity = VM.getSavedProperty("java.lang.invoke.StringConcat.highArityThreshold"); - HIGH_ARITY_THRESHOLD = highArity != null ? Integer.parseInt(highArity) : 20; + HIGH_ARITY_THRESHOLD = highArity != null ? Integer.parseInt(highArity) : 0; + + String inlineThreshold = VM.getSavedProperty("java.lang.invoke.StringConcat.inlineThreshold"); + FORCE_INLINE_THRESHOLD = inlineThreshold != null ? Integer.parseInt(inlineThreshold) : 16; } /** @@ -371,14 +385,17 @@ public static CallSite makeConcatWithConstants(MethodHandles.Lookup lookup, } try { - if (concatType.parameterCount() <= HIGH_ARITY_THRESHOLD) { - return new ConstantCallSite( - generateMHInlineCopy(concatType, constantStrings) - .viewAsType(concatType, true)); - } else { - return new ConstantCallSite( - SimpleStringBuilderStrategy.generate(lookup, concatType, constantStrings)); + MethodHandle mh = makeSimpleConcat(concatType, constantStrings); + if (mh == null && concatType.parameterCount() <= HIGH_ARITY_THRESHOLD) { + mh = generateMHInlineCopy(concatType, constantStrings); } + + if (mh == null) { + mh = InlineHiddenClassStrategy.generate(lookup, concatType, constantStrings); + } + mh = mh.viewAsType(concatType, true); + + return new ConstantCallSite(mh); } catch (Error e) { // Pass through any error throw e; @@ -427,7 +444,7 @@ private static String[] parseRecipe(MethodType concatType, } // Flush any accumulated characters into a constant - consts[oCount++] = acc.length() > 0 ? acc.toString() : null; + consts[oCount++] = acc.length() > 0 ? acc.toString() : ""; acc.setLength(0); } else { // Not a special character, this is a constant embedded into @@ -443,7 +460,7 @@ private static String[] parseRecipe(MethodType concatType, } // Flush the remaining characters as constant: - consts[oCount] = acc.length() > 0 ? acc.toString() : null; + consts[oCount] = acc.length() > 0 ? 
acc.toString() : ""; return consts; } @@ -466,14 +483,7 @@ private static StringConcatException constantMismatch(Object[] constants, " are passed"); } - /** - *

This strategy replicates what StringBuilders are doing: it builds the - * byte[] array on its own and passes that byte[] array to String - * constructor. This strategy requires access to some private APIs in JDK, - * most notably, the private String constructor that accepts byte[] arrays - * without copying. - */ - private static MethodHandle generateMHInlineCopy(MethodType mt, String[] constants) { + private static MethodHandle makeSimpleConcat(MethodType mt, String[] constants) { int paramCount = mt.parameterCount(); String suffix = constants[paramCount]; @@ -484,22 +494,37 @@ private static MethodHandle generateMHInlineCopy(MethodType mt, String[] constan if (paramCount == 1) { String prefix = constants[0]; // Empty constants will be - if (prefix == null) { - if (suffix == null) { + if (prefix.isEmpty()) { + if (suffix.isEmpty()) { return unaryConcat(mt.parameterType(0)); } else if (!mt.hasPrimitives()) { return MethodHandles.insertArguments(simpleConcat(), 1, suffix); } // else fall-through - } else if (suffix == null && !mt.hasPrimitives()) { + } else if (suffix.isEmpty() && !mt.hasPrimitives()) { // Non-primitive argument return MethodHandles.insertArguments(simpleConcat(), 0, prefix); } // fall-through if there's both a prefix and suffix - } - if (paramCount == 2 && !mt.hasPrimitives() && suffix == null - && constants[0] == null && constants[1] == null) { + } else if (paramCount == 2 && !mt.hasPrimitives() && suffix.isEmpty() + && constants[0].isEmpty() && constants[1].isEmpty()) { // Two reference arguments, no surrounding constants return simpleConcat(); } + + return null; + } + + /** + *

This strategy replicates what StringBuilders are doing: it builds the + * byte[] array on its own and passes that byte[] array to String + * constructor. This strategy requires access to some private APIs in JDK, + * most notably, the private String constructor that accepts byte[] arrays + * without copying. + */ + private static MethodHandle generateMHInlineCopy(MethodType mt, String[] constants) { + int paramCount = mt.parameterCount(); + String suffix = constants[paramCount]; + + // else... fall-through to slow-path // Create filters and obtain filtered parameter types. Filters would be used in the beginning @@ -1043,139 +1068,638 @@ private StringConcatFactory() { } /** - * Bytecode StringBuilder strategy. + * Implement efficient hidden class strategy for String concatenation * - *

This strategy emits StringBuilder chains as similar as possible - * to what javac would. No exact sizing of parameters or estimates. + *

This strategy replicates based on the bytecode what StringBuilders are doing: it builds the + * byte[] array on its own and passes that byte[] array to String + * constructor. This strategy requires access to some private APIs in JDK, + * most notably, the private String constructor that accepts byte[] arrays + * without copying. */ - private static final class SimpleStringBuilderStrategy { - static final String METHOD_NAME = "concat"; - static final ClassDesc STRING_BUILDER = ClassDesc.ofDescriptor("Ljava/lang/StringBuilder;"); + private static final class InlineHiddenClassStrategy { + static final String CLASS_NAME = "java.lang.String$$StringConcat"; + static final String METHOD_NAME = "concat"; + static final ClassFileDumper DUMPER = ClassFileDumper.getInstance("java.lang.invoke.StringConcatFactory.dump", "stringConcatClasses"); - static final MethodTypeDesc APPEND_BOOLEAN_TYPE = MethodTypeDesc.of(STRING_BUILDER, ConstantDescs.CD_boolean); - static final MethodTypeDesc APPEND_CHAR_TYPE = MethodTypeDesc.of(STRING_BUILDER, ConstantDescs.CD_char); - static final MethodTypeDesc APPEND_DOUBLE_TYPE = MethodTypeDesc.of(STRING_BUILDER, ConstantDescs.CD_double); - static final MethodTypeDesc APPEND_FLOAT_TYPE = MethodTypeDesc.of(STRING_BUILDER, ConstantDescs.CD_float); - static final MethodTypeDesc APPEND_INT_TYPE = MethodTypeDesc.of(STRING_BUILDER, ConstantDescs.CD_int); - static final MethodTypeDesc APPEND_LONG_TYPE = MethodTypeDesc.of(STRING_BUILDER, ConstantDescs.CD_long); - static final MethodTypeDesc APPEND_OBJECT_TYPE = MethodTypeDesc.of(STRING_BUILDER, ConstantDescs.CD_Object); - static final MethodTypeDesc APPEND_STRING_TYPE = MethodTypeDesc.of(STRING_BUILDER, ConstantDescs.CD_String); - static final MethodTypeDesc INT_CONSTRUCTOR_TYPE = MethodTypeDesc.of(ConstantDescs.CD_void, ConstantDescs.CD_int); - static final MethodTypeDesc TO_STRING_TYPE = MethodTypeDesc.of(ConstantDescs.CD_String); + static final MethodHandles.Lookup STR_LOOKUP = new MethodHandles.Lookup(String.class); + + static final ClassDesc CD_CONCAT = ConstantUtils.binaryNameToDesc(CLASS_NAME); + static final ClassDesc CD_StringConcatHelper = ClassDesc.ofDescriptor("Ljava/lang/StringConcatHelper;"); + static final ClassDesc CD_StringConcatBase = ClassDesc.ofDescriptor("Ljava/lang/StringConcatHelper$StringConcatBase;"); + static final ClassDesc CD_Array_byte = ClassDesc.ofDescriptor("[B"); + static final ClassDesc CD_Array_String = ClassDesc.ofDescriptor("[Ljava/lang/String;"); + + static final MethodTypeDesc MTD_byte_char = MethodTypeDesc.of(CD_byte, CD_char); + static final MethodTypeDesc MTD_byte = MethodTypeDesc.of(CD_byte); + static final MethodTypeDesc MTD_int = MethodTypeDesc.of(CD_int); + static final MethodTypeDesc MTD_int_int_boolean = MethodTypeDesc.of(CD_int, CD_int, CD_boolean); + static final MethodTypeDesc MTD_int_int_char = MethodTypeDesc.of(CD_int, CD_int, CD_char); + static final MethodTypeDesc MTD_int_int_int = MethodTypeDesc.of(CD_int, CD_int, CD_int); + static final MethodTypeDesc MTD_int_int_long = MethodTypeDesc.of(CD_int, CD_int, CD_long); + static final MethodTypeDesc MTD_int_int_String = MethodTypeDesc.of(CD_int, CD_int, CD_String); + static final MethodTypeDesc MTD_String_float = MethodTypeDesc.of(CD_String, CD_float); + static final MethodTypeDesc MTD_String_double = MethodTypeDesc.of(CD_String, CD_double); + static final MethodTypeDesc MTD_String_Object = MethodTypeDesc.of(CD_String, CD_Object); + + static final MethodTypeDesc MTD_INIT = MethodTypeDesc.of(CD_void, CD_Array_String); + static 
final MethodTypeDesc MTD_NEW_ARRAY_SUFFIX = MethodTypeDesc.of(CD_Array_byte, CD_String, CD_int, CD_byte); + static final MethodTypeDesc MTD_STRING_INIT = MethodTypeDesc.of(CD_void, CD_Array_byte, CD_byte); + + static final MethodTypeDesc PREPEND_int = MethodTypeDesc.of(CD_int, CD_int, CD_byte, CD_Array_byte, CD_int, CD_String); + static final MethodTypeDesc PREPEND_long = MethodTypeDesc.of(CD_int, CD_int, CD_byte, CD_Array_byte, CD_long, CD_String); + static final MethodTypeDesc PREPEND_boolean = MethodTypeDesc.of(CD_int, CD_int, CD_byte, CD_Array_byte, CD_boolean, CD_String); + static final MethodTypeDesc PREPEND_char = MethodTypeDesc.of(CD_int, CD_int, CD_byte, CD_Array_byte, CD_char, CD_String); + static final MethodTypeDesc PREPEND_String = MethodTypeDesc.of(CD_int, CD_int, CD_byte, CD_Array_byte, CD_String, CD_String); + + static final RuntimeVisibleAnnotationsAttribute FORCE_INLINE = RuntimeVisibleAnnotationsAttribute.of(Annotation.of(ClassDesc.ofDescriptor("Ljdk/internal/vm/annotation/ForceInline;"))); + + static final MethodType CONSTRUCTOR_METHOD_TYPE = MethodType.methodType(void.class, String[].class); + static final Consumer CONSTRUCTOR_BUILDER = new Consumer() { + @Override + public void accept(CodeBuilder cb) { + /* + * super(constants); + */ + int thisSlot = cb.receiverSlot(), + constantsSlot = cb.parameterSlot(0); + cb.aload(thisSlot) + .aload(constantsSlot) + .invokespecial(CD_StringConcatBase, INIT_NAME, MTD_INIT, false) + .return_(); + } + }; + + static final ReferencedKeyMap> CACHE = + ReferencedKeyMap.create(true, true, + new Supplier<>() { + @Override + public Map, SoftReference> get() { + return new ConcurrentHashMap<>(64); + } + }); + + private InlineHiddenClassStrategy() { + // no instantiation + } + + private record MethodHandlePair(MethodHandle constructor, MethodHandle concatenator) { }; /** - * Ensure a capacity in the initial StringBuilder to accommodate all - * constants plus this factor times the number of arguments. + * The parameter types are normalized into 7 types: int,long,boolean,char,float,double,Object */ - static final int ARGUMENT_SIZE_FACTOR = 4; + private static MethodType erasedArgs(MethodType args) { + int parameterCount = args.parameterCount(); + var paramTypes = new Class[parameterCount]; + boolean changed = false; + for (int i = 0; i < parameterCount; i++) { + Class cl = args.parameterType(i); + // Use int as the logical type for subword integral types + // (byte and short). char and boolean require special + // handling so don't change the logical type of those + if (cl == byte.class || cl == short.class) { + cl = int.class; + changed = true; + } else if (cl != Object.class && !cl.isPrimitive()) { + cl = Object.class; + changed = true; + } + paramTypes[i] = cl; + } + return changed ? MethodType.methodType(args.returnType(), paramTypes) : args; + } - static final Set SET_OF_STRONG = Set.of(STRONG); + /** + * Construct the MethodType of the prepend method, The parameters only support 5 types: + * int/long/char/boolean/String. Not int/long/char/boolean type, use String type

+ * + * The following is an example of the generated target code: + *

+         *  int prepend(int length, byte coder, byte[] buff, String[] constants,
+         *      int arg0, long arg1, boolean arg2, char arg3, String arg4)
+         * 
+ */ + private static MethodTypeDesc prependArgs(MethodType concatArgs) { + int parameterCount = concatArgs.parameterCount(); + var paramTypes = new ClassDesc[parameterCount + 4]; + paramTypes[0] = CD_int; // length + paramTypes[1] = CD_byte; // coder + paramTypes[2] = CD_Array_byte; // buff + paramTypes[3] = CD_Array_String; // constants + + for (int i = 0; i < parameterCount; i++) { + var cl = concatArgs.parameterType(i); + paramTypes[i + 4] = needStringOf(cl) ? CD_String : ConstantUtils.classDesc(cl); + } + return MethodTypeDesc.of(CD_int, paramTypes); + } - private SimpleStringBuilderStrategy() { - // no instantiation + /** + * Construct the MethodType of the coder method, + * The first parameter is the initialized coder, Only parameter types that can be UTF16 are added. + */ + private static MethodTypeDesc coderArgs(MethodType concatArgs) { + int parameterCount = concatArgs.parameterCount(); + List paramTypes = new ArrayList<>(); + paramTypes.add(CD_int); // init coder + for (int i = 0; i < parameterCount; i++) { + var cl = concatArgs.parameterType(i); + if (maybeUTF16(cl)) { + paramTypes.add(cl == char.class ? CD_char : CD_String); + } + } + return MethodTypeDesc.of(CD_int, paramTypes); + } + + /** + * Construct the MethodType of the length method, + * The first parameter is the initialized length + */ + private static MethodTypeDesc lengthArgs(MethodType concatArgs) { + int parameterCount = concatArgs.parameterCount(); + var paramTypes = new ClassDesc[parameterCount + 1]; + paramTypes[0] = CD_int; // init long + for (int i = 0; i < parameterCount; i++) { + var cl = concatArgs.parameterType(i); + paramTypes[i + 1] = needStringOf(cl) ? CD_String : ConstantUtils.classDesc(cl); + } + return MethodTypeDesc.of(CD_int, paramTypes); } private static MethodHandle generate(Lookup lookup, MethodType args, String[] constants) throws Exception { - String className = getClassName(lookup.lookupClass()); + lookup = STR_LOOKUP; + final MethodType concatArgs = erasedArgs(args); + + // 1 argment use built-in method + if (args.parameterCount() == 1) { + Object concat1 = JLA.stringConcat1(constants); + var handle = lookup.findVirtual(concat1.getClass(), METHOD_NAME, concatArgs); + return handle.bindTo(concat1); + } - byte[] classBytes = ClassFile.of().build(ConstantUtils.binaryNameToDesc(className), + var weakConstructorHandle = CACHE.get(concatArgs); + if (weakConstructorHandle != null) { + MethodHandlePair handlePair = weakConstructorHandle.get(); + if (handlePair != null) { + try { + var instance = handlePair.constructor.invoke(constants); + return handlePair.concatenator.bindTo(instance); + } catch (Throwable e) { + throw new StringConcatException("Exception while utilizing the hidden class", e); + } + } + } + MethodTypeDesc lengthArgs = lengthArgs(concatArgs), + coderArgs = parameterMaybeUTF16(concatArgs) ? 
coderArgs(concatArgs) : null, + prependArgs = prependArgs(concatArgs); + + byte[] classBytes = ClassFile.of().build(CD_CONCAT, new Consumer() { + final boolean forceInline = concatArgs.parameterCount() < FORCE_INLINE_THRESHOLD; + @Override public void accept(ClassBuilder clb) { - clb.withFlags(AccessFlag.FINAL, AccessFlag.SUPER, AccessFlag.SYNTHETIC) - .withMethodBody(METHOD_NAME, - ConstantUtils.methodTypeDesc(args), - ClassFile.ACC_FINAL | ClassFile.ACC_PRIVATE | ClassFile.ACC_STATIC, - generateMethod(constants, args)); + clb.withSuperclass(CD_StringConcatBase) + .withFlags(ACC_FINAL | ACC_SUPER | ACC_SYNTHETIC) + .withMethodBody(INIT_NAME, MTD_INIT, 0, CONSTRUCTOR_BUILDER) + .withMethod("length", + lengthArgs, + ACC_STATIC | ACC_PRIVATE, + new Consumer() { + public void accept(MethodBuilder mb) { + if (forceInline) { + mb.with(FORCE_INLINE); + } + mb.withCode(generateLengthMethod(lengthArgs)); + } + }) + .withMethod("prepend", + prependArgs, + ACC_STATIC | ACC_PRIVATE, + new Consumer() { + public void accept(MethodBuilder mb) { + if (forceInline) { + mb.with(FORCE_INLINE); + } + mb.withCode(generatePrependMethod(prependArgs)); + } + }) + .withMethod(METHOD_NAME, + ConstantUtils.methodTypeDesc(concatArgs), + ACC_FINAL, + new Consumer() { + public void accept(MethodBuilder mb) { + if (forceInline) { + mb.with(FORCE_INLINE); + } + mb.withCode(generateConcatMethod( + CD_CONCAT, + concatArgs, + lengthArgs, + coderArgs, + prependArgs)); + } + }); + + if (coderArgs != null) { + clb.withMethod("coder", + coderArgs, + ACC_STATIC | ACC_PRIVATE, + new Consumer() { + public void accept(MethodBuilder mb) { + if (forceInline) { + mb.with(FORCE_INLINE); + } + mb.withCode(generateCoderMethod(coderArgs)); + } + }); + } }}); try { - Lookup hiddenLookup = lookup.makeHiddenClassDefiner(className, classBytes, SET_OF_STRONG, DUMPER) - .defineClassAsLookup(true); - Class innerClass = hiddenLookup.lookupClass(); - return hiddenLookup.findStatic(innerClass, METHOD_NAME, args); - } catch (Exception e) { + var hiddenClass = lookup.makeHiddenClassDefiner(CLASS_NAME, classBytes, Set.of(), DUMPER) + .defineClass(true, null); + var constructor = lookup.findConstructor(hiddenClass, CONSTRUCTOR_METHOD_TYPE); + var concat = lookup.findVirtual(hiddenClass, METHOD_NAME, concatArgs); + CACHE.put(concatArgs, new SoftReference<>(new MethodHandlePair(constructor, concat))); + var instance = hiddenClass.cast(constructor.invoke(constants)); + return concat.bindTo(instance); + } catch (Throwable e) { throw new StringConcatException("Exception while spinning the class", e); } } - private static Consumer generateMethod(String[] constants, MethodType args) { + /** + * Generate InlineCopy-based code.

+ * + * The following is an example of the generated target code: + * + *

+         *  import static java.lang.StringConcatHelper.newArrayWithSuffix;
+         *  import static java.lang.StringConcatHelper.prepend;
+         *  import static java.lang.StringConcatHelper.stringCoder;
+         *  import static java.lang.StringConcatHelper.stringSize;
+         *
+         *  class StringConcat extends java.lang.StringConcatHelper.StringConcatBase {
+         *      // super class defines
+         *      // String[] constants;
+         *      // int length;
+         *      // byte coder;
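+         *      // (the StringConcatBase constructor precomputes length and coder from the constants)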
+         *
+         *      StringConcat(String[] constants) {
+         *          super(constants);
+         *      }
+         *
+         *      String concat(int arg0, long arg1, boolean arg2, char arg3, String arg4,
+         *          float arg5, double arg6, Object arg7
+         *      ) {
+         *          // Types other than byte/short/int/long/char/boolean require a local variable to store the result of stringOf
+         *          String str4 = stringOf(arg4);
+         *          String str5 = stringOf(arg5);
+         *          String str6 = stringOf(arg6);
+         *          String str7 = stringOf(arg7);
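+         *          // stringOf also maps a null argument (or a null toString() result) to the string "null"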
+         *
+         *          int coder  = coder(this.coder, arg3, str4, str5, str6, str7);
+         *          int length = length(this.length, arg0, arg1, arg2, arg3, str4, str5, str6, str7);
+         *          String[] constants = this.constants;
+         *          byte[] buf = newArrayWithSuffix(constants[paramCount], length, coder);
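+         *          // newArrayWithSuffix allocates buf and writes the trailing constant into its tail up front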
+         *
+         *          prepend(length, coder, buf, constants, arg0, arg1, arg2, arg3, str4, str5, str6, str7);
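+         *          // prepend fills buf from right to left, writing each value and then the constant that precedes it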
+         *
+         *          return new String(buf, coder);
+         *      }
+         *
+         *      static int length(int length, int arg0, long arg1, boolean arg2, char arg3,
+         *                       String arg4, String arg5, String arg6, String arg7) {
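+         *          // each stringSize call adds the string length of one value and checks for int overflow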
+         *          return stringSize(stringSize(stringSize(stringSize(stringSize(stringSize(stringSize(stringSize(
+         *                      length, arg0), arg1), arg2), arg3), arg4), arg5), arg6), arg7);
+         *      }
+         *
+         *      static int coder(int coder, char arg3, String str4, String str5, String str6, String str7) {
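+         *          // String.LATIN1 is 0 and String.UTF16 is 1, so OR-ing the coders selects UTF16 if any value needs it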
+         *          return coder | stringCoder(arg3) | str4.coder() | str5.coder() | str6.coder() | str7.coder();
+         *      }
+         *
+         *      static int prepend(int length, int coder, byte[] buf, String[] constants,
+         *                     int arg0, long arg1, boolean arg2, char arg3,
+         *                     String str4, String str5, String str6, String str7) {
+         *          // StringConcatHelper.prepend
+         *          return prepend(prepend(prepend(prepend(
+         *                  prepend(prepend(prepend(prepend(length,
+         *                       buf, str7, constant[7]), buf, str6, constant[6]),
+         *                       buf, str5, constant[5]), buf, str4, constant[4]),
+         *                       buf, arg3, constant[3]), buf, arg2, constant[2]),
+         *                       buf, arg1, constant[1]), buf, arg0, constant[0]);
+         *      }
+         *  }
+         * 
+ */ + private static Consumer generateConcatMethod( + ClassDesc concatClass, + MethodType concatArgs, + MethodTypeDesc lengthArgs, + MethodTypeDesc coderArgs, + MethodTypeDesc prependArgs + ) { return new Consumer() { @Override public void accept(CodeBuilder cb) { - cb.new_(STRING_BUILDER); - cb.dup(); - - int len = 0; - for (String constant : constants) { - if (constant != null) { - len += constant.length(); + // Compute parameter variable slots + int paramCount = concatArgs.parameterCount(), + thisSlot = cb.receiverSlot(), + lengthSlot = cb.allocateLocal(TypeKind.IntType), + coderSlot = cb.allocateLocal(TypeKind.ByteType), + bufSlot = cb.allocateLocal(TypeKind.ReferenceType), + constantsSlot = cb.allocateLocal(TypeKind.ReferenceType), + suffixSlot = cb.allocateLocal(TypeKind.ReferenceType); + + /* + * Types other than int/long/char/boolean require local variables to store the result of stringOf. + * + * stringSlots stores the slots of parameters relative to local variables + * + * str0 = stringOf(arg0); + * str1 = stringOf(arg1); + * ... + * strN = toString(argN); + */ + int[] stringSlots = new int[paramCount]; + for (int i = 0; i < paramCount; i++) { + var cl = concatArgs.parameterType(i); + if (needStringOf(cl)) { + MethodTypeDesc methodTypeDesc; + if (cl == float.class) { + methodTypeDesc = MTD_String_float; + } else if (cl == double.class) { + methodTypeDesc = MTD_String_double; + } else { + methodTypeDesc = MTD_String_Object; + } + stringSlots[i] = cb.allocateLocal(TypeKind.ReferenceType); + cb.loadLocal(TypeKind.from(cl), cb.parameterSlot(i)) + .invokestatic(CD_StringConcatHelper, "stringOf", methodTypeDesc) + .astore(stringSlots[i]); } } - len += args.parameterCount() * ARGUMENT_SIZE_FACTOR; - cb.loadConstant(len); - cb.invokespecial(STRING_BUILDER, "", INT_CONSTRUCTOR_TYPE); - - // At this point, we have a blank StringBuilder on stack, fill it in with .append calls. - { - int off = 0; - for (int c = 0; c < args.parameterCount(); c++) { - if (constants[c] != null) { - cb.ldc(constants[c]); - cb.invokevirtual(STRING_BUILDER, "append", APPEND_STRING_TYPE); + + /* + * coder = coder(this.coder, arg0, arg1, ... 
argN); + */ + cb.aload(thisSlot) + .getfield(concatClass, "coder", CD_byte); + if (coderArgs != null) { + for (int i = 0; i < paramCount; i++) { + var cl = concatArgs.parameterType(i); + if (maybeUTF16(cl)) { + if (cl == char.class) { + cb.loadLocal(TypeKind.CharType, cb.parameterSlot(i)); + } else { + cb.aload(stringSlots[i]); + } } - Class cl = args.parameterType(c); - TypeKind kind = TypeKind.from(cl); - cb.loadLocal(kind, off); - off += kind.slotSize(); - MethodTypeDesc desc = getSBAppendDesc(cl); - cb.invokevirtual(STRING_BUILDER, "append", desc); } - if (constants[constants.length - 1] != null) { - cb.ldc(constants[constants.length - 1]); - cb.invokevirtual(STRING_BUILDER, "append", APPEND_STRING_TYPE); + cb.invokestatic(concatClass, "coder", coderArgs); + } + cb.istore(coderSlot); + + /* + * length = length(this.length, arg0, arg1, ..., argN); + */ + cb.aload(thisSlot) + .getfield(concatClass, "length", CD_int); + for (int i = 0; i < paramCount; i++) { + var cl = concatArgs.parameterType(i); + int paramSlot = cb.parameterSlot(i); + if (needStringOf(cl)) { + paramSlot = stringSlots[i]; + cl = String.class; + } + cb.loadLocal(TypeKind.from(cl), paramSlot); + } + cb.invokestatic(concatClass, "length", lengthArgs); + + /* + * String[] constants = this.constants; + * suffix = constants[paranCount]; + * length -= suffix.length(); + */ + cb.aload(thisSlot) + .getfield(concatClass, "constants", CD_Array_String) + .dup() + .astore(constantsSlot) + .ldc(paramCount) + .aaload() + .dup() + .astore(suffixSlot) + .invokevirtual(CD_String, "length", MTD_int) + .isub() + .istore(lengthSlot); + + /* + * Allocate buffer : + * + * buf = newArrayWithSuffix(suffix, length, coder) + */ + cb.aload(suffixSlot) + .iload(lengthSlot) + .iload(coderSlot) + .invokestatic(CD_StringConcatHelper, "newArrayWithSuffix", MTD_NEW_ARRAY_SUFFIX) + .astore(bufSlot); + + /* + * prepend(length, coder, buf, constants, ar0, ar1, ..., argN); + */ + cb.iload(lengthSlot) + .iload(coderSlot) + .aload(bufSlot) + .aload(constantsSlot); + for (int i = 0; i < paramCount; i++) { + var cl = concatArgs.parameterType(i); + int paramSlot = cb.parameterSlot(i); + var kind = TypeKind.from(cl); + if (needStringOf(cl)) { + paramSlot = stringSlots[i]; + kind = TypeKind.ReferenceType; } + cb.loadLocal(kind, paramSlot); } + cb.invokestatic(concatClass, "prepend", prependArgs); + + // return new String(buf, coder); + cb.new_(CD_String) + .dup() + .aload(bufSlot) + .iload(coderSlot) + .invokespecial(CD_String, INIT_NAME, MTD_STRING_INIT) + .areturn(); + } + }; + } - cb.invokevirtual(STRING_BUILDER, "toString", TO_STRING_TYPE); - cb.areturn(); + /** + * Generate length method.

+ * + * The following is an example of the generated target code: + * + *

+         * import static java.lang.StringConcatHelper.stringSize;
+         *
+         * static int length(int length, int arg0, long arg1, boolean arg2, char arg3,
+         *                  String arg4, String arg5, String arg6, String arg7) {
+         *     return stringSize(stringSize(stringSize(length, arg0), arg1), ..., arg7);
+         * }
+         * 
+ */ + private static Consumer generateLengthMethod(MethodTypeDesc lengthArgs) { + return new Consumer() { + @Override + public void accept(CodeBuilder cb) { + int lengthSlot = cb.parameterSlot(0); + cb.iload(lengthSlot); + for (int i = 1; i < lengthArgs.parameterCount(); i++) { + var cl = lengthArgs.parameterType(i); + MethodTypeDesc methodTypeDesc; + if (cl == CD_char) { + methodTypeDesc = MTD_int_int_char; + } else if (cl == CD_int) { + methodTypeDesc = MTD_int_int_int; + } else if (cl == CD_long) { + methodTypeDesc = MTD_int_int_long; + } else if (cl == CD_boolean) { + methodTypeDesc = MTD_int_int_boolean; + } else { + methodTypeDesc = MTD_int_int_String; + } + cb.loadLocal(TypeKind.from(cl), cb.parameterSlot(i)) + .invokestatic(CD_StringConcatHelper, "stringSize", methodTypeDesc); + } + cb.ireturn(); } }; } /** - * The generated class is in the same package as the host class as - * it's the implementation of the string concatenation for the host - * class. + * Generate coder method.

+ * + * The following is an example of the generated target code: + * + *

+         * import static java.lang.StringConcatHelper.stringCoder;
+         *
+         * static int coder(int coder, char arg3, String str4, String str5, String str6, String str7) {
+         *     return coder | stringCoder(arg3) | str4.coder() | str5.coder() | str6.coder() | str7.coder();
+         * }
+         * 
*/ - private static String getClassName(Class hostClass) { - String name = hostClass.isHidden() ? hostClass.getName().replace('/', '_') - : hostClass.getName(); - return name + "$$StringConcat"; + private static Consumer generateCoderMethod(MethodTypeDesc coderArgs) { + return new Consumer() { + @Override + public void accept(CodeBuilder cb) { + /* + * return coder | stringCoder(argN) | ... | arg1.coder() | arg0.coder(); + */ + int coderSlot = cb.parameterSlot(0); + cb.iload(coderSlot); + for (int i = 1; i < coderArgs.parameterCount(); i++) { + var cl = coderArgs.parameterType(i); + cb.loadLocal(TypeKind.from(cl), cb.parameterSlot(i)); + if (cl == CD_char) { + cb.invokestatic(CD_StringConcatHelper, "stringCoder", MTD_byte_char); + } else { + cb.invokevirtual(CD_String, "coder", MTD_byte); + } + cb.ior(); + } + cb.ireturn(); + } + }; } - private static MethodTypeDesc getSBAppendDesc(Class cl) { - if (cl.isPrimitive()) { - if (cl == Integer.TYPE || cl == Byte.TYPE || cl == Short.TYPE) { - return APPEND_INT_TYPE; - } else if (cl == Boolean.TYPE) { - return APPEND_BOOLEAN_TYPE; - } else if (cl == Character.TYPE) { - return APPEND_CHAR_TYPE; - } else if (cl == Double.TYPE) { - return APPEND_DOUBLE_TYPE; - } else if (cl == Float.TYPE) { - return APPEND_FLOAT_TYPE; - } else if (cl == Long.TYPE) { - return APPEND_LONG_TYPE; - } else { - throw new IllegalStateException("Unhandled primitive StringBuilder.append: " + cl); + /** + * Generate prepend method.

+ * + * The following is an example of the generated target code: + * + *

+         * import static java.lang.StringConcatHelper.prepend;
+         *
+         * static int prepend(int length, int coder, byte[] buf, String[] constants,
+         *                int arg0, long arg1, boolean arg2, char arg3,
+         *                String str4, String str5, String str6, String str7) {
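+         *     // constants[i] is the literal preceding argument i; the trailing constant is handled separately by newArrayWithSuffix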
+         *
+         *     return prepend(prepend(prepend(prepend(
+         *             prepend(prepend(prepend(prepend(length,
+         *                  buf, str7, constant[7]), buf, str6, constant[6]),
+         *                  buf, str5, constant[5]), buf, str4, constant[4]),
+         *                  buf, arg3, constant[3]), buf, arg2, constant[2]),
+         *                  buf, arg1, constant[1]), buf, arg0, constant[0]);
+         * }
+         * 
+ */ + private static Consumer generatePrependMethod(MethodTypeDesc prependArgs) { + return new Consumer() { + @Override + public void accept(CodeBuilder cb) { + // Compute parameter variable slots + int lengthSlot = cb.parameterSlot(0), + coderSlot = cb.parameterSlot(1), + bufSlot = cb.parameterSlot(2), + constantsSlot = cb.parameterSlot(3); + /* + * // StringConcatHelper.prepend + * return prepend(prepend(prepend(prepend( + * prepend(apppend(prepend(prepend(length, + * buf, str7, constant[7]), buf, str6, constant[6]), + * buf, str5, constant[5]), buf, arg4, constant[4]), + * buf, arg3, constant[3]), buf, arg2, constant[2]), + * buf, arg1, constant[1]), buf, arg0, constant[0]); + */ + cb.iload(lengthSlot); + for (int i = prependArgs.parameterCount() - 1; i >= 4; i--) { + var cl = prependArgs.parameterType(i); + var kind = TypeKind.from(cl); + + // There are only 5 types of parameters: int, long, boolean, char, String + MethodTypeDesc methodTypeDesc; + if (cl == CD_int) { + methodTypeDesc = PREPEND_int; + } else if (cl == CD_long) { + methodTypeDesc = PREPEND_long; + } else if (cl == CD_boolean) { + methodTypeDesc = PREPEND_boolean; + } else if (cl == CD_char) { + methodTypeDesc = PREPEND_char; + } else { + kind = TypeKind.ReferenceType; + methodTypeDesc = PREPEND_String; + } + + cb.iload(coderSlot) + .aload(bufSlot) + .loadLocal(kind, cb.parameterSlot(i)) + .aload(constantsSlot) + .ldc(i - 4) + .aaload() + .invokestatic(CD_StringConcatHelper, "prepend", methodTypeDesc); + } + cb.ireturn(); + } + }; + } + + static boolean needStringOf(Class cl) { + return cl != int.class && cl != long.class && cl != boolean.class && cl != char.class; + } + + static boolean maybeUTF16(Class cl) { + return cl == char.class || !cl.isPrimitive(); + } + + static boolean parameterMaybeUTF16(MethodType args) { + for (int i = 0; i < args.parameterCount(); i++) { + if (maybeUTF16(args.parameterType(i))) { + return true; } - } else if (cl == String.class) { - return APPEND_STRING_TYPE; - } else { - return APPEND_OBJECT_TYPE; } + return false; } } } diff --git a/src/java.base/share/classes/jdk/internal/access/JavaLangAccess.java b/src/java.base/share/classes/jdk/internal/access/JavaLangAccess.java index 24aeabf6d36..e4d322a20d7 100644 --- a/src/java.base/share/classes/jdk/internal/access/JavaLangAccess.java +++ b/src/java.base/share/classes/jdk/internal/access/JavaLangAccess.java @@ -446,6 +446,8 @@ public interface JavaLangAccess { */ long stringConcatMix(long lengthCoder, char value); + Object stringConcat1(String[] constants); + /** * Join strings */ diff --git a/src/java.base/share/classes/jdk/internal/util/ClassFileDumper.java b/src/java.base/share/classes/jdk/internal/util/ClassFileDumper.java index afb3d1374ab..b104b56ac0e 100644 --- a/src/java.base/share/classes/jdk/internal/util/ClassFileDumper.java +++ b/src/java.base/share/classes/jdk/internal/util/ClassFileDumper.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,6 @@ package jdk.internal.util; import jdk.internal.misc.VM; -import sun.security.action.GetPropertyAction; import java.io.IOException; import java.nio.file.Files; @@ -80,7 +79,11 @@ public static ClassFileDumper getInstance(String key, String path) { private final AtomicInteger counter = new AtomicInteger(); private ClassFileDumper(String key, String path) { - String value = GetPropertyAction.privilegedGetProperty(key); + /* + * GetPropertyAction.privilegedGetProperty cannot be used here, Using VM.getSavedProperty to avoid a bootstrap + * circularity issue in the java/lang/String/concat/WithSecurityManager.java test + */ + String value = VM.getSavedProperty(key); this.key = key; boolean enabled = value != null && value.isEmpty() ? true : Boolean.parseBoolean(value); if (enabled) { diff --git a/test/jdk/java/lang/String/concat/HiddenClassUnloading.java b/test/jdk/java/lang/String/concat/HiddenClassUnloading.java new file mode 100644 index 00000000000..9e682f38ef5 --- /dev/null +++ b/test/jdk/java/lang/String/concat/HiddenClassUnloading.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2024, Alibaba Group Holding Limited. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import java.lang.StringBuilder; + +import java.lang.invoke.*; +import java.lang.management.ManagementFactory; + +/** + * @test + * @summary Test whether the hidden class unloading of StringConcatFactory works + * + * @requires vm.flagless + * @run main/othervm -Xmx8M -Xms8M -Xverify:all HiddenClassUnloading + * @run main/othervm -Xmx8M -Xms8M -Xverify:all -XX:-CompactStrings HiddenClassUnloading + */ +public class HiddenClassUnloading { + public static void main(String[] args) throws Throwable { + var lookup = MethodHandles.lookup(); + var types = new Class[] { + int.class, long.class, double.class, float.class, char.class, boolean.class, String.class, + }; + + long initUnloadedClassCount = ManagementFactory.getClassLoadingMXBean().getUnloadedClassCount(); + + for (int i = 0; i < 2000; i++) { + int radix = types.length; + String str = Integer.toString(i, radix); + int length = str.length(); + var ptypes = new Class[length]; + for (int j = 0; j < length; j++) { + int index = Integer.parseInt(str.substring(j, j + 1), radix); + ptypes[j] = types[index]; + } + StringConcatFactory.makeConcatWithConstants( + lookup, + "concat", + MethodType.methodType(String.class, ptypes), + "\1".repeat(length), // recipe + new Object[0] + ); + } + + long unloadedClassCount = ManagementFactory.getClassLoadingMXBean().getUnloadedClassCount(); + if (initUnloadedClassCount == unloadedClassCount) { + throw new RuntimeException("unloadedClassCount is zero"); + } + } +} diff --git a/test/micro/org/openjdk/bench/java/lang/StringConcat.java b/test/micro/org/openjdk/bench/java/lang/StringConcat.java index 015ad224631..e1b8d882dd9 100644 --- a/test/micro/org/openjdk/bench/java/lang/StringConcat.java +++ b/test/micro/org/openjdk/bench/java/lang/StringConcat.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, Alibaba Group Holding Limited. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -48,22 +49,40 @@ public class StringConcat { @Param("4711") public int intValue; - + public Integer integerValue = intValue; + public float floatValue = 156456.36435637F + intValue; public String stringValue = String.valueOf(intValue); - public Object objectValue = Long.valueOf(intValue); - public boolean boolValue = true; - + public Boolean booleanValue = Boolean.TRUE; public byte byteValue = (byte)-128; - public String emptyString = ""; + @Benchmark + public String concatConstBool() { + return "string" + boolValue; + } + + @Benchmark + public String concatConstBoolean() { + return "string" + booleanValue; + } + @Benchmark public String concatConstInt() { return "string" + intValue; } + @Benchmark + public String concatConstInteger() { + return "string" + integerValue; + } + + @Benchmark + public String concatConstFloat() { + return "string" + floatValue; + } + @Benchmark public String concatConstString() { return "string" + stringValue; @@ -94,6 +113,31 @@ public String concatMethodConstString() { return "string".concat(stringValue); } + @Benchmark + public String concatConstBoolString() { + return "string" + boolValue + stringValue; + } + + @Benchmark + public String concatConstBooleanString() { + return "string" + booleanValue + stringValue; + } + + @Benchmark + public String concatConstIntString() { + return "string" + intValue + stringValue; + } + + @Benchmark + public String concatConstIntegerString() { + return "string" + integerValue + stringValue; + } + + @Benchmark + public String concatConstFloatString() { + return "string" + floatValue + stringValue; + } + @Benchmark public String concatConstIntConstInt() { return "string" + intValue + "string" + intValue; @@ -104,6 +148,36 @@ public String concatConstStringConstInt() { return "string" + stringValue + "string" + intValue; } + @Benchmark + public String concatConstStringConst() { + return "string" + stringValue + "string"; + } + + @Benchmark + public String concatConstIntConst() { + return "string" + intValue + "string"; + } + + @Benchmark + public String concatConstIntegerConst() { + return "string" + integerValue + "string"; + } + + @Benchmark + public String concatConstFloatConst() { + return "string" + floatValue + "string"; + } + + @Benchmark + public String concatConstObjectConst() { + return "string" + objectValue + "string"; + } + + @Benchmark + public String concatConstBooleanConst() { + return "string" + booleanValue + "string"; + } + @Benchmark public String concatMix4String() { // Investigate "profile pollution" between shared LFs that might eliminate some JIT optimizations @@ -114,6 +188,31 @@ public String concatMix4String() { return s1 + s2 + s3 + s4; } + @Benchmark + public String concat3String() { + return stringValue + stringValue + stringValue; + } + + @Benchmark + public String concatStringBoolString() { + return stringValue + boolValue + stringValue; + } + + @Benchmark + public String concatStringBooleanString() { + return stringValue + booleanValue + stringValue; + } + + @Benchmark + public String concatStringIntString() { + return stringValue + intValue + stringValue; + } + + @Benchmark + public String concatStringIntegerString() { + return stringValue + integerValue + stringValue; + } + @Benchmark public String concatConst4String() { return "string" + stringValue + stringValue + stringValue + stringValue; @@ -176,6 +275,15 @@ public String concat23String() { + f10 + ","+ f11 + ","+ f12 + ","+ f13 + ","+ f14 + ","+ f15 + ","+ f16 
+ ","+ f17 + ","+ f18 + ","+ f19 + "," + f20 + ","+ f21 + ","+ f22; } + + @Benchmark + public String concat30Mix() { + return f0 + "," + f1 + ","+ f2 + ","+ f3 + ","+ f4 + ","+ f5 + ","+ f6 + ","+ f7 + ","+ f8 + ","+ f9 + "," + +f10 + ","+f11 + ","+f12 + ","+ f13 + ","+ f14 + ","+ f15 + ","+ f16 + ","+ f17 + ","+ f18 + ","+ f19 + "," + +f20 + ","+f21 + ","+f22 + "," + boolValue + "," + booleanValue + "," + intValue + "," + integerValue + + "," + floatValue + "," + byteValue + "," + objectValue; + } + @Benchmark public String concat123String() { return f0 + ","+ f1 + ","+ f2 + ","+ f3 + ","+ f4 + ","+ f5 + ","+ f6 + ","+ f7 + ","+ f8 + ","+ f9 + "," @@ -193,9 +301,38 @@ public String concat123String() { +f120 + ","+f121 + ","+f122; } + @Benchmark + public String concat13StringConst() { + return f0 + f1 + f2 + f3 + f4 + + f5 + f6 + f7 + f8 + f9 + +f10 + f11 + f12 + """ + A really long constant string. Such as a copyright header: + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + """; + } + @Benchmark public String concat23StringConst() { - return f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + f0 + """ + return f0 + f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8 + f9 + f10 + f11 + f12 + f13 + f14 + f15 + f16 + f17 + f18 + f19 + f20 + f21 + f22 + """ A really long constant string. Such as a copyright header: * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. diff --git a/test/micro/org/openjdk/bench/java/lang/StringConcatStartup.java b/test/micro/org/openjdk/bench/java/lang/StringConcatStartup.java index d146fbf9885..cb3d09f94ad 100644 --- a/test/micro/org/openjdk/bench/java/lang/StringConcatStartup.java +++ b/test/micro/org/openjdk/bench/java/lang/StringConcatStartup.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, Alibaba Group Holding Limited. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +28,7 @@ import org.openjdk.jmh.annotations.Fork; import org.openjdk.jmh.annotations.Mode; import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; import org.openjdk.jmh.annotations.Scope; import org.openjdk.jmh.annotations.State; @@ -44,13 +46,31 @@ public class StringConcatStartup { public static void main(String... 
args) { - String[] selection = new String[] { "StringLarge", "MixedSmall", "StringSingle", "MixedLarge" }; + String[] selection = { + "StringLarge", + "MixedSmall", + "StringSingle", + "StringThree", + "MixedLarge" + }; if (args.length > 0) { selection = args; } for (String select : selection) { switch (select) { - case "StringSingle" -> new StringSingle().run(); + case "StringSingle" -> { + new StringSingle().constInt(); + new StringSingle().constFloat(); + new StringSingle().constString(); + new StringSingle().const2String(); + new StringSingle().constIntString(); + new StringSingle().constFloatString(); + new StringSingle().constBooleanString(); + } + case "StringThree" -> { + new StringThree().stringIntString(); + new StringThree().stringIntegerString(); + } case "MixedSmall" -> new MixedSmall().run(); case "StringLarge" -> new StringLarge().run(); case "MixedLarge" -> new MixedLarge().run(); @@ -64,14 +84,95 @@ public static void main(String... args) { @Fork(value = 40, warmups = 2) public static class StringSingle { - public String s = "foo"; + @Param("4711") + public int intValue; + public Integer integerValue = intValue; + public float floatValue = 156456.36435637F + intValue; + public String stringValue = String.valueOf(intValue); + public boolean boolValue = true; + public Boolean booleanValue = Boolean.TRUE; @Benchmark - public String run() { - return "" + s; + public String constBool() { + return "string" + boolValue; + } + + @Benchmark + public String constBoolean() { + return "string" + booleanValue; + } + + @Benchmark + public String constInt() { + return "string" + intValue; + } + + @Benchmark + public String constInteger() { + return "string" + integerValue; + } + + @Benchmark + public String constFloat() { + return "string" + floatValue; + } + + @Benchmark + public String constString() { + return "string" + stringValue; + } + + public String const2String() { + return "string" + stringValue + stringValue; + } + + @Benchmark + public String constIntString() { + return "string" + intValue + stringValue; + } + + @Benchmark + public String constIntegerString() { + return "string" + integerValue + stringValue; + } + + @Benchmark + public String constFloatString() { + return "string" + floatValue + stringValue; + } + + @Benchmark + public String constBoolString() { + return "string" + boolValue + stringValue; + } + + @Benchmark + public String constBooleanString() { + return "string" + booleanValue + stringValue; } } + @BenchmarkMode(Mode.SingleShotTime) + @OutputTimeUnit(TimeUnit.MILLISECONDS) + @State(Scope.Thread) + @Fork(value = 40, warmups = 2) + public static class StringThree { + + @Param("4711") + public int intValue; + public Integer integerValue = intValue; + public String stringValue = String.valueOf(intValue); + + @Benchmark + public String stringIntString() { + return stringValue + intValue + stringValue; + } + + @Benchmark + public String stringIntegerString() { + return stringValue + integerValue + stringValue; + } + } @BenchmarkMode(Mode.SingleShotTime) @OutputTimeUnit(TimeUnit.MILLISECONDS) From 07352c67448f3f35827395c83ac95e3ca0e4c6bc Mon Sep 17 00:00:00 2001 From: Pavel Rappo Date: Fri, 16 Aug 2024 14:06:10 +0000 Subject: [PATCH 21/67] 8338398: Trivially fix grammar and typos Reviewed-by: aivanov --- .../share/classes/java/util/concurrent/CompletableFuture.java | 4 ++-- .../share/classes/java/util/concurrent/ForkJoinPool.java | 4 ++-- .../share/classes/java/util/concurrent/ForkJoinTask.java | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git 
a/src/java.base/share/classes/java/util/concurrent/CompletableFuture.java b/src/java.base/share/classes/java/util/concurrent/CompletableFuture.java index 1a43b29d798..8adfd106eeb 100644 --- a/src/java.base/share/classes/java/util/concurrent/CompletableFuture.java +++ b/src/java.base/share/classes/java/util/concurrent/CompletableFuture.java @@ -2805,7 +2805,7 @@ public CompletableFuture completeAsync(Supplier supplier) { /** * Exceptionally completes this CompletableFuture with * a {@link TimeoutException} if not otherwise completed - * before the given timeout. + * before the given timeout elapsed. * * @param timeout how long to wait before completing exceptionally * with a TimeoutException, in units of {@code unit} @@ -2825,7 +2825,7 @@ public CompletableFuture orTimeout(long timeout, TimeUnit unit) { /** * Completes this CompletableFuture with the given value if not - * otherwise completed before the given timeout. + * otherwise completed before the given timeout elapsed. * * @param value the value to use upon timeout * @param timeout how long to wait before completing normally diff --git a/src/java.base/share/classes/java/util/concurrent/ForkJoinPool.java b/src/java.base/share/classes/java/util/concurrent/ForkJoinPool.java index 18b2bdeaa42..cb061813c8a 100644 --- a/src/java.base/share/classes/java/util/concurrent/ForkJoinPool.java +++ b/src/java.base/share/classes/java/util/concurrent/ForkJoinPool.java @@ -670,7 +670,7 @@ public class ForkJoinPool extends AbstractExecutorService { * period given by field keepAlive (default 60sec), which applies * to the first timeout of a quiescent pool. Subsequent cases use * minimal delays such that, if still quiescent, all will be - * released soon therafter. This is checked by setting the + * released soon thereafter. This is checked by setting the * "source" field of signallee to an invalid value, that will * remain invalid only if it did not process any tasks. * @@ -855,7 +855,7 @@ public class ForkJoinPool extends AbstractExecutorService { * non-workers (which comply with Future.get() specs). Internal * usages of ForkJoinTasks ignore interrupt status when executing * or awaiting completion. Otherwise, reporting task results or - * exceptions is preferred to throwing InterruptedExecptions, + * exceptions is preferred to throwing InterruptedExceptions, * which are in turn preferred to timeouts. Similarly, completion * status is preferred to reporting cancellation. Cancellation is * reported as an unchecked exception by join(), and by worker diff --git a/src/java.base/share/classes/java/util/concurrent/ForkJoinTask.java b/src/java.base/share/classes/java/util/concurrent/ForkJoinTask.java index 8ca52dc5cb2..7c1e974aafa 100644 --- a/src/java.base/share/classes/java/util/concurrent/ForkJoinTask.java +++ b/src/java.base/share/classes/java/util/concurrent/ForkJoinTask.java @@ -1072,7 +1072,7 @@ public final void quietlyInvoke() { /** * Tries to join this task, returning true if it completed - * (possibly exceptionally) before the given timeout and + * (possibly exceptionally) before the given timeout elapsed and * the current thread has not been interrupted. * * @param timeout the maximum time to wait @@ -1097,7 +1097,7 @@ public final boolean quietlyJoin(long timeout, TimeUnit unit) /** * Tries to join this task, returning true if it completed - * (possibly exceptionally) before the given timeout. + * (possibly exceptionally) before the given timeout elapsed. 
+ * (possibly exceptionally) before the given timeout elapsed.
* * @param timeout the maximum time to wait * @param unit the time unit of the timeout argument From 961e944fa731dc84be2764c01e4b326187474605 Mon Sep 17 00:00:00 2001 From: Chen Liang Date: Fri, 16 Aug 2024 15:48:54 +0000 Subject: [PATCH 22/67] 8336754: Remodel TypeAnnotation to "has" instead of "be" an Annotation Co-authored-by: Alex Buckley Reviewed-by: asotona --- .../java/lang/classfile/Annotation.java | 46 +++++-- .../lang/classfile/AnnotationElement.java | 58 +++++--- .../java/lang/classfile/AnnotationValue.java | 127 ++++++++++-------- .../java/lang/classfile/TypeAnnotation.java | 72 +++------- .../impl/AbstractAttributeMapper.java | 4 +- .../classfile/impl/AnnotationReader.java | 70 +++++++++- .../classfile/impl/ClassPrinterImpl.java | 4 +- .../classfile/impl/ClassRemapperImpl.java | 4 +- .../classfile/impl/UnboundAttribute.java | 73 +--------- .../impl/verifier/ParserVerifier.java | 4 +- .../com/sun/tools/javap/AnnotationWriter.java | 6 +- test/jdk/jdk/classfile/ClassPrinterTest.java | 6 +- test/jdk/jdk/classfile/TransformTests.java | 2 +- .../jdk/classfile/helpers/ClassRecord.java | 2 +- .../helpers/RebuildingTransformation.java | 3 +- .../classfile/ClassfileInspector.java | 8 +- .../classfile/AnonymousClassTest.java | 4 +- .../referenceinfos/ReferenceInfoUtil.java | 4 +- .../tools/javac/patterns/Annotations.java | 4 +- .../javac/records/RecordCompilationTests.java | 2 +- 20 files changed, 260 insertions(+), 243 deletions(-) diff --git a/src/java.base/share/classes/java/lang/classfile/Annotation.java b/src/java.base/share/classes/java/lang/classfile/Annotation.java index 28c3672bf91..357a7cb77ee 100644 --- a/src/java.base/share/classes/java/lang/classfile/Annotation.java +++ b/src/java.base/share/classes/java/lang/classfile/Annotation.java @@ -37,43 +37,60 @@ import jdk.internal.javac.PreviewFeature; /** - * Models an annotation on a declaration. + * Models an {@code annotation} structure (JVMS {@jvms 4.7.16}) or part of a {@code + * type_annotation} structure (JVMS {@jvms 4.7.20}). This model indicates the + * interface of the annotation and a set of element-value pairs. + *
<p>
+ * This model can reconstruct an annotation, given the location of the modeled structure + * in the class file and the definition of the annotation interface. + *
<p>
+ * Two {@code Annotation} objects should be compared using the {@link + * Object#equals(Object) equals} method. + * + * @apiNote + * For Java programs, the location of the modeled structure indicates the source code + * element or type (JLS {@jls 9.7.4}) on which the reconstructed annotation appears, + * and the annotation interface definition determines whether the reconstructed annotation has + * elements with default values (JLS {@jls 9.6.2}), and whether the reconstructed annotation + * is a container annotation for multiple annotations (JLS {@jls 9.7.5}). * * @see AnnotationElement * @see AnnotationValue + * @see TypeAnnotation * @see RuntimeVisibleAnnotationsAttribute * @see RuntimeInvisibleAnnotationsAttribute * @see RuntimeVisibleParameterAnnotationsAttribute * @see RuntimeInvisibleParameterAnnotationsAttribute * - * @sealedGraph * @since 22 */ @PreviewFeature(feature = PreviewFeature.Feature.CLASSFILE_API) public sealed interface Annotation - permits TypeAnnotation, AnnotationImpl { + permits AnnotationImpl { /** - * {@return the class of the annotation} + * {@return the constant pool entry holding the {@linkplain Class#descriptorString + * descriptor string} of the annotation interface} */ Utf8Entry className(); /** - * {@return the class of the annotation, as a symbolic descriptor} + * {@return the annotation interface, as a symbolic descriptor} */ default ClassDesc classSymbol() { return ClassDesc.ofDescriptor(className().stringValue()); } /** - * {@return the elements of the annotation} + * {@return the element-value pairs of the annotation} */ List elements(); /** * {@return an annotation} - * @param annotationClass the class of the annotation - * @param elements the elements of the annotation + * @param annotationClass the constant pool entry holding the descriptor string + * of the annotation interface + * @param elements the element-value pairs of the annotation */ static Annotation of(Utf8Entry annotationClass, List elements) { @@ -82,8 +99,9 @@ static Annotation of(Utf8Entry annotationClass, /** * {@return an annotation} - * @param annotationClass the class of the annotation - * @param elements the elements of the annotation + * @param annotationClass the constant pool entry holding the descriptor string + * of the annotation interface + * @param elements the element-value pairs of the annotation */ static Annotation of(Utf8Entry annotationClass, AnnotationElement... elements) { @@ -92,8 +110,8 @@ static Annotation of(Utf8Entry annotationClass, /** * {@return an annotation} - * @param annotationClass the class of the annotation - * @param elements the elements of the annotation + * @param annotationClass the descriptor of the annotation interface + * @param elements the element-value pairs of the annotation */ static Annotation of(ClassDesc annotationClass, List elements) { @@ -102,8 +120,8 @@ static Annotation of(ClassDesc annotationClass, /** * {@return an annotation} - * @param annotationClass the class of the annotation - * @param elements the elements of the annotation + * @param annotationClass the descriptor of the annotation interface + * @param elements the element-value pairs of the annotation */ static Annotation of(ClassDesc annotationClass, AnnotationElement... 
elements) { diff --git a/src/java.base/share/classes/java/lang/classfile/AnnotationElement.java b/src/java.base/share/classes/java/lang/classfile/AnnotationElement.java index 80adb07ec4b..33bd410e78d 100644 --- a/src/java.base/share/classes/java/lang/classfile/AnnotationElement.java +++ b/src/java.base/share/classes/java/lang/classfile/AnnotationElement.java @@ -32,7 +32,13 @@ import jdk.internal.javac.PreviewFeature; /** - * Models a key-value pair of an annotation. + * Models an element-value pair in the {@code element_value_pairs} + * table in the {@code annotation} structure defined in JVMS + * {@jvms 4.7.16} or the {@code type_annotation} structure defined + * in JVMS {@jvms 4.7.20}. + *
<p>
+ * Two {@code AnnotationElement} objects should be compared using the + * {@link Object#equals(Object) equals} method. * * @see Annotation * @see AnnotationValue @@ -45,6 +51,12 @@ public sealed interface AnnotationElement /** * {@return the element name} + * + * @apiNote + * In Java source code, by convention, the name of the sole element in a + * single-element annotation interface is {@code value}. (JLS {@jls 9.6.1}) + * This is the case for single-element annotations (JLS {@jls 9.7.3}) and + * container annotations for multiple annotations (JLS {@jls 9.6.3}). */ Utf8Entry name(); @@ -54,7 +66,7 @@ public sealed interface AnnotationElement AnnotationValue value(); /** - * {@return an annotation key-value pair} + * {@return an element-value pair} * @param name the name of the key * @param value the associated value */ @@ -64,7 +76,7 @@ static AnnotationElement of(Utf8Entry name, } /** - * {@return an annotation key-value pair} + * {@return an element-value pair} * @param name the name of the key * @param value the associated value */ @@ -74,9 +86,10 @@ static AnnotationElement of(String name, } /** - * {@return an annotation key-value pair for a class-valued annotation} + * {@return an element-value pair for a class-valued element} * @param name the name of the key * @param value the associated value + * @see AnnotationValue#ofClass(ClassDesc) AnnotationValue::ofClass */ static AnnotationElement ofClass(String name, ClassDesc value) { @@ -84,9 +97,10 @@ static AnnotationElement ofClass(String name, } /** - * {@return an annotation key-value pair for a string-valued annotation} + * {@return an element-value pair for a string-valued element} * @param name the name of the key * @param value the associated value + * @see AnnotationValue#ofString(String) AnnotationValue::ofString */ static AnnotationElement ofString(String name, String value) { @@ -94,9 +108,10 @@ static AnnotationElement ofString(String name, } /** - * {@return an annotation key-value pair for a long-valued annotation} + * {@return an element-value pair for a long-valued element} * @param name the name of the key * @param value the associated value + * @see AnnotationValue#ofLong(long) AnnotationValue::ofLong */ static AnnotationElement ofLong(String name, long value) { @@ -104,9 +119,10 @@ static AnnotationElement ofLong(String name, } /** - * {@return an annotation key-value pair for an int-valued annotation} + * {@return an element-value pair for an int-valued element} * @param name the name of the key * @param value the associated value + * @see AnnotationValue#ofInt(int) AnnotationValue::ofInt */ static AnnotationElement ofInt(String name, int value) { @@ -114,9 +130,10 @@ static AnnotationElement ofInt(String name, } /** - * {@return an annotation key-value pair for a char-valued annotation} + * {@return an element-value pair for a char-valued element} * @param name the name of the key * @param value the associated value + * @see AnnotationValue#ofChar(char) AnnotationValue::ofChar */ static AnnotationElement ofChar(String name, char value) { @@ -124,9 +141,10 @@ static AnnotationElement ofChar(String name, } /** - * {@return an annotation key-value pair for a short-valued annotation} + * {@return an element-value pair for a short-valued element} * @param name the name of the key * @param value the associated value + * @see AnnotationValue#ofShort(short) AnnotationValue::ofShort */ static AnnotationElement ofShort(String name, short value) { @@ -134,29 +152,32 @@ static AnnotationElement ofShort(String name, } /** - * 
{@return an annotation key-value pair for a byte-valued annotation} + * {@return an element-value pair for a byte-valued element} * @param name the name of the key * @param value the associated value + * @see AnnotationValue#ofByte(byte) AnnotationValue::ofByte */ static AnnotationElement ofByte(String name, - byte value) { + byte value) { return of(name, AnnotationValue.ofByte(value)); } /** - * {@return an annotation key-value pair for a boolean-valued annotation} + * {@return an element-value pair for a boolean-valued element} * @param name the name of the key * @param value the associated value + * @see AnnotationValue#ofBoolean(boolean) AnnotationValue::ofBoolean */ static AnnotationElement ofBoolean(String name, - boolean value) { + boolean value) { return of(name, AnnotationValue.ofBoolean(value)); } /** - * {@return an annotation key-value pair for a double-valued annotation} + * {@return an element-value pair for a double-valued element} * @param name the name of the key * @param value the associated value + * @see AnnotationValue#ofDouble(double) AnnotationValue::ofDouble */ static AnnotationElement ofDouble(String name, double value) { @@ -164,9 +185,10 @@ static AnnotationElement ofDouble(String name, } /** - * {@return an annotation key-value pair for a float-valued annotation} + * {@return an element-value pair for a float-valued element} * @param name the name of the key * @param value the associated value + * @see AnnotationValue#ofFloat(float) AnnotationValue::ofFloat */ static AnnotationElement ofFloat(String name, float value) { @@ -174,9 +196,10 @@ static AnnotationElement ofFloat(String name, } /** - * {@return an annotation key-value pair for an annotation-valued annotation} + * {@return an element-value pair for an annotation-valued element} * @param name the name of the key * @param value the associated value + * @see AnnotationValue#ofAnnotation AnnotationValue::ofAnnotation */ static AnnotationElement ofAnnotation(String name, Annotation value) { @@ -184,9 +207,10 @@ static AnnotationElement ofAnnotation(String name, } /** - * {@return an annotation key-value pair for an array-valued annotation} + * {@return an element-value pair for an array-valued element} * @param name the name of the key * @param values the associated values + * @see AnnotationValue#ofArray(AnnotationValue...) AnnotationValue::ofArray */ static AnnotationElement ofArray(String name, AnnotationValue... values) { diff --git a/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java b/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java index 04bbcffb8bc..4decff86ad7 100644 --- a/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java +++ b/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java @@ -41,7 +41,11 @@ import jdk.internal.javac.PreviewFeature; /** - * Models the value of a key-value pair of an annotation. + * Models an {@code element_value} structure, or a value of an element-value + * pair of an annotation, as defined in JVMS {@jvms 4.7.16.1}. + *
<p>
+ * Two {@code AnnotationValue} objects should be compared using the {@link + * Object#equals(Object) equals} method. * * @see Annotation * @see AnnotationElement @@ -53,8 +57,8 @@ public sealed interface AnnotationValue { /** - * Models an annotation-valued element. - * The {@linkplain #tag tag} of this element is {@value ClassFile#AEV_ANNOTATION}. + * Models an annotation value of an element-value pair. + * The {@linkplain #tag tag} of this value is {@value ClassFile#AEV_ANNOTATION}. * * @since 22 */ @@ -66,8 +70,8 @@ sealed interface OfAnnotation extends AnnotationValue } /** - * Models an array-valued element. - * The {@linkplain #tag tag} of this element is {@value ClassFile#AEV_ARRAY}. + * Models an array value of an element-value pair. + * The {@linkplain #tag tag} of this value is {@value ClassFile#AEV_ARRAY}. * * @since 22 */ @@ -79,13 +83,15 @@ sealed interface OfArray extends AnnotationValue * * @apiNote * All array elements derived from Java source code have the same type, - * which must not be an array type. ({@jls 9.6.1}) + * which must not be an array type. (JLS {@jls 9.6.1}) If such elements are + * annotations, they have the same annotation interface; if such elements + * are enum, they belong to the same enum class. */ List values(); } /** - * Models a constant-valued element. + * Models a constant value of an element-value pair. * * @sealedGraph * @since 22 @@ -123,8 +129,8 @@ sealed interface OfConstant } /** - * Models a string-valued element. - * The {@linkplain #tag tag} of this element is {@value ClassFile#AEV_STRING}. + * Models a string value of an element-value pair. + * The {@linkplain #tag tag} of this value is {@value ClassFile#AEV_STRING}. * * @since 22 */ @@ -151,8 +157,8 @@ default String resolvedValue() { } /** - * Models a double-valued element. - * The {@linkplain #tag tag} of this element is {@value ClassFile#AEV_DOUBLE}. + * Models a double value of an element-value pair. + * The {@linkplain #tag tag} of this value is {@value ClassFile#AEV_DOUBLE}. * * @since 22 */ @@ -179,8 +185,8 @@ default Double resolvedValue() { } /** - * Models a float-valued element. - * The {@linkplain #tag tag} of this element is {@value ClassFile#AEV_FLOAT}. + * Models a float value of an element-value pair. + * The {@linkplain #tag tag} of this value is {@value ClassFile#AEV_FLOAT}. * * @since 22 */ @@ -207,8 +213,8 @@ default Float resolvedValue() { } /** - * Models a long-valued element. - * The {@linkplain #tag tag} of this element is {@value ClassFile#AEV_LONG}. + * Models a long value of an element-value pair. + * The {@linkplain #tag tag} of this value is {@value ClassFile#AEV_LONG}. * * @since 22 */ @@ -235,8 +241,8 @@ default Long resolvedValue() { } /** - * Models an int-valued element. - * The {@linkplain #tag tag} of this element is {@value ClassFile#AEV_INT}. + * Models an int value of an element-value pair. + * The {@linkplain #tag tag} of this value is {@value ClassFile#AEV_INT}. * * @since 22 */ @@ -263,8 +269,8 @@ default Integer resolvedValue() { } /** - * Models a short-valued element. - * The {@linkplain #tag tag} of this element is {@value ClassFile#AEV_SHORT}. + * Models a short value of an element-value pair. + * The {@linkplain #tag tag} of this value is {@value ClassFile#AEV_SHORT}. * * @since 22 */ @@ -294,8 +300,8 @@ default Short resolvedValue() { } /** - * Models a char-valued element. - * The {@linkplain #tag tag} of this element is {@value ClassFile#AEV_CHAR}. + * Models a char value of an element-value pair. 
+ * The {@linkplain #tag tag} of this value is {@value ClassFile#AEV_CHAR}. * * @since 22 */ @@ -325,8 +331,8 @@ default Character resolvedValue() { } /** - * Models a byte-valued element. - * The {@linkplain #tag tag} of this element is {@value ClassFile#AEV_BYTE}. + * Models a byte value of an element-value pair. + * The {@linkplain #tag tag} of this value is {@value ClassFile#AEV_BYTE}. * * @since 22 */ @@ -356,8 +362,8 @@ default Byte resolvedValue() { } /** - * Models a boolean-valued element. - * The {@linkplain #tag tag} of this element is {@value ClassFile#AEV_BOOLEAN}. + * Models a boolean value of an element-value pair. + * The {@linkplain #tag tag} of this value is {@value ClassFile#AEV_BOOLEAN}. * * @since 22 */ @@ -387,8 +393,8 @@ default Boolean resolvedValue() { } /** - * Models a class-valued element. - * The {@linkplain #tag tag} of this element is {@value ClassFile#AEV_CLASS}. + * Models a class value of an element-value pair. + * The {@linkplain #tag tag} of this value is {@value ClassFile#AEV_CLASS}. * * @since 22 */ @@ -405,8 +411,8 @@ default ClassDesc classSymbol() { } /** - * Models an enum-valued element. - * The {@linkplain #tag tag} of this element is {@value ClassFile#AEV_ENUM}. + * Models an enum value of an element-value pair. + * The {@linkplain #tag tag} of this value is {@value ClassFile#AEV_ENUM}. * * @since 22 */ @@ -426,12 +432,13 @@ default ClassDesc classSymbol() { } /** - * {@return the tag character for this type as per JVMS {@jvms 4.7.16.1}} + * {@return the tag character for this value as per JVMS {@jvms 4.7.16.1}} + * The tag characters have a one-to-one mapping to the types of annotation element values. */ char tag(); /** - * {@return an annotation element for a enum-valued element} + * {@return an enum value for an element-value pair} * @param className the descriptor string of the enum class * @param constantName the name of the enum constant */ @@ -441,7 +448,7 @@ static OfEnum ofEnum(Utf8Entry className, } /** - * {@return an annotation element for a enum-valued element} + * {@return an enum value for an element-value pair} * @param className the descriptor of the enum class * @param constantName the name of the enum constant */ @@ -451,7 +458,7 @@ static OfEnum ofEnum(ClassDesc className, String constantName) { } /** - * {@return an annotation element for a class-valued element} + * {@return a class value for an element-value pair} * @param className the descriptor string of the class */ static OfClass ofClass(Utf8Entry className) { @@ -459,7 +466,7 @@ static OfClass ofClass(Utf8Entry className) { } /** - * {@return an annotation element for a class-valued element} + * {@return a class value for an element-value pair} * @param className the descriptor of the class */ static OfClass ofClass(ClassDesc className) { @@ -467,7 +474,7 @@ static OfClass ofClass(ClassDesc className) { } /** - * {@return an annotation element for a string-valued element} + * {@return a string value for an element-value pair} * @param value the string */ static OfString ofString(Utf8Entry value) { @@ -475,7 +482,7 @@ static OfString ofString(Utf8Entry value) { } /** - * {@return an annotation element for a string-valued element} + * {@return a string value for an element-value pair} * @param value the string */ static OfString ofString(String value) { @@ -483,7 +490,7 @@ static OfString ofString(String value) { } /** - * {@return an annotation element for a double-valued element} + * {@return a double value for an element-value pair} * @param value the double value */ 
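// Illustrative sketch (not part of this patch): how the factories documented in this
// file combine under the renamed terminology, and how a type annotation now has an
// annotation rather than being one. The descriptor "LExample;" and the element names
// "count" and "label" are invented for the example; each API call used below appears
// elsewhere in this patch. The Class-File API is still a preview API at this point,
// so compiling the sketch needs --enable-preview.
import java.lang.classfile.Annotation;
import java.lang.classfile.AnnotationElement;
import java.lang.classfile.AnnotationValue;
import java.lang.classfile.TypeAnnotation;
import java.lang.constant.ClassDesc;
import java.util.List;

class ElementValuePairSketch {
    static TypeAnnotation sample() {
        // annotation structure: the annotation interface plus its element-value pairs
        Annotation anno = Annotation.of(ClassDesc.ofDescriptor("LExample;"),
                AnnotationElement.ofInt("count", 42),                           // int value
                AnnotationElement.of("label", AnnotationValue.ofString("hi"))); // string value
        // type_annotation structure: target info, target path, and the annotation it "has"
        return TypeAnnotation.of(TypeAnnotation.TargetInfo.ofField(),
                                 List.of(),  // empty target_path: annotates the whole type
                                 anno);      // read back later via TypeAnnotation::annotation
    }
}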
static OfDouble ofDouble(DoubleEntry value) { @@ -491,7 +498,7 @@ static OfDouble ofDouble(DoubleEntry value) { } /** - * {@return an annotation element for a double-valued element} + * {@return a double value for an element-value pair} * @param value the double value */ static OfDouble ofDouble(double value) { @@ -499,7 +506,7 @@ static OfDouble ofDouble(double value) { } /** - * {@return an annotation element for a float-valued element} + * {@return a float value for an element-value pair} * @param value the float value */ static OfFloat ofFloat(FloatEntry value) { @@ -507,7 +514,7 @@ static OfFloat ofFloat(FloatEntry value) { } /** - * {@return an annotation element for a float-valued element} + * {@return a float value for an element-value pair} * @param value the float value */ static OfFloat ofFloat(float value) { @@ -515,7 +522,7 @@ static OfFloat ofFloat(float value) { } /** - * {@return an annotation element for a long-valued element} + * {@return a long value for an element-value pair} * @param value the long value */ static OfLong ofLong(LongEntry value) { @@ -523,7 +530,7 @@ static OfLong ofLong(LongEntry value) { } /** - * {@return an annotation element for a long-valued element} + * {@return a long value for an element-value pair} * @param value the long value */ static OfLong ofLong(long value) { @@ -531,7 +538,7 @@ static OfLong ofLong(long value) { } /** - * {@return an annotation element for an int-valued element} + * {@return an int value for an element-value pair} * @param value the int value */ static OfInt ofInt(IntegerEntry value) { @@ -539,7 +546,7 @@ static OfInt ofInt(IntegerEntry value) { } /** - * {@return an annotation element for an int-valued element} + * {@return an int value for an element-value pair} * @param value the int value */ static OfInt ofInt(int value) { @@ -547,7 +554,7 @@ static OfInt ofInt(int value) { } /** - * {@return an annotation element for a short-valued element} + * {@return a short value for an element-value pair} * @param value the short value */ static OfShort ofShort(IntegerEntry value) { @@ -555,7 +562,7 @@ static OfShort ofShort(IntegerEntry value) { } /** - * {@return an annotation element for a short-valued element} + * {@return a short value for an element-value pair} * @param value the short value */ static OfShort ofShort(short value) { @@ -563,7 +570,7 @@ static OfShort ofShort(short value) { } /** - * {@return an annotation element for a char-valued element} + * {@return a char value for an element-value pair} * @param value the char value */ static OfChar ofChar(IntegerEntry value) { @@ -571,7 +578,7 @@ static OfChar ofChar(IntegerEntry value) { } /** - * {@return an annotation element for a char-valued element} + * {@return a char value for an element-value pair} * @param value the char value */ static OfChar ofChar(char value) { @@ -579,7 +586,7 @@ static OfChar ofChar(char value) { } /** - * {@return an annotation element for a byte-valued element} + * {@return a byte value for an element-value pair} * @param value the byte value */ static OfByte ofByte(IntegerEntry value) { @@ -587,7 +594,7 @@ static OfByte ofByte(IntegerEntry value) { } /** - * {@return an annotation element for a byte-valued element} + * {@return a byte value for an element-value pair} * @param value the byte value */ static OfByte ofByte(byte value) { @@ -595,7 +602,7 @@ static OfByte ofByte(byte value) { } /** - * {@return an annotation element for a boolean-valued element} + * {@return a boolean value for an element-value pair} * @param value the 
boolean value */ static OfBoolean ofBoolean(IntegerEntry value) { @@ -603,7 +610,7 @@ static OfBoolean ofBoolean(IntegerEntry value) { } /** - * {@return an annotation element for a boolean-valued element} + * {@return a boolean value for an element-value pair} * @param value the boolean value */ static OfBoolean ofBoolean(boolean value) { @@ -612,7 +619,7 @@ static OfBoolean ofBoolean(boolean value) { } /** - * {@return an annotation element for an annotation-valued element} + * {@return an annotation value for an element-value pair} * @param value the annotation */ static OfAnnotation ofAnnotation(Annotation value) { @@ -620,7 +627,12 @@ static OfAnnotation ofAnnotation(Annotation value) { } /** - * {@return an annotation element for an array-valued element} + * {@return an array value for an element-value pair} + * + * @apiNote + * See {@link AnnotationValue.OfArray#values() values()} for conventions + * on array values derived from Java source code. + * * @param values the array elements */ static OfArray ofArray(List values) { @@ -628,7 +640,12 @@ static OfArray ofArray(List values) { } /** - * {@return an annotation element for an array-valued element} + * {@return an array value for an element-value pair} + * + * @apiNote + * See {@link AnnotationValue.OfArray#values() values()} for conventions + * on array values derived from Java source code. + * * @param values the array elements */ static OfArray ofArray(AnnotationValue... values) { diff --git a/src/java.base/share/classes/java/lang/classfile/TypeAnnotation.java b/src/java.base/share/classes/java/lang/classfile/TypeAnnotation.java index 01a2f5fc696..b6c9aec76a1 100644 --- a/src/java.base/share/classes/java/lang/classfile/TypeAnnotation.java +++ b/src/java.base/share/classes/java/lang/classfile/TypeAnnotation.java @@ -25,12 +25,10 @@ package java.lang.classfile; -import java.lang.constant.ClassDesc; import java.util.List; import java.lang.classfile.attribute.RuntimeInvisibleTypeAnnotationsAttribute; import java.lang.classfile.attribute.RuntimeVisibleTypeAnnotationsAttribute; -import java.lang.classfile.constantpool.Utf8Entry; import jdk.internal.classfile.impl.TargetInfoImpl; import jdk.internal.classfile.impl.UnboundAttribute; @@ -56,12 +54,22 @@ import static java.lang.classfile.ClassFile.TAT_NEW; import static java.lang.classfile.ClassFile.TAT_RESOURCE_VARIABLE; import static java.lang.classfile.ClassFile.TAT_THROWS; -import jdk.internal.classfile.impl.TemporaryConstantPool; import jdk.internal.javac.PreviewFeature; /** - * Models an annotation on a type use, as defined in JVMS {@jvms 4.7.19} and {@jvms 4.7.20}. + * Models a {@code type_annotation} structure (JVMS {@jvms 4.7.20}). This model + * indicates the annotated type within a declaration or expression and the part + * of the indicated type that is annotated, in addition to what is {@linkplain + * #annotation() available} in an {@code Annotation}. + *
<p>
+ * This model can reconstruct an annotation on a type or a part of a type, given + * the location of the {@code type_annotation} structure in the class file and + * the definition of the annotation interface. + *
<p>
+ * Two {@code TypeAnnotation} objects should be compared using the {@link + * Object#equals(Object) equals} method. * + * @see Annotation * @see RuntimeVisibleTypeAnnotationsAttribute * @see RuntimeInvisibleTypeAnnotationsAttribute * @@ -69,7 +77,6 @@ */ @PreviewFeature(feature = PreviewFeature.Feature.CLASSFILE_API) public sealed interface TypeAnnotation - extends Annotation permits UnboundAttribute.UnboundTypeAnnotation { /** @@ -170,7 +177,7 @@ public int sizeIfFixed() { /** * {@return information describing precisely which type in a declaration or expression - * is annotated} + * is annotated} This models the {@code target_type} and {@code target_info} items. */ TargetInfo targetInfo(); @@ -180,57 +187,22 @@ public int sizeIfFixed() { List targetPath(); /** - * {@return a type annotation} - * @param targetInfo which type in a declaration or expression is annotated - * @param targetPath which part of the type is annotated - * @param annotationClassUtf8Entry the annotation class - * @param annotationElements the annotation elements - */ - static TypeAnnotation of(TargetInfo targetInfo, List targetPath, - Utf8Entry annotationClassUtf8Entry, - List annotationElements) { - return new UnboundAttribute.UnboundTypeAnnotation(targetInfo, targetPath, - annotationClassUtf8Entry, annotationElements); - } - - /** - * {@return a type annotation} - * @param targetInfo which type in a declaration or expression is annotated - * @param targetPath which part of the type is annotated - * @param annotationClass the annotation class - * @param annotationElements the annotation elements + * {@return the annotation applied to the part indicated by {@link #targetPath()}} + * This models the interface of the annotation and the set of element-value pairs, + * the subset of the {@code type_annotation} structure that is identical to the + * {@code annotation} structure. */ - static TypeAnnotation of(TargetInfo targetInfo, List targetPath, - ClassDesc annotationClass, - AnnotationElement... annotationElements) { - return of(targetInfo, targetPath, annotationClass, List.of(annotationElements)); - } - - /** - * {@return a type annotation} - * @param targetInfo which type in a declaration or expression is annotated - * @param targetPath which part of the type is annotated - * @param annotationClass the annotation class - * @param annotationElements the annotation elements - */ - static TypeAnnotation of(TargetInfo targetInfo, List targetPath, - ClassDesc annotationClass, - List annotationElements) { - return of(targetInfo, targetPath, - TemporaryConstantPool.INSTANCE.utf8Entry(annotationClass.descriptorString()), annotationElements); - } + Annotation annotation(); /** - * {@return a type annotation} + * {@return a {@code type_annotation} structure} * @param targetInfo which type in a declaration or expression is annotated * @param targetPath which part of the type is annotated - * @param annotationClassUtf8Entry the annotation class - * @param annotationElements the annotation elements + * @param annotation the annotation */ static TypeAnnotation of(TargetInfo targetInfo, List targetPath, - Utf8Entry annotationClassUtf8Entry, - AnnotationElement... 
annotationElements) { - return of(targetInfo, targetPath, annotationClassUtf8Entry, List.of(annotationElements)); + Annotation annotation) { + return new UnboundAttribute.UnboundTypeAnnotation(targetInfo, targetPath, annotation); } /** diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractAttributeMapper.java b/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractAttributeMapper.java index 0029b503d7a..8be167cd119 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractAttributeMapper.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractAttributeMapper.java @@ -657,7 +657,7 @@ public RuntimeInvisibleTypeAnnotationsAttribute readAttribute(AttributedElement @Override protected void writeBody(BufWriter buf, RuntimeInvisibleTypeAnnotationsAttribute attr) { - AnnotationReader.writeAnnotations(buf, attr.annotations()); + AnnotationReader.writeTypeAnnotations(buf, attr.annotations()); } } @@ -714,7 +714,7 @@ public RuntimeVisibleTypeAnnotationsAttribute readAttribute(AttributedElement e, @Override protected void writeBody(BufWriter buf, RuntimeVisibleTypeAnnotationsAttribute attr) { - AnnotationReader.writeAnnotations(buf, attr.annotations()); + AnnotationReader.writeTypeAnnotations(buf, attr.annotations()); } } diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/AnnotationReader.java b/src/java.base/share/classes/jdk/internal/classfile/impl/AnnotationReader.java index e21938bbc0c..6802d6e75aa 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/AnnotationReader.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/AnnotationReader.java @@ -241,10 +241,8 @@ private static TypeAnnotation readTypeAnnotation(ClassReader classReader, int p, }; } // the annotation info for this annotation - Utf8Entry type = classReader.readEntry(p, Utf8Entry.class); - p += 2; - return TypeAnnotation.of(targetInfo, List.of(typePath), type, - readAnnotationElementValuePairs(classReader, p)); + var anno = readAnnotation(classReader, p); + return TypeAnnotation.of(targetInfo, List.of(typePath), anno); } private static List readLocalVarEntries(ClassReader classReader, int p, LabelContext lc, int targetType) { @@ -283,13 +281,11 @@ private static int skipTypeAnnotation(ClassReader classReader, int p) { } public static void writeAnnotation(BufWriterImpl buf, Annotation annotation) { - // handles annotations and type annotations // TODO annotation cleanup later ((Util.Writable) annotation).writeTo(buf); } - public static void writeAnnotations(BufWriter buf, List list) { - // handles annotations and type annotations + public static void writeAnnotations(BufWriter buf, List list) { var internalBuf = (BufWriterImpl) buf; internalBuf.writeU2(list.size()); for (var e : list) { @@ -297,6 +293,66 @@ public static void writeAnnotations(BufWriter buf, List li } } + private static int labelToBci(LabelContext lr, Label label, TypeAnnotation ta) { + //helper method to avoid NPE + if (lr == null) throw new IllegalArgumentException("Illegal targetType '%s' in TypeAnnotation outside of Code attribute".formatted(ta.targetInfo().targetType())); + return lr.labelToBci(label); + } + + public static void writeTypeAnnotation(BufWriterImpl buf, TypeAnnotation ta) { + LabelContext lr = buf.labelContext(); + // target_type + buf.writeU1(ta.targetInfo().targetType().targetTypeValue()); + + // target_info + switch (ta.targetInfo()) { + case TypeAnnotation.TypeParameterTarget tpt -> buf.writeU1(tpt.typeParameterIndex()); + case 
TypeAnnotation.SupertypeTarget st -> buf.writeU2(st.supertypeIndex()); + case TypeAnnotation.TypeParameterBoundTarget tpbt -> { + buf.writeU1(tpbt.typeParameterIndex()); + buf.writeU1(tpbt.boundIndex()); + } + case TypeAnnotation.EmptyTarget _ -> { + // nothing to write + } + case TypeAnnotation.FormalParameterTarget fpt -> buf.writeU1(fpt.formalParameterIndex()); + case TypeAnnotation.ThrowsTarget tt -> buf.writeU2(tt.throwsTargetIndex()); + case TypeAnnotation.LocalVarTarget lvt -> { + buf.writeU2(lvt.table().size()); + for (var e : lvt.table()) { + int startPc = labelToBci(lr, e.startLabel(), ta); + buf.writeU2(startPc); + buf.writeU2(labelToBci(lr, e.endLabel(), ta) - startPc); + buf.writeU2(e.index()); + } + } + case TypeAnnotation.CatchTarget ct -> buf.writeU2(ct.exceptionTableIndex()); + case TypeAnnotation.OffsetTarget ot -> buf.writeU2(labelToBci(lr, ot.target(), ta)); + case TypeAnnotation.TypeArgumentTarget tat -> { + buf.writeU2(labelToBci(lr, tat.target(), ta)); + buf.writeU1(tat.typeArgumentIndex()); + } + } + + // target_path + buf.writeU1(ta.targetPath().size()); + for (TypeAnnotation.TypePathComponent component : ta.targetPath()) { + buf.writeU1(component.typePathKind().tag()); + buf.writeU1(component.typeArgumentIndex()); + } + + // annotation data + writeAnnotation(buf, ta.annotation()); + } + + public static void writeTypeAnnotations(BufWriter buf, List list) { + var internalBuf = (BufWriterImpl) buf; + internalBuf.writeU2(list.size()); + for (var e : list) { + writeTypeAnnotation(internalBuf, e); + } + } + public static void writeAnnotationValue(BufWriterImpl buf, AnnotationValue value) { // TODO annotation cleanup later ((Util.Writable) value).writeTo(buf); diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/ClassPrinterImpl.java b/src/java.base/share/classes/jdk/internal/classfile/impl/ClassPrinterImpl.java index 75346dd5998..2a648e27568 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/ClassPrinterImpl.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/ClassPrinterImpl.java @@ -1038,9 +1038,9 @@ private static Node annotationsToTree(String name, List annos) { private static Node typeAnnotationsToTree(Style style, String name, List annos) { return new ListNodeImpl(style, name, annos.stream().map(a -> new MapNodeImpl(FLOW, "anno") - .with(leaf("annotation class", a.className().stringValue()), + .with(leaf("annotation class", a.annotation().className().stringValue()), leaf("target info", a.targetInfo().targetType().name())) - .with(elementValuePairsToTree(a.elements())))); + .with(elementValuePairsToTree(a.annotation().elements())))); } diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/ClassRemapperImpl.java b/src/java.base/share/classes/jdk/internal/classfile/impl/ClassRemapperImpl.java index e3f701b2e2a..9b5e7639fa2 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/ClassRemapperImpl.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/ClassRemapperImpl.java @@ -401,9 +401,7 @@ AnnotationValue mapAnnotationValue(AnnotationValue val) { List mapTypeAnnotations(List typeAnnotations) { return typeAnnotations.stream().map(a -> TypeAnnotation.of(a.targetInfo(), - a.targetPath(), map(a.classSymbol()), - a.elements().stream().map(el -> AnnotationElement.of(el.name(), - mapAnnotationValue(el.value()))).toList())).toList(); + a.targetPath(), mapAnnotation(a.annotation()))).toList(); } List mapTypeParams(List typeParams) { diff --git 
a/src/java.base/share/classes/jdk/internal/classfile/impl/UnboundAttribute.java b/src/java.base/share/classes/jdk/internal/classfile/impl/UnboundAttribute.java index 6b64511fd5a..5ee3759bba3 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/UnboundAttribute.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/UnboundAttribute.java @@ -29,14 +29,12 @@ import java.util.Optional; import java.lang.classfile.Annotation; -import java.lang.classfile.AnnotationElement; import java.lang.classfile.AnnotationValue; import java.lang.classfile.Attribute; import java.lang.classfile.AttributeMapper; import java.lang.classfile.Attributes; import java.lang.classfile.BootstrapMethodEntry; import java.lang.classfile.constantpool.ClassEntry; -import java.lang.classfile.Label; import java.lang.classfile.TypeAnnotation; import java.lang.classfile.attribute.AnnotationDefaultAttribute; import java.lang.classfile.attribute.BootstrapMethodsAttribute; @@ -758,75 +756,10 @@ public UnboundRecordComponentInfo(Utf8Entry name, Utf8Entry descriptor, List targetPath, - Utf8Entry className, - List elements) implements TypeAnnotation, Util.Writable { - - public UnboundTypeAnnotation(TargetInfo targetInfo, List targetPath, - Utf8Entry className, List elements) { - this.targetInfo = targetInfo; - this.targetPath = List.copyOf(targetPath); - this.className = className; - this.elements = List.copyOf(elements); - } - - private int labelToBci(LabelContext lr, Label label) { - //helper method to avoid NPE - if (lr == null) throw new IllegalArgumentException("Illegal targetType '%s' in TypeAnnotation outside of Code attribute".formatted(targetInfo.targetType())); - return lr.labelToBci(label); - } - - @Override - public void writeTo(BufWriterImpl buf) { - LabelContext lr = buf.labelContext(); - // target_type - buf.writeU1(targetInfo.targetType().targetTypeValue()); - - // target_info - switch (targetInfo) { - case TypeParameterTarget tpt -> buf.writeU1(tpt.typeParameterIndex()); - case SupertypeTarget st -> buf.writeU2(st.supertypeIndex()); - case TypeParameterBoundTarget tpbt -> { - buf.writeU1(tpbt.typeParameterIndex()); - buf.writeU1(tpbt.boundIndex()); - } - case EmptyTarget et -> { - // nothing to write - } - case FormalParameterTarget fpt -> buf.writeU1(fpt.formalParameterIndex()); - case ThrowsTarget tt -> buf.writeU2(tt.throwsTargetIndex()); - case LocalVarTarget lvt -> { - buf.writeU2(lvt.table().size()); - for (var e : lvt.table()) { - int startPc = labelToBci(lr, e.startLabel()); - buf.writeU2(startPc); - buf.writeU2(labelToBci(lr, e.endLabel()) - startPc); - buf.writeU2(e.index()); - } - } - case CatchTarget ct -> buf.writeU2(ct.exceptionTableIndex()); - case OffsetTarget ot -> buf.writeU2(labelToBci(lr, ot.target())); - case TypeArgumentTarget tat -> { - buf.writeU2(labelToBci(lr, tat.target())); - buf.writeU1(tat.typeArgumentIndex()); - } - } + Annotation annotation) implements TypeAnnotation { - // target_path - buf.writeU1(targetPath().size()); - for (TypePathComponent component : targetPath()) { - buf.writeU1(component.typePathKind().tag()); - buf.writeU1(component.typeArgumentIndex()); - } - - // type_index - buf.writeIndex(className); - - // element_value_pairs - buf.writeU2(elements.size()); - for (AnnotationElement pair : elements()) { - buf.writeIndex(pair.name()); - AnnotationReader.writeAnnotationValue(buf, pair.value()); - } + public UnboundTypeAnnotation { + targetPath = List.copyOf(targetPath); } } diff --git 
a/src/java.base/share/classes/jdk/internal/classfile/impl/verifier/ParserVerifier.java b/src/java.base/share/classes/jdk/internal/classfile/impl/verifier/ParserVerifier.java index 4a2ffd3f25d..77f56b322dd 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/verifier/ParserVerifier.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/verifier/ParserVerifier.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -393,7 +393,7 @@ private static int annotationsSize(List ans) { private static int typeAnnotationsSize(List ans) { int l = 2; for (var an : ans) { - l += 2 + an.targetInfo().size() + 2 * an.targetPath().size() + annotationSize(an); + l += 2 + an.targetInfo().size() + 2 * an.targetPath().size() + annotationSize(an.annotation()); } return l; } diff --git a/src/jdk.jdeps/share/classes/com/sun/tools/javap/AnnotationWriter.java b/src/jdk.jdeps/share/classes/com/sun/tools/javap/AnnotationWriter.java index ae5cd2df024..66b978f252c 100644 --- a/src/jdk.jdeps/share/classes/com/sun/tools/javap/AnnotationWriter.java +++ b/src/jdk.jdeps/share/classes/com/sun/tools/javap/AnnotationWriter.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -95,13 +95,13 @@ public void write(TypeAnnotation annot, CodeAttribute lr) { write(annot, true, false, lr); println(); indent(+1); - write(annot, true); + write(annot.annotation(), true); indent(-1); } public void write(TypeAnnotation annot, boolean showOffsets, boolean resolveIndices, CodeAttribute lr) { - write(annot, resolveIndices); + write(annot.annotation(), resolveIndices); print(": "); write(annot.targetInfo(), annot.targetPath(), showOffsets, lr); } diff --git a/test/jdk/jdk/classfile/ClassPrinterTest.java b/test/jdk/jdk/classfile/ClassPrinterTest.java index 7668648f829..bd95075a08e 100644 --- a/test/jdk/jdk/classfile/ClassPrinterTest.java +++ b/test/jdk/jdk/classfile/ClassPrinterTest.java @@ -66,7 +66,7 @@ ClassModel getClassModel() { RuntimeInvisibleTypeAnnotationsAttribute.of( TypeAnnotation.of(TypeAnnotation.TargetInfo.ofField(), List.of(TypeAnnotation.TypePathComponent.WILDCARD), - ClassDesc.of("Boo"), List.of())))))) + Annotation.of(ClassDesc.of("Boo"), List.of()))))))) .with(RuntimeInvisibleAnnotationsAttribute.of(Annotation.of(ClassDesc.of("Phoo"), AnnotationElement.ofFloat("flfl", 2), AnnotationElement.ofFloat("frfl", 3)))) .with(PermittedSubclassesAttribute.ofSymbols(ClassDesc.of("Boo"), ClassDesc.of("Phoo"))) .withField("f", ConstantDescs.CD_String, fb -> fb @@ -101,7 +101,7 @@ ClassModel getClassModel() { tryb.with(RuntimeInvisibleTypeAnnotationsAttribute.of( TypeAnnotation.of(TypeAnnotation.TargetInfo.ofField(), List.of(TypeAnnotation.TypePathComponent.WILDCARD), - ClassDesc.of("Boo"), List.of()))); + Annotation.of(ClassDesc.of("Boo"), List.of())))); tryb.invokedynamic(DynamicCallSiteDesc.of( MethodHandleDesc.ofMethod(DirectMethodHandleDesc.Kind.STATIC, ClassDesc.of("Phoo"), "phee", MethodTypeDesc.of(ClassDesc.of("Boo"))), "intfMethod", @@ -116,7 +116,7 @@ ClassModel getClassModel() 
{ .with(RuntimeVisibleTypeAnnotationsAttribute.of( TypeAnnotation.of(TypeAnnotation.TargetInfo.ofField(), List.of(TypeAnnotation.TypePathComponent.ARRAY), - ClassDesc.of("Fee"), List.of(AnnotationElement.ofBoolean("yes", false))))) + Annotation.of(ClassDesc.of("Fee"), List.of(AnnotationElement.ofBoolean("yes", false)))))) )))); } diff --git a/test/jdk/jdk/classfile/TransformTests.java b/test/jdk/jdk/classfile/TransformTests.java index fb9f72ba3aa..b78da8b4311 100644 --- a/test/jdk/jdk/classfile/TransformTests.java +++ b/test/jdk/jdk/classfile/TransformTests.java @@ -23,7 +23,7 @@ /* * @test - * @bug 8336010 8336588 + * @bug 8335935 8336588 * @summary Testing ClassFile transformations. * @run junit TransformTests */ diff --git a/test/jdk/jdk/classfile/helpers/ClassRecord.java b/test/jdk/jdk/classfile/helpers/ClassRecord.java index a62ef172367..ce22037ccda 100644 --- a/test/jdk/jdk/classfile/helpers/ClassRecord.java +++ b/test/jdk/jdk/classfile/helpers/ClassRecord.java @@ -828,7 +828,7 @@ public static TypeAnnotationRecord ofTypeAnnotation(TypeAnnotation ann, CodeAttr ann.targetInfo().targetType().targetTypeValue(), TargetInfoRecord.ofTargetInfo(ann.targetInfo(), lr, code), ann.targetPath().stream().map(tpc -> TypePathRecord.ofTypePathComponent(tpc)).collect(toSet()), - AnnotationRecord.ofAnnotation(ann)); + AnnotationRecord.ofAnnotation(ann.annotation())); } public interface TargetInfoRecord { diff --git a/test/jdk/jdk/classfile/helpers/RebuildingTransformation.java b/test/jdk/jdk/classfile/helpers/RebuildingTransformation.java index 7667403aaab..b769898d785 100644 --- a/test/jdk/jdk/classfile/helpers/RebuildingTransformation.java +++ b/test/jdk/jdk/classfile/helpers/RebuildingTransformation.java @@ -179,8 +179,7 @@ static TypeAnnotation[] transformTypeAnnotations(List annotation return annotations.stream().map(ta -> TypeAnnotation.of( transformTargetInfo(ta.targetInfo(), cob, labels), ta.targetPath().stream().map(tpc -> TypeAnnotation.TypePathComponent.of(tpc.typePathKind(), tpc.typeArgumentIndex())).toList(), - ta.classSymbol(), - ta.elements().stream().map(ae -> AnnotationElement.of(ae.name().stringValue(), transformAnnotationValue(ae.value()))).toList())).toArray(TypeAnnotation[]::new); + transformAnnotation(ta.annotation()))).toArray(TypeAnnotation[]::new); } static TypeAnnotation.TargetInfo transformTargetInfo(TypeAnnotation.TargetInfo ti, CodeBuilder cob, HashMap labels) { diff --git a/test/langtools/lib/annotations/annotations/classfile/ClassfileInspector.java b/test/langtools/lib/annotations/annotations/classfile/ClassfileInspector.java index d6219dbff11..30a5f54ac26 100644 --- a/test/langtools/lib/annotations/annotations/classfile/ClassfileInspector.java +++ b/test/langtools/lib/annotations/annotations/classfile/ClassfileInspector.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -603,7 +603,7 @@ public void matchAnnotation(TypeAnnotation anno) { } public boolean checkMatch(TypeAnnotation anno) { - boolean matches = checkMatch((Annotation) anno); + boolean matches = checkMatch(anno.annotation()); int boundIdx = Integer.MIN_VALUE, paraIdx = Integer.MIN_VALUE, tIdx = Integer.MIN_VALUE, exIdx = Integer.MIN_VALUE; switch (anno.targetInfo()) { case TypeAnnotation.TypeParameterBoundTarget binfo -> { @@ -1197,8 +1197,8 @@ public void annoMatcher(Attribute attr, ExpectedAnnotation expected) { switch (attr) { case RuntimeVisibleTypeAnnotationsAttribute rvattr -> { if (expected.matchVisibility(true)) { - for(Annotation anno : rvattr.annotations()) { - expected.matchAnnotation(anno); + for(var anno : rvattr.annotations()) { + expected.matchAnnotation(anno.annotation()); } } } diff --git a/test/langtools/tools/javac/annotations/typeAnnotations/classfile/AnonymousClassTest.java b/test/langtools/tools/javac/annotations/typeAnnotations/classfile/AnonymousClassTest.java index 94610ddf055..eb4b5aa2cf7 100644 --- a/test/langtools/tools/javac/annotations/typeAnnotations/classfile/AnonymousClassTest.java +++ b/test/langtools/tools/javac/annotations/typeAnnotations/classfile/AnonymousClassTest.java @@ -215,7 +215,7 @@ private static String annotationDebugString(ClassModel cm, CodeAttribute cAttr, int offset = info instanceof TypeAnnotation.OffsetTarget offsetInfo? cAttr.labelToBci(offsetInfo.target()): -1; String name; try { - name = annotation.classSymbol().descriptorString(); + name = annotation.annotation().classSymbol().descriptorString(); } catch (Exception e) { throw new AssertionError(e); } @@ -227,7 +227,7 @@ private static String annotationDebugString(ClassModel cm, CodeAttribute cAttr, return String.format( "@%s(%s) %s, offset=%d, location=%s", name, - annotationValueDebugString(cm, annotation), + annotationValueDebugString(cm, annotation.annotation()), info.targetType(), offset, location); diff --git a/test/langtools/tools/javac/annotations/typeAnnotations/referenceinfos/ReferenceInfoUtil.java b/test/langtools/tools/javac/annotations/typeAnnotations/referenceinfos/ReferenceInfoUtil.java index e2e69a01a49..18e3a3dec56 100644 --- a/test/langtools/tools/javac/annotations/typeAnnotations/referenceinfos/ReferenceInfoUtil.java +++ b/test/langtools/tools/javac/annotations/typeAnnotations/referenceinfos/ReferenceInfoUtil.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -97,7 +97,7 @@ private static List generateTADList(List annos, CodeAttribu List result = new ArrayList<>(); for (TypeAnnotation anno: annos) { TAD tad = new TAD(); - tad.annotation = anno.className().stringValue(); + tad.annotation = anno.annotation().className().stringValue(); tad.type = anno.targetInfo().targetType(); switch (anno.targetInfo().targetType()) { case CAST, CONSTRUCTOR_INVOCATION_TYPE_ARGUMENT, METHOD_INVOCATION_TYPE_ARGUMENT -> { diff --git a/test/langtools/tools/javac/patterns/Annotations.java b/test/langtools/tools/javac/patterns/Annotations.java index a471cc57b63..a8106a39827 100644 --- a/test/langtools/tools/javac/patterns/Annotations.java +++ b/test/langtools/tools/javac/patterns/Annotations.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,7 +74,7 @@ void run() throws Exception { StringBuilder actual = new StringBuilder(); for (TypeAnnotation ta: annotations.annotations()) { TypeAnnotation.LocalVarTargetInfo info = ((TypeAnnotation.LocalVarTarget) ta.targetInfo()).table().getFirst(); - actual.append(ta.className().stringValue() + " pos: [" + ta.targetInfo().targetType()); + actual.append(ta.annotation().className().stringValue() + " pos: [" + ta.targetInfo().targetType()); actual.append(", {start_pc=" + codeAttr.labelToBci(info.startLabel()) + ", end_pc=" + codeAttr.labelToBci(info.endLabel())); actual.append(", index=" + info.index()+ "}], "); } diff --git a/test/langtools/tools/javac/records/RecordCompilationTests.java b/test/langtools/tools/javac/records/RecordCompilationTests.java index d80ade50a3b..a01712fedb3 100644 --- a/test/langtools/tools/javac/records/RecordCompilationTests.java +++ b/test/langtools/tools/javac/records/RecordCompilationTests.java @@ -1615,7 +1615,7 @@ private void checkTypeAnno(Attribute rtAnnos, } assert tAnno != null; Assert.check(tAnno.targetInfo().targetType().name().equals(positionType)); - String annotationName = tAnno.classSymbol().displayName(); + String annotationName = tAnno.annotation().classSymbol().displayName(); Assert.check(annotationName.startsWith(annoName)); } private void checkAnno(Attribute rAnnos, From 8635642dbdfb74d2ae50a51611fd2c5980fe6e74 Mon Sep 17 00:00:00 2001 From: Chris Plummer Date: Fri, 16 Aug 2024 16:39:36 +0000 Subject: [PATCH 23/67] 8338469: com/sun/jdi/DataDumpTest.java failed with Not a debuggee, or not listening for debugger to attach Reviewed-by: dcubed --- test/jdk/com/sun/jdi/DataDumpTest.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/test/jdk/com/sun/jdi/DataDumpTest.java b/test/jdk/com/sun/jdi/DataDumpTest.java index f46d0d5de4f..8d7404f58e9 100644 --- a/test/jdk/com/sun/jdi/DataDumpTest.java +++ b/test/jdk/com/sun/jdi/DataDumpTest.java @@ -84,6 +84,11 @@ private static void runTest(String jdwpArg) throws Exception { try { p = pb.start(); InputStream is = p.getInputStream(); + + // Read the first character of output to make sure we've waited until the + // debuggee is ready. This will be the debug agent's "Listening..." message. + char firstChar = (char)is.read(); + out = new OutputAnalyzer(p); // Attach a debugger and do the data dump. 
The data dump output will appear @@ -92,8 +97,8 @@ private static void runTest(String jdwpArg) throws Exception { out.waitFor(); // Wait for the debuggee to exit - System.out.println("Deuggee output:"); - System.out.println(out.getOutput()); + System.out.println("Debuggee output:"); + System.out.println(firstChar + out.getOutput()); // All these strings are part of the debug agent data dump output. out.shouldHaveExitValue(0); From 2f7ba781bf2e4e6d0fa658c19f86c6c05d60358a Mon Sep 17 00:00:00 2001 From: SendaoYan Date: Mon, 19 Aug 2024 04:44:24 +0000 Subject: [PATCH 24/67] 8335150: Test LogGeneratedClassesTest.java fails on rpmbuild mock enviroment Reviewed-by: jpai --- .../lambda/LogGeneratedClassesTest.java | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/test/jdk/java/lang/invoke/lambda/LogGeneratedClassesTest.java b/test/jdk/java/lang/invoke/lambda/LogGeneratedClassesTest.java index 91ea4b932ca..06aa479717b 100644 --- a/test/jdk/java/lang/invoke/lambda/LogGeneratedClassesTest.java +++ b/test/jdk/java/lang/invoke/lambda/LogGeneratedClassesTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,6 +40,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.nio.file.FileStore; import java.nio.file.attribute.PosixFileAttributeView; import jdk.test.lib.compiler.CompilerUtils; @@ -47,6 +48,7 @@ import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; +import org.testng.SkipException; import static java.nio.file.attribute.PosixFilePermissions.*; import static jdk.test.lib.process.ProcessTools.*; @@ -207,15 +209,15 @@ private static boolean isWriteableDirectory(Path p) { @Test public void testDumpDirNotWritable() throws Exception { - if (!Files.getFileStore(Paths.get(".")) - .supportsFileAttributeView(PosixFileAttributeView.class)) { + FileStore fs; + try { + fs = Files.getFileStore(Paths.get(".")); + } catch (IOException e) { + throw new SkipException("WARNING: IOException occurred: " + e + ", Skipping testDumpDirNotWritable test."); + } + if (!fs.supportsFileAttributeView(PosixFileAttributeView.class)) { // No easy way to setup readonly directory without POSIX - // We would like to skip the test with a cause with - // throw new SkipException("Posix not supported"); - // but jtreg will report failure so we just pass the test - // which we can look at if jtreg changed its behavior - System.out.println("WARNING: POSIX is not supported. Skipping testDumpDirNotWritable test."); - return; + throw new SkipException("WARNING: POSIX is not supported. Skipping testDumpDirNotWritable test."); } Path testDir = Path.of("readOnly"); @@ -227,8 +229,7 @@ public void testDumpDirNotWritable() throws Exception { if (isWriteableDirectory(dumpDir)) { // Skipping the test: it's allowed to write into read-only directory // (e.g. current user is super user). - System.out.println("WARNING: The dump directory is writeable. Skipping testDumpDirNotWritable test."); - return; + throw new SkipException("WARNING: The dump directory is writeable. 
Skipping testDumpDirNotWritable test."); } ProcessBuilder pb = createLimitedTestJavaProcessBuilder( From 56a007dd32061695d7bb0faf47e1793728e86c88 Mon Sep 17 00:00:00 2001 From: Tejesh R Date: Mon, 19 Aug 2024 06:42:51 +0000 Subject: [PATCH 25/67] 8338488: Add screen capture for failure case Reviewed-by: azvegint --- .../java/awt/Checkbox/CheckboxCheckerScalingTest.java | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/jdk/java/awt/Checkbox/CheckboxCheckerScalingTest.java b/test/jdk/java/awt/Checkbox/CheckboxCheckerScalingTest.java index 5e531e84801..a07620dbd38 100644 --- a/test/jdk/java/awt/Checkbox/CheckboxCheckerScalingTest.java +++ b/test/jdk/java/awt/Checkbox/CheckboxCheckerScalingTest.java @@ -29,6 +29,10 @@ import java.awt.Rectangle; import java.awt.Robot; import java.awt.image.BufferedImage; +import java.io.File; +import java.io.IOException; + +import javax.imageio.ImageIO; /* * @test @@ -78,6 +82,12 @@ public static void main(String[] args) throws Exception { }); if (!checkmarkFound) { + try { + ImageIO.write(imageAfterChecked, "png", + new File("imageAfterChecked.png")); + } catch (IOException e) { + throw new RuntimeException(e); + } throw new RuntimeException("Checkmark not scaled"); } System.out.println("Test Passed"); From 15b20cb1fd18b849e49c175737dd3826c8d0ceff Mon Sep 17 00:00:00 2001 From: Manukumar V S Date: Mon, 19 Aug 2024 07:17:37 +0000 Subject: [PATCH 26/67] 8337886: java/awt/Frame/MaximizeUndecoratedTest.java fails in OEL due to a slight color difference Reviewed-by: dnguyen, honkar, serb --- test/jdk/java/awt/Frame/MaximizeUndecoratedTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/jdk/java/awt/Frame/MaximizeUndecoratedTest.java b/test/jdk/java/awt/Frame/MaximizeUndecoratedTest.java index 77046ea3e1f..3e23af76bb6 100644 --- a/test/jdk/java/awt/Frame/MaximizeUndecoratedTest.java +++ b/test/jdk/java/awt/Frame/MaximizeUndecoratedTest.java @@ -50,7 +50,7 @@ public class MaximizeUndecoratedTest { private static final int SIZE = 300; - private static final int OFFSET = 2; + private static final int OFFSET = 5; private static Frame frame; private static Robot robot; From f0374a0bc181d0f2a8c0aa9aa032b07998ffaf60 Mon Sep 17 00:00:00 2001 From: Andrew Dinn Date: Mon, 19 Aug 2024 09:00:19 +0000 Subject: [PATCH 27/67] 8337987: Relocate jfr and throw_exception stubs from StubGenerator to SharedRuntime Reviewed-by: fyang, kvn, yzheng --- .../cpu/aarch64/macroAssembler_aarch64.cpp | 2 +- .../cpu/aarch64/methodHandles_aarch64.cpp | 4 +- .../cpu/aarch64/sharedRuntime_aarch64.cpp | 196 ++++++++++++++ .../cpu/aarch64/stubGenerator_aarch64.cpp | 238 +---------------- .../templateInterpreterGenerator_aarch64.cpp | 4 +- src/hotspot/cpu/arm/methodHandles_arm.cpp | 5 +- src/hotspot/cpu/arm/sharedRuntime_arm.cpp | 140 ++++++++++ src/hotspot/cpu/arm/stubGenerator_arm.cpp | 154 ----------- .../arm/templateInterpreterGenerator_arm.cpp | 2 +- src/hotspot/cpu/ppc/macroAssembler_ppc.cpp | 2 +- src/hotspot/cpu/ppc/methodHandles_ppc.cpp | 6 +- src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp | 173 +++++++++++++ src/hotspot/cpu/ppc/stubGenerator_ppc.cpp | 209 +-------------- .../ppc/templateInterpreterGenerator_ppc.cpp | 4 +- .../cpu/riscv/macroAssembler_riscv.cpp | 2 +- src/hotspot/cpu/riscv/methodHandles_riscv.cpp | 4 +- src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp | 195 ++++++++++++++ src/hotspot/cpu/riscv/stubGenerator_riscv.cpp | 239 +---------------- .../templateInterpreterGenerator_riscv.cpp | 4 +- src/hotspot/cpu/s390/macroAssembler_s390.cpp | 2 +- 
src/hotspot/cpu/s390/methodHandles_s390.cpp | 6 +- src/hotspot/cpu/s390/sharedRuntime_s390.cpp | 94 +++++++ src/hotspot/cpu/s390/stubGenerator_s390.cpp | 122 --------- .../templateInterpreterGenerator_s390.cpp | 6 +- src/hotspot/cpu/x86/macroAssembler_x86.cpp | 2 +- src/hotspot/cpu/x86/methodHandles_x86.cpp | 4 +- src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp | 211 +++++++++++++++ src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp | 193 ++++++++++++++ src/hotspot/cpu/x86/stubGenerator_x86_32.cpp | 239 ----------------- src/hotspot/cpu/x86/stubGenerator_x86_64.cpp | 240 +----------------- src/hotspot/cpu/x86/stubGenerator_x86_64.hpp | 10 - .../x86/templateInterpreterGenerator_x86.cpp | 4 +- src/hotspot/cpu/zero/sharedRuntime_zero.cpp | 21 +- src/hotspot/cpu/zero/stubGenerator_zero.cpp | 16 -- src/hotspot/share/jvmci/jvmciCompilerToVM.hpp | 1 + .../share/jvmci/jvmciCompilerToVMInit.cpp | 2 + src/hotspot/share/jvmci/vmStructs_jvmci.cpp | 4 +- src/hotspot/share/opto/library_call.cpp | 4 +- src/hotspot/share/runtime/init.cpp | 6 + src/hotspot/share/runtime/sharedRuntime.cpp | 68 ++++- src/hotspot/share/runtime/sharedRuntime.hpp | 36 ++- src/hotspot/share/runtime/stubRoutines.cpp | 10 - src/hotspot/share/runtime/stubRoutines.hpp | 53 ++-- 43 files changed, 1392 insertions(+), 1545 deletions(-) diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index ead4220add0..f8b703fb4da 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -744,7 +744,7 @@ void MacroAssembler::reserved_stack_check() { // We have already removed our own frame. // throw_delayed_StackOverflowError will think that it's been // called by our caller. - lea(rscratch1, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry())); + lea(rscratch1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry())); br(rscratch1); should_not_reach_here(); diff --git a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp index ee19079db7d..68800d04d69 100644 --- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp @@ -120,7 +120,7 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth __ ldr(rscratch1,Address(method, entry_offset)); __ br(rscratch1); __ bind(L_no_such_method); - __ far_jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry())); + __ far_jump(RuntimeAddress(SharedRuntime::throw_AbstractMethodError_entry())); } void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, @@ -451,7 +451,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, jump_from_method_handle(_masm, rmethod, temp1, for_compiler_entry); if (iid == vmIntrinsics::_linkToInterface) { __ bind(L_incompatible_class_change_error); - __ far_jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry())); + __ far_jump(RuntimeAddress(SharedRuntime::throw_IncompatibleClassChangeError_entry())); } } } diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp index a4dac0ccf6d..901ae502999 100644 --- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp @@ -66,6 +66,12 @@ #define __ masm-> +#ifdef PRODUCT +#define BLOCK_COMMENT(str) /* nothing */ +#else +#define BLOCK_COMMENT(str) __ block_comment(str) +#endif + const int 
StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size; // FIXME -- this is used by C1 @@ -2764,3 +2770,193 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha // frame_size_words or bytes?? return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true); } + +// Continuation point for throwing of implicit exceptions that are +// not handled in the current activation. Fabricates an exception +// oop and initiates normal exception dispatching in this +// frame. Since we need to preserve callee-saved values (currently +// only for C2, but done for C1 as well) we need a callee-saved oop +// map and therefore have to make these stubs into RuntimeStubs +// rather than BufferBlobs. If the compiler needs all registers to +// be preserved between the fault point and the exception handler +// then it must assume responsibility for that in +// AbstractCompiler::continuation_for_implicit_null_exception or +// continuation_for_implicit_division_by_zero_exception. All other +// implicit exceptions (e.g., NullPointerException or +// AbstractMethodError on entry) are either at call sites or +// otherwise assume that stack unwinding will be initiated, so +// caller saved registers were assumed volatile in the compiler. + +RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) { + // Information about frame layout at time of blocking runtime call. + // Note that we only have to preserve callee-saved registers since + // the compilers are responsible for supplying a continuation point + // if they expect all registers to be preserved. + // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0 + enum layout { + rfp_off = 0, + rfp_off2, + return_off, + return_off2, + framesize // inclusive of return address + }; + + int insts_size = 512; + int locs_size = 64; + + ResourceMark rm; + const char* timer_msg = "SharedRuntime generate_throw_exception"; + TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); + + CodeBuffer code(name, insts_size, locs_size); + OopMapSet* oop_maps = new OopMapSet(); + MacroAssembler* masm = new MacroAssembler(&code); + + address start = __ pc(); + + // This is an inlined and slightly modified version of call_VM + // which has the ability to fetch the return PC out of + // thread-local storage and also sets up last_Java_sp slightly + // differently than the real call_VM + + __ enter(); // Save FP and LR before call + + assert(is_even(framesize/2), "sp not 16-byte aligned"); + + // lr and fp are already in place + __ sub(sp, rfp, ((uint64_t)framesize-4) << LogBytesPerInt); // prolog + + int frame_complete = __ pc() - start; + + // Set up last_Java_sp and last_Java_fp + address the_pc = __ pc(); + __ set_last_Java_frame(sp, rfp, the_pc, rscratch1); + + __ mov(c_rarg0, rthread); + BLOCK_COMMENT("call runtime_entry"); + __ mov(rscratch1, runtime_entry); + __ blr(rscratch1); + + // Generate oop map + OopMap* map = new OopMap(framesize, 0); + + oop_maps->add_gc_map(the_pc - start, map); + + __ reset_last_Java_frame(true); + + // Reinitialize the ptrue predicate register, in case the external runtime + // call clobbers ptrue reg, as we may return to SVE compiled code. 
+ __ reinitialize_ptrue(); + + __ leave(); + + // check for pending exceptions +#ifdef ASSERT + Label L; + __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset())); + __ cbnz(rscratch1, L); + __ should_not_reach_here(); + __ bind(L); +#endif // ASSERT + __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry())); + + // codeBlob framesize is in words (not VMRegImpl::slot_size) + RuntimeStub* stub = + RuntimeStub::new_runtime_stub(name, + &code, + frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, false); + return stub; +} + +#if INCLUDE_JFR + +static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) { + __ set_last_Java_frame(sp, rfp, the_pc, rscratch1); + __ mov(c_rarg0, thread); +} + +// The handle is dereferenced through a load barrier. +static void jfr_epilogue(MacroAssembler* masm) { + __ reset_last_Java_frame(true); +} + +// For c2: c_rarg0 is junk, call to runtime to write a checkpoint. +// It returns a jobject handle to the event writer. +// The handle is dereferenced and the return value is the event writer oop. +RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { + enum layout { + rbp_off, + rbpH_off, + return_off, + return_off2, + framesize // inclusive of return address + }; + + int insts_size = 1024; + int locs_size = 64; + CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size); + OopMapSet* oop_maps = new OopMapSet(); + MacroAssembler* masm = new MacroAssembler(&code); + + address start = __ pc(); + __ enter(); + int frame_complete = __ pc() - start; + address the_pc = __ pc(); + jfr_prologue(the_pc, masm, rthread); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1); + jfr_epilogue(masm); + __ resolve_global_jobject(r0, rscratch1, rscratch2); + __ leave(); + __ ret(lr); + + OopMap* map = new OopMap(framesize, 1); // rfp + oop_maps->add_gc_map(the_pc - start, map); + + RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) + RuntimeStub::new_runtime_stub("jfr_write_checkpoint", &code, frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, false); + return stub; +} + +// For c2: call to return a leased buffer. 
+RuntimeStub* SharedRuntime::generate_jfr_return_lease() { + enum layout { + rbp_off, + rbpH_off, + return_off, + return_off2, + framesize // inclusive of return address + }; + + int insts_size = 1024; + int locs_size = 64; + + CodeBuffer code("jfr_return_lease", insts_size, locs_size); + OopMapSet* oop_maps = new OopMapSet(); + MacroAssembler* masm = new MacroAssembler(&code); + + address start = __ pc(); + __ enter(); + int frame_complete = __ pc() - start; + address the_pc = __ pc(); + jfr_prologue(the_pc, masm, rthread); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1); + jfr_epilogue(masm); + + __ leave(); + __ ret(lr); + + OopMap* map = new OopMap(framesize, 1); // rfp + oop_maps->add_gc_map(the_pc - start, map); + + RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) + RuntimeStub::new_runtime_stub("jfr_return_lease", &code, frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, false); + return stub; +} + +#endif // INCLUDE_JFR diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp index 5e2ef97e4a3..b3513a586de 100644 --- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp @@ -7045,7 +7045,7 @@ class StubGenerator: public StubCodeGenerator { Label thaw_success; // rscratch2 contains the size of the frames to thaw, 0 if overflow or no more frames __ cbnz(rscratch2, thaw_success); - __ lea(rscratch1, RuntimeAddress(StubRoutines::throw_StackOverflowError_entry())); + __ lea(rscratch1, RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry())); __ br(rscratch1); __ bind(thaw_success); @@ -7305,98 +7305,6 @@ class StubGenerator: public StubCodeGenerator { return start; } -#if INCLUDE_JFR - - static void jfr_prologue(address the_pc, MacroAssembler* _masm, Register thread) { - __ set_last_Java_frame(sp, rfp, the_pc, rscratch1); - __ mov(c_rarg0, thread); - } - - // The handle is dereferenced through a load barrier. - static void jfr_epilogue(MacroAssembler* _masm) { - __ reset_last_Java_frame(true); - } - - // For c2: c_rarg0 is junk, call to runtime to write a checkpoint. - // It returns a jobject handle to the event writer. - // The handle is dereferenced and the return value is the event writer oop. - static RuntimeStub* generate_jfr_write_checkpoint() { - enum layout { - rbp_off, - rbpH_off, - return_off, - return_off2, - framesize // inclusive of return address - }; - - int insts_size = 1024; - int locs_size = 64; - CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size); - OopMapSet* oop_maps = new OopMapSet(); - MacroAssembler* masm = new MacroAssembler(&code); - MacroAssembler* _masm = masm; - - address start = __ pc(); - __ enter(); - int frame_complete = __ pc() - start; - address the_pc = __ pc(); - jfr_prologue(the_pc, _masm, rthread); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1); - jfr_epilogue(_masm); - __ resolve_global_jobject(r0, rscratch1, rscratch2); - __ leave(); - __ ret(lr); - - OopMap* map = new OopMap(framesize, 1); // rfp - oop_maps->add_gc_map(the_pc - start, map); - - RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) - RuntimeStub::new_runtime_stub("jfr_write_checkpoint", &code, frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, false); - return stub; - } - - // For c2: call to return a leased buffer. 
- static RuntimeStub* generate_jfr_return_lease() { - enum layout { - rbp_off, - rbpH_off, - return_off, - return_off2, - framesize // inclusive of return address - }; - - int insts_size = 1024; - int locs_size = 64; - CodeBuffer code("jfr_return_lease", insts_size, locs_size); - OopMapSet* oop_maps = new OopMapSet(); - MacroAssembler* masm = new MacroAssembler(&code); - MacroAssembler* _masm = masm; - - address start = __ pc(); - __ enter(); - int frame_complete = __ pc() - start; - address the_pc = __ pc(); - jfr_prologue(the_pc, _masm, rthread); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1); - jfr_epilogue(_masm); - - __ leave(); - __ ret(lr); - - OopMap* map = new OopMap(framesize, 1); // rfp - oop_maps->add_gc_map(the_pc - start, map); - - RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) - RuntimeStub::new_runtime_stub("jfr_return_lease", &code, frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, false); - return stub; - } - -#endif // INCLUDE_JFR - // exception handler for upcall stubs address generate_upcall_stub_exception_handler() { StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler"); @@ -7412,115 +7320,9 @@ class StubGenerator: public StubCodeGenerator { return start; } - // Continuation point for throwing of implicit exceptions that are - // not handled in the current activation. Fabricates an exception - // oop and initiates normal exception dispatching in this - // frame. Since we need to preserve callee-saved values (currently - // only for C2, but done for C1 as well) we need a callee-saved oop - // map and therefore have to make these stubs into RuntimeStubs - // rather than BufferBlobs. If the compiler needs all registers to - // be preserved between the fault point and the exception handler - // then it must assume responsibility for that in - // AbstractCompiler::continuation_for_implicit_null_exception or - // continuation_for_implicit_division_by_zero_exception. All other - // implicit exceptions (e.g., NullPointerException or - // AbstractMethodError on entry) are either at call sites or - // otherwise assume that stack unwinding will be initiated, so - // caller saved registers were assumed volatile in the compiler. - #undef __ #define __ masm-> - address generate_throw_exception(const char* name, - address runtime_entry, - Register arg1 = noreg, - Register arg2 = noreg) { - // Information about frame layout at time of blocking runtime call. - // Note that we only have to preserve callee-saved registers since - // the compilers are responsible for supplying a continuation point - // if they expect all registers to be preserved. - // n.b. 
aarch64 asserts that frame::arg_reg_save_area_bytes == 0 - enum layout { - rfp_off = 0, - rfp_off2, - return_off, - return_off2, - framesize // inclusive of return address - }; - - int insts_size = 512; - int locs_size = 64; - - CodeBuffer code(name, insts_size, locs_size); - OopMapSet* oop_maps = new OopMapSet(); - MacroAssembler* masm = new MacroAssembler(&code); - - address start = __ pc(); - - // This is an inlined and slightly modified version of call_VM - // which has the ability to fetch the return PC out of - // thread-local storage and also sets up last_Java_sp slightly - // differently than the real call_VM - - __ enter(); // Save FP and LR before call - - assert(is_even(framesize/2), "sp not 16-byte aligned"); - - // lr and fp are already in place - __ sub(sp, rfp, ((uint64_t)framesize-4) << LogBytesPerInt); // prolog - - int frame_complete = __ pc() - start; - - // Set up last_Java_sp and last_Java_fp - address the_pc = __ pc(); - __ set_last_Java_frame(sp, rfp, the_pc, rscratch1); - - // Call runtime - if (arg1 != noreg) { - assert(arg2 != c_rarg1, "clobbered"); - __ mov(c_rarg1, arg1); - } - if (arg2 != noreg) { - __ mov(c_rarg2, arg2); - } - __ mov(c_rarg0, rthread); - BLOCK_COMMENT("call runtime_entry"); - __ mov(rscratch1, runtime_entry); - __ blr(rscratch1); - - // Generate oop map - OopMap* map = new OopMap(framesize, 0); - - oop_maps->add_gc_map(the_pc - start, map); - - __ reset_last_Java_frame(true); - - // Reinitialize the ptrue predicate register, in case the external runtime - // call clobbers ptrue reg, as we may return to SVE compiled code. - __ reinitialize_ptrue(); - - __ leave(); - - // check for pending exceptions -#ifdef ASSERT - Label L; - __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset())); - __ cbnz(rscratch1, L); - __ should_not_reach_here(); - __ bind(L); -#endif // ASSERT - __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry())); - - // codeBlob framesize is in words (not VMRegImpl::slot_size) - RuntimeStub* stub = - RuntimeStub::new_runtime_stub(name, - &code, - frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, false); - return stub->entry_point(); - } - class MontgomeryMultiplyGenerator : public MacroAssembler { Register Pa_base, Pb_base, Pn_base, Pm_base, inv, Rlen, Ra, Rb, Rm, Rn, @@ -8363,16 +8165,6 @@ class StubGenerator: public StubCodeGenerator { // is referenced by megamorphic call StubRoutines::_catch_exception_entry = generate_catch_exception(); - // Build this early so it's available for the interpreter. - StubRoutines::_throw_StackOverflowError_entry = - generate_throw_exception("StackOverflowError throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime::throw_StackOverflowError)); - StubRoutines::_throw_delayed_StackOverflowError_entry = - generate_throw_exception("delayed StackOverflowError throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime::throw_delayed_StackOverflowError)); - // Initialize table for copy memory (arraycopy) check. 
if (UnsafeMemoryAccess::_table == nullptr) { UnsafeMemoryAccess::create_table(8 + 4); // 8 for copyMemory; 4 for setMemory @@ -8408,41 +8200,13 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_cont_thaw = generate_cont_thaw(); StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier(); StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception(); - - JFR_ONLY(generate_jfr_stubs();) - } - -#if INCLUDE_JFR - void generate_jfr_stubs() { - StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint(); - StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point(); - StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease(); - StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point(); } -#endif // INCLUDE_JFR void generate_final_stubs() { // support for verify_oop (must happen after universe_init) if (VerifyOops) { StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); } - StubRoutines::_throw_AbstractMethodError_entry = - generate_throw_exception("AbstractMethodError throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime:: - throw_AbstractMethodError)); - - StubRoutines::_throw_IncompatibleClassChangeError_entry = - generate_throw_exception("IncompatibleClassChangeError throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime:: - throw_IncompatibleClassChangeError)); - - StubRoutines::_throw_NullPointerException_at_call_entry = - generate_throw_exception("NullPointerException at call throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime:: - throw_NullPointerException_at_call)); // arraycopy stubs used by compilers generate_arraycopy_stubs(); diff --git a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp index ed2450a9110..38d48b86f23 100644 --- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp @@ -752,8 +752,8 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) { // Note: the restored frame is not necessarily interpreted. // Use the shared runtime version of the StackOverflowError. 
- assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated"); - __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry())); + assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated"); + __ far_jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry())); // all done with frame size check __ bind(after_frame_check); diff --git a/src/hotspot/cpu/arm/methodHandles_arm.cpp b/src/hotspot/cpu/arm/methodHandles_arm.cpp index 83939292055..7fc984afa99 100644 --- a/src/hotspot/cpu/arm/methodHandles_arm.cpp +++ b/src/hotspot/cpu/arm/methodHandles_arm.cpp @@ -39,6 +39,7 @@ #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" #include "runtime/frame.inline.hpp" +#include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "utilities/preserveException.hpp" @@ -140,7 +141,7 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, bool for_comp __ bind(L_no_such_method); // throw exception - __ jump(StubRoutines::throw_AbstractMethodError_entry(), relocInfo::runtime_call_type, Rtemp); + __ jump(SharedRuntime::throw_AbstractMethodError_entry(), relocInfo::runtime_call_type, Rtemp); } void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, @@ -461,7 +462,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, if (iid == vmIntrinsics::_linkToInterface) { __ bind(L_incompatible_class_change_error); - __ jump(StubRoutines::throw_IncompatibleClassChangeError_entry(), relocInfo::runtime_call_type, Rtemp); + __ jump(SharedRuntime::throw_IncompatibleClassChangeError_entry(), relocInfo::runtime_call_type, Rtemp); } } } diff --git a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp index 1305283aeae..f07c1f8e53c 100644 --- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp +++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp @@ -1728,3 +1728,143 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true); } + +//------------------------------------------------------------------------------------------------------------------------ +// Continuation point for throwing of implicit exceptions that are not handled in +// the current activation. Fabricates an exception oop and initiates normal +// exception dispatching in this frame. 
+RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) { + int insts_size = 128; + int locs_size = 32; + + ResourceMark rm; + const char* timer_msg = "SharedRuntime generate_throw_exception"; + TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); + + CodeBuffer code(name, insts_size, locs_size); + OopMapSet* oop_maps; + int frame_size; + int frame_complete; + + oop_maps = new OopMapSet(); + MacroAssembler* masm = new MacroAssembler(&code); + + address start = __ pc(); + + frame_size = 2; + __ mov(Rexception_pc, LR); + __ raw_push(FP, LR); + + frame_complete = __ pc() - start; + + // Any extra arguments are already supposed to be R1 and R2 + __ mov(R0, Rthread); + + int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp); + assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin"); + __ call(runtime_entry); + if (pc_offset == -1) { + pc_offset = __ offset(); + } + + // Generate oop map + OopMap* map = new OopMap(frame_size*VMRegImpl::slots_per_word, 0); + oop_maps->add_gc_map(pc_offset, map); + __ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call + + __ raw_pop(FP, LR); + __ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp); + + RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, + frame_size, oop_maps, false); + return stub; +} + +#if INCLUDE_JFR + +// For c2: c_rarg0 is junk, call to runtime to write a checkpoint. +// It returns a jobject handle to the event writer. +// The handle is dereferenced and the return value is the event writer oop. +RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { + enum layout { + r1_off, + r2_off, + return_off, + framesize // inclusive of return address + }; + + CodeBuffer code("jfr_write_checkpoint", 512, 64); + MacroAssembler* masm = new MacroAssembler(&code); + + address start = __ pc(); + __ raw_push(R1, R2, LR); + address the_pc = __ pc(); + + int frame_complete = the_pc - start; + + __ set_last_Java_frame(SP, FP, true, Rtemp); + __ mov(c_rarg0, Rthread); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), c_rarg0); + __ reset_last_Java_frame(Rtemp); + + // R0 is jobject handle result, unpack and process it through a barrier. + __ resolve_global_jobject(R0, Rtemp, R1); + + __ raw_pop(R1, R2, LR); + __ ret(); + + OopMapSet* oop_maps = new OopMapSet(); + OopMap* map = new OopMap(framesize, 1); + oop_maps->add_gc_map(frame_complete, map); + + RuntimeStub* stub = + RuntimeStub::new_runtime_stub(code.name(), + &code, + frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, + false); + return stub; +} + +// For c2: call to return a leased buffer. 
+RuntimeStub* SharedRuntime::generate_jfr_return_lease() { + enum layout { + r1_off, + r2_off, + return_off, + framesize // inclusive of return address + }; + + CodeBuffer code("jfr_return_lease", 512, 64); + MacroAssembler* masm = new MacroAssembler(&code); + + address start = __ pc(); + __ raw_push(R1, R2, LR); + address the_pc = __ pc(); + + int frame_complete = the_pc - start; + + __ set_last_Java_frame(SP, FP, true, Rtemp); + __ mov(c_rarg0, Rthread); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), c_rarg0); + __ reset_last_Java_frame(Rtemp); + + __ raw_pop(R1, R2, LR); + __ ret(); + + OopMapSet* oop_maps = new OopMapSet(); + OopMap* map = new OopMap(framesize, 1); + oop_maps->add_gc_map(frame_complete, map); + + RuntimeStub* stub = + RuntimeStub::new_runtime_stub(code.name(), + &code, + frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, + false); + return stub; +} + +#endif // INCLUDE_JFR diff --git a/src/hotspot/cpu/arm/stubGenerator_arm.cpp b/src/hotspot/cpu/arm/stubGenerator_arm.cpp index 0d66a2fbb0a..9b91e02cf07 100644 --- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp +++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp @@ -2961,52 +2961,6 @@ class StubGenerator: public StubCodeGenerator { #undef __ #define __ masm-> - //------------------------------------------------------------------------------------------------------------------------ - // Continuation point for throwing of implicit exceptions that are not handled in - // the current activation. Fabricates an exception oop and initiates normal - // exception dispatching in this frame. - address generate_throw_exception(const char* name, address runtime_entry) { - int insts_size = 128; - int locs_size = 32; - CodeBuffer code(name, insts_size, locs_size); - OopMapSet* oop_maps; - int frame_size; - int frame_complete; - - oop_maps = new OopMapSet(); - MacroAssembler* masm = new MacroAssembler(&code); - - address start = __ pc(); - - frame_size = 2; - __ mov(Rexception_pc, LR); - __ raw_push(FP, LR); - - frame_complete = __ pc() - start; - - // Any extra arguments are already supposed to be R1 and R2 - __ mov(R0, Rthread); - - int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp); - assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin"); - __ call(runtime_entry); - if (pc_offset == -1) { - pc_offset = __ offset(); - } - - // Generate oop map - OopMap* map = new OopMap(frame_size*VMRegImpl::slots_per_word, 0); - oop_maps->add_gc_map(pc_offset, map); - __ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call - - __ raw_pop(FP, LR); - __ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp); - - RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, - frame_size, oop_maps, false); - return stub->entry_point(); - } - address generate_cont_thaw(const char* label, Continuation::thaw_kind kind) { if (!Continuations::enabled()) return nullptr; Unimplemented(); @@ -3025,95 +2979,6 @@ class StubGenerator: public StubCodeGenerator { return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception); } -#if INCLUDE_JFR - - // For c2: c_rarg0 is junk, call to runtime to write a checkpoint. - // It returns a jobject handle to the event writer. - // The handle is dereferenced and the return value is the event writer oop. 
- static RuntimeStub* generate_jfr_write_checkpoint() { - enum layout { - r1_off, - r2_off, - return_off, - framesize // inclusive of return address - }; - - CodeBuffer code("jfr_write_checkpoint", 512, 64); - MacroAssembler* masm = new MacroAssembler(&code); - - address start = __ pc(); - __ raw_push(R1, R2, LR); - address the_pc = __ pc(); - - int frame_complete = the_pc - start; - - __ set_last_Java_frame(SP, FP, true, Rtemp); - __ mov(c_rarg0, Rthread); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), c_rarg0); - __ reset_last_Java_frame(Rtemp); - - // R0 is jobject handle result, unpack and process it through a barrier. - __ resolve_global_jobject(R0, Rtemp, R1); - - __ raw_pop(R1, R2, LR); - __ ret(); - - OopMapSet* oop_maps = new OopMapSet(); - OopMap* map = new OopMap(framesize, 1); - oop_maps->add_gc_map(frame_complete, map); - - RuntimeStub* stub = - RuntimeStub::new_runtime_stub(code.name(), - &code, - frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, - false); - return stub; - } - - // For c2: call to return a leased buffer. - static RuntimeStub* generate_jfr_return_lease() { - enum layout { - r1_off, - r2_off, - return_off, - framesize // inclusive of return address - }; - - CodeBuffer code("jfr_return_lease", 512, 64); - MacroAssembler* masm = new MacroAssembler(&code); - - address start = __ pc(); - __ raw_push(R1, R2, LR); - address the_pc = __ pc(); - - int frame_complete = the_pc - start; - - __ set_last_Java_frame(SP, FP, true, Rtemp); - __ mov(c_rarg0, Rthread); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), c_rarg0); - __ reset_last_Java_frame(Rtemp); - - __ raw_pop(R1, R2, LR); - __ ret(); - - OopMapSet* oop_maps = new OopMapSet(); - OopMap* map = new OopMap(framesize, 1); - oop_maps->add_gc_map(frame_complete, map); - - RuntimeStub* stub = - RuntimeStub::new_runtime_stub(code.name(), - &code, - frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, - false); - return stub; - } - -#endif // INCLUDE_JFR - //--------------------------------------------------------------------------- // Initialization @@ -3132,8 +2997,6 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_catch_exception_entry = generate_catch_exception(); // stub for throwing stack overflow error used both by interpreter and compiler - StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); - if (UnsafeMemoryAccess::_table == nullptr) { UnsafeMemoryAccess::create_table(32 + 4); // 32 for copyMemory; 4 for setMemory } @@ -3155,28 +3018,11 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_cont_thaw = generate_cont_thaw(); StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier(); StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception(); - - JFR_ONLY(generate_jfr_stubs();) } -#if INCLUDE_JFR - void generate_jfr_stubs() { - StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint(); - StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point(); - StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease(); - StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point(); - } -#endif // INCLUDE_JFR - void generate_final_stubs() { // Generates all stubs and initializes the entry points - // These entry points require 
SharedInfo::stack0 to be set up in non-core builds - // and need to be relocatable, so they each fabricate a RuntimeStub internally. - StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError)); - StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError)); - StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call)); - //------------------------------------------------------------------------------------------------------------------------ // entry points that are platform specific diff --git a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp index efaf78ee568..679f07a028e 100644 --- a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp +++ b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp @@ -560,7 +560,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) { __ cmp(Rtemp, R0); __ mov(SP, Rsender_sp, ls); // restore SP - __ b(StubRoutines::throw_StackOverflowError_entry(), ls); + __ b(SharedRuntime::throw_StackOverflowError_entry(), ls); } diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp index 544c0d120d0..c7cf678b49e 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp @@ -1501,7 +1501,7 @@ void MacroAssembler::reserved_stack_check(Register return_pc) { call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), R16_thread); pop_frame(); mtlr(return_pc); - load_const_optimized(R0, StubRoutines::throw_delayed_StackOverflowError_entry()); + load_const_optimized(R0, SharedRuntime::throw_delayed_StackOverflowError_entry()); mtctr(R0); bctr(); diff --git a/src/hotspot/cpu/ppc/methodHandles_ppc.cpp b/src/hotspot/cpu/ppc/methodHandles_ppc.cpp index 6e5ac325e50..ccec05e7105 100644 --- a/src/hotspot/cpu/ppc/methodHandles_ppc.cpp +++ b/src/hotspot/cpu/ppc/methodHandles_ppc.cpp @@ -158,8 +158,8 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth __ bctr(); __ bind(L_no_such_method); - assert(StubRoutines::throw_AbstractMethodError_entry() != nullptr, "not yet generated!"); - __ load_const_optimized(target, StubRoutines::throw_AbstractMethodError_entry()); + assert(SharedRuntime::throw_AbstractMethodError_entry() != nullptr, "not yet generated!"); + __ load_const_optimized(target, SharedRuntime::throw_AbstractMethodError_entry()); __ mtctr(target); __ bctr(); } @@ -489,7 +489,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, if (iid == vmIntrinsics::_linkToInterface) { __ BIND(L_incompatible_class_change_error); - __ load_const_optimized(temp1, StubRoutines::throw_IncompatibleClassChangeError_entry()); + __ load_const_optimized(temp1, SharedRuntime::throw_IncompatibleClassChangeError_entry()); __ mtctr(temp1); __ bctr(); } diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp index 505f67ab6f9..98610d21b67 100644 --- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp +++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp @@ -44,6 +44,7 @@ #include 
"runtime/sharedRuntime.hpp" #include "runtime/signature.hpp" #include "runtime/stubRoutines.hpp" +#include "runtime/timerTrace.hpp" #include "runtime/vframeArray.hpp" #include "utilities/align.hpp" #include "utilities/macros.hpp" @@ -3404,6 +3405,100 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha oop_maps, true); } +// Continuation point for throwing of implicit exceptions that are +// not handled in the current activation. Fabricates an exception +// oop and initiates normal exception dispatching in this +// frame. Only callee-saved registers are preserved (through the +// normal register window / RegisterMap handling). If the compiler +// needs all registers to be preserved between the fault point and +// the exception handler then it must assume responsibility for that +// in AbstractCompiler::continuation_for_implicit_null_exception or +// continuation_for_implicit_division_by_zero_exception. All other +// implicit exceptions (e.g., NullPointerException or +// AbstractMethodError on entry) are either at call sites or +// otherwise assume that stack unwinding will be initiated, so +// caller saved registers were assumed volatile in the compiler. +// +// Note that we generate only this stub into a RuntimeStub, because +// it needs to be properly traversed and ignored during GC, so we +// change the meaning of the "__" macro within this method. +// +// Note: the routine set_pc_not_at_call_for_caller in +// SharedRuntime.cpp requires that this code be generated into a +// RuntimeStub. +RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) { + ResourceMark rm; + const char* timer_msg = "SharedRuntime generate_throw_exception"; + TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); + + CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0); + MacroAssembler* masm = new MacroAssembler(&code); + + OopMapSet* oop_maps = new OopMapSet(); + int frame_size_in_bytes = frame::native_abi_reg_args_size; + OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); + + address start = __ pc(); + + __ save_LR(R11_scratch1); + + // Push a frame. + __ push_frame_reg_args(0, R11_scratch1); + + address frame_complete_pc = __ pc(); + + // Note that we always have a runtime stub frame on the top of + // stack by this point. Remember the offset of the instruction + // whose address will be moved to R11_scratch1. + address gc_map_pc = __ get_PC_trash_LR(R11_scratch1); + + __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1); + + __ mr(R3_ARG1, R16_thread); +#if defined(ABI_ELFv2) + __ call_c(runtime_entry, relocInfo::none); +#else + __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none); +#endif + + // Set an oopmap for the call site. + oop_maps->add_gc_map((int)(gc_map_pc - start), map); + + __ reset_last_Java_frame(); + +#ifdef ASSERT + // Make sure that this code is only executed if there is a pending + // exception. + { + Label L; + __ ld(R0, + in_bytes(Thread::pending_exception_offset()), + R16_thread); + __ cmpdi(CCR0, R0, 0); + __ bne(CCR0, L); + __ stop("SharedRuntime::throw_exception: no pending exception"); + __ bind(L); + } +#endif + + // Pop frame. + __ pop_frame(); + + __ restore_LR(R11_scratch1); + + __ load_const(R11_scratch1, StubRoutines::forward_exception_entry()); + __ mtctr(R11_scratch1); + __ bctr(); + + // Create runtime stub with OopMap. 
+ RuntimeStub* stub = + RuntimeStub::new_runtime_stub(name, &code, + /*frame_complete=*/ (int)(frame_complete_pc - start), + frame_size_in_bytes/wordSize, + oop_maps, + false); + return stub; +} //------------------------------Montgomery multiplication------------------------ // @@ -3647,3 +3742,81 @@ void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints, reverse_words(m, (unsigned long *)m_ints, longwords); } + +#if INCLUDE_JFR + +// For c2: c_rarg0 is junk, call to runtime to write a checkpoint. +// It returns a jobject handle to the event writer. +// The handle is dereferenced and the return value is the event writer oop. +RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { + CodeBuffer code("jfr_write_checkpoint", 512, 64); + MacroAssembler* masm = new MacroAssembler(&code); + + Register tmp1 = R10_ARG8; + Register tmp2 = R9_ARG7; + + int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size; + address start = __ pc(); + __ mflr(tmp1); + __ std(tmp1, _abi0(lr), R1_SP); // save return pc + __ push_frame_reg_args(0, tmp1); + int frame_complete = __ pc() - start; + __ set_last_Java_frame(R1_SP, noreg); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), R16_thread); + address calls_return_pc = __ last_calls_return_pc(); + __ reset_last_Java_frame(); + // The handle is dereferenced through a load barrier. + __ resolve_global_jobject(R3_RET, tmp1, tmp2, MacroAssembler::PRESERVATION_NONE); + __ pop_frame(); + __ ld(tmp1, _abi0(lr), R1_SP); + __ mtlr(tmp1); + __ blr(); + + OopMapSet* oop_maps = new OopMapSet(); + OopMap* map = new OopMap(framesize, 0); + oop_maps->add_gc_map(calls_return_pc - start, map); + + RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) + RuntimeStub::new_runtime_stub(code.name(), + &code, frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, false); + return stub; +} + +// For c2: call to return a leased buffer. 
+RuntimeStub* SharedRuntime::generate_jfr_return_lease() { + CodeBuffer code("jfr_return_lease", 512, 64); + MacroAssembler* masm = new MacroAssembler(&code); + + Register tmp1 = R10_ARG8; + Register tmp2 = R9_ARG7; + + int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size; + address start = __ pc(); + __ mflr(tmp1); + __ std(tmp1, _abi0(lr), R1_SP); // save return pc + __ push_frame_reg_args(0, tmp1); + int frame_complete = __ pc() - start; + __ set_last_Java_frame(R1_SP, noreg); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), R16_thread); + address calls_return_pc = __ last_calls_return_pc(); + __ reset_last_Java_frame(); + __ pop_frame(); + __ ld(tmp1, _abi0(lr), R1_SP); + __ mtlr(tmp1); + __ blr(); + + OopMapSet* oop_maps = new OopMapSet(); + OopMap* map = new OopMap(framesize, 0); + oop_maps->add_gc_map(calls_return_pc - start, map); + + RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) + RuntimeStub::new_runtime_stub(code.name(), + &code, frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, false); + return stub; +} + +#endif // INCLUDE_JFR diff --git a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp index 8da7cf7e791..ee3f1911e20 100644 --- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp @@ -517,109 +517,6 @@ class StubGenerator: public StubCodeGenerator { } #undef __ -#define __ masm-> - // Continuation point for throwing of implicit exceptions that are - // not handled in the current activation. Fabricates an exception - // oop and initiates normal exception dispatching in this - // frame. Only callee-saved registers are preserved (through the - // normal register window / RegisterMap handling). If the compiler - // needs all registers to be preserved between the fault point and - // the exception handler then it must assume responsibility for that - // in AbstractCompiler::continuation_for_implicit_null_exception or - // continuation_for_implicit_division_by_zero_exception. All other - // implicit exceptions (e.g., NullPointerException or - // AbstractMethodError on entry) are either at call sites or - // otherwise assume that stack unwinding will be initiated, so - // caller saved registers were assumed volatile in the compiler. - // - // Note that we generate only this stub into a RuntimeStub, because - // it needs to be properly traversed and ignored during GC, so we - // change the meaning of the "__" macro within this method. - // - // Note: the routine set_pc_not_at_call_for_caller in - // SharedRuntime.cpp requires that this code be generated into a - // RuntimeStub. - address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc, - Register arg1 = noreg, Register arg2 = noreg) { - CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0); - MacroAssembler* masm = new MacroAssembler(&code); - - OopMapSet* oop_maps = new OopMapSet(); - int frame_size_in_bytes = frame::native_abi_reg_args_size; - OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); - - address start = __ pc(); - - __ save_LR(R11_scratch1); - - // Push a frame. - __ push_frame_reg_args(0, R11_scratch1); - - address frame_complete_pc = __ pc(); - - if (restore_saved_exception_pc) { - __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc"); - } - - // Note that we always have a runtime stub frame on the top of - // stack by this point. 
Remember the offset of the instruction - // whose address will be moved to R11_scratch1. - address gc_map_pc = __ get_PC_trash_LR(R11_scratch1); - - __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1); - - __ mr(R3_ARG1, R16_thread); - if (arg1 != noreg) { - __ mr(R4_ARG2, arg1); - } - if (arg2 != noreg) { - __ mr(R5_ARG3, arg2); - } -#if defined(ABI_ELFv2) - __ call_c(runtime_entry, relocInfo::none); -#else - __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none); -#endif - - // Set an oopmap for the call site. - oop_maps->add_gc_map((int)(gc_map_pc - start), map); - - __ reset_last_Java_frame(); - -#ifdef ASSERT - // Make sure that this code is only executed if there is a pending - // exception. - { - Label L; - __ ld(R0, - in_bytes(Thread::pending_exception_offset()), - R16_thread); - __ cmpdi(CCR0, R0, 0); - __ bne(CCR0, L); - __ stop("StubRoutines::throw_exception: no pending exception"); - __ bind(L); - } -#endif - - // Pop frame. - __ pop_frame(); - - __ restore_LR(R11_scratch1); - - __ load_const(R11_scratch1, StubRoutines::forward_exception_entry()); - __ mtctr(R11_scratch1); - __ bctr(); - - // Create runtime stub with OopMap. - RuntimeStub* stub = - RuntimeStub::new_runtime_stub(name, &code, - /*frame_complete=*/ (int)(frame_complete_pc - start), - frame_size_in_bytes/wordSize, - oop_maps, - false); - return stub->entry_point(); - } -#undef __ #define __ _masm-> @@ -4616,7 +4513,7 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { Label thaw_success; __ cmpdi(CCR0, R3_RET, 0); __ bne(CCR0, thaw_success); - __ load_const_optimized(tmp1, (StubRoutines::throw_StackOverflowError_entry()), R0); + __ load_const_optimized(tmp1, (SharedRuntime::throw_StackOverflowError_entry()), R0); __ mtctr(tmp1); __ bctr(); __ bind(thaw_success); @@ -4675,84 +4572,6 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception); } -#if INCLUDE_JFR - - // For c2: c_rarg0 is junk, call to runtime to write a checkpoint. - // It returns a jobject handle to the event writer. - // The handle is dereferenced and the return value is the event writer oop. - RuntimeStub* generate_jfr_write_checkpoint() { - CodeBuffer code("jfr_write_checkpoint", 512, 64); - MacroAssembler* _masm = new MacroAssembler(&code); - - Register tmp1 = R10_ARG8; - Register tmp2 = R9_ARG7; - - int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size; - address start = __ pc(); - __ mflr(tmp1); - __ std(tmp1, _abi0(lr), R1_SP); // save return pc - __ push_frame_reg_args(0, tmp1); - int frame_complete = __ pc() - start; - __ set_last_Java_frame(R1_SP, noreg); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), R16_thread); - address calls_return_pc = __ last_calls_return_pc(); - __ reset_last_Java_frame(); - // The handle is dereferenced through a load barrier. 
- __ resolve_global_jobject(R3_RET, tmp1, tmp2, MacroAssembler::PRESERVATION_NONE); - __ pop_frame(); - __ ld(tmp1, _abi0(lr), R1_SP); - __ mtlr(tmp1); - __ blr(); - - OopMapSet* oop_maps = new OopMapSet(); - OopMap* map = new OopMap(framesize, 0); - oop_maps->add_gc_map(calls_return_pc - start, map); - - RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) - RuntimeStub::new_runtime_stub(code.name(), - &code, frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, false); - return stub; - } - - // For c2: call to return a leased buffer. - RuntimeStub* generate_jfr_return_lease() { - CodeBuffer code("jfr_return_lease", 512, 64); - MacroAssembler* _masm = new MacroAssembler(&code); - - Register tmp1 = R10_ARG8; - Register tmp2 = R9_ARG7; - - int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size; - address start = __ pc(); - __ mflr(tmp1); - __ std(tmp1, _abi0(lr), R1_SP); // save return pc - __ push_frame_reg_args(0, tmp1); - int frame_complete = __ pc() - start; - __ set_last_Java_frame(R1_SP, noreg); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), R16_thread); - address calls_return_pc = __ last_calls_return_pc(); - __ reset_last_Java_frame(); - __ pop_frame(); - __ ld(tmp1, _abi0(lr), R1_SP); - __ mtlr(tmp1); - __ blr(); - - OopMapSet* oop_maps = new OopMapSet(); - OopMap* map = new OopMap(framesize, 0); - oop_maps->add_gc_map(calls_return_pc - start, map); - - RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) - RuntimeStub::new_runtime_stub(code.name(), - &code, frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, false); - return stub; - } - -#endif // INCLUDE_JFR - // exception handler for upcall stubs address generate_upcall_stub_exception_handler() { StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler"); @@ -4786,14 +4605,6 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { UnsafeMemoryAccess::create_table(8 + 4); // 8 for copyMemory; 4 for setMemory } - // Build this early so it's available for the interpreter. - StubRoutines::_throw_StackOverflowError_entry = - generate_throw_exception("StackOverflowError throw_exception", - CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false); - StubRoutines::_throw_delayed_StackOverflowError_entry = - generate_throw_exception("delayed StackOverflowError throw_exception", - CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false); - // CRC32 Intrinsics. 
if (UseCRC32Intrinsics) { StubRoutines::_crc_table_adr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32_POLY); @@ -4812,29 +4623,11 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { StubRoutines::_cont_thaw = generate_cont_thaw(); StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier(); StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception(); - - JFR_ONLY(generate_jfr_stubs();) - } - -#if INCLUDE_JFR - void generate_jfr_stubs() { - StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint(); - StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point(); - StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease(); - StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point(); } -#endif // INCLUDE_JFR void generate_final_stubs() { // Generates all stubs and initializes the entry points - // These entry points require SharedInfo::stack0 to be set up in - // non-core builds - StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false); - // Handle IncompatibleClassChangeError in itable stubs. - StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false); - StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false); - // support for verify_oop (must happen after universe_init) StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); diff --git a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp index 4caae200253..bb746619616 100644 --- a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp @@ -783,8 +783,8 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_f __ bgt(CCR0/*is_stack_overflow*/, done); // The stack overflows. Load target address of the runtime stub and call it. - assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "generated in wrong order"); - __ load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0); + assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "generated in wrong order"); + __ load_const_optimized(Rscratch1, (SharedRuntime::throw_StackOverflowError_entry()), R0); __ mtctr(Rscratch1); // Restore caller_sp (c2i adapter may exist, but no shrinking of interpreted caller frame). #ifdef ASSERT diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp index c6624cb45bb..32ccba6b0ce 100644 --- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp @@ -4144,7 +4144,7 @@ void MacroAssembler::reserved_stack_check() { // We have already removed our own frame. // throw_delayed_StackOverflowError will think that it's been // called by our caller. 
- la(t0, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry())); + la(t0, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry())); jr(t0); should_not_reach_here(); diff --git a/src/hotspot/cpu/riscv/methodHandles_riscv.cpp b/src/hotspot/cpu/riscv/methodHandles_riscv.cpp index fbb5b914038..deeb771d83b 100644 --- a/src/hotspot/cpu/riscv/methodHandles_riscv.cpp +++ b/src/hotspot/cpu/riscv/methodHandles_riscv.cpp @@ -120,7 +120,7 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth __ ld(t0,Address(method, entry_offset)); __ jr(t0); __ bind(L_no_such_method); - __ far_jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry())); + __ far_jump(RuntimeAddress(SharedRuntime::throw_AbstractMethodError_entry())); } void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, @@ -441,7 +441,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, jump_from_method_handle(_masm, xmethod, temp1, for_compiler_entry); if (iid == vmIntrinsics::_linkToInterface) { __ bind(L_incompatible_class_change_error); - __ far_jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry())); + __ far_jump(RuntimeAddress(SharedRuntime::throw_IncompatibleClassChangeError_entry())); } } diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp index ad06f688d6a..bed24e442e8 100644 --- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp +++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp @@ -48,6 +48,7 @@ #include "runtime/sharedRuntime.hpp" #include "runtime/signature.hpp" #include "runtime/stubRoutines.hpp" +#include "runtime/timerTrace.hpp" #include "runtime/vframeArray.hpp" #include "utilities/align.hpp" #include "utilities/formatBuffer.hpp" @@ -65,6 +66,12 @@ #define __ masm-> +#ifdef PRODUCT +#define BLOCK_COMMENT(str) /* nothing */ +#else +#define BLOCK_COMMENT(str) __ block_comment(str) +#endif + const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size; class RegisterSaver { @@ -2628,3 +2635,191 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha // return the blob return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true); } + +// Continuation point for throwing of implicit exceptions that are +// not handled in the current activation. Fabricates an exception +// oop and initiates normal exception dispatching in this +// frame. Since we need to preserve callee-saved values (currently +// only for C2, but done for C1 as well) we need a callee-saved oop +// map and therefore have to make these stubs into RuntimeStubs +// rather than BufferBlobs. If the compiler needs all registers to +// be preserved between the fault point and the exception handler +// then it must assume responsibility for that in +// AbstractCompiler::continuation_for_implicit_null_exception or +// continuation_for_implicit_division_by_zero_exception. All other +// implicit exceptions (e.g., NullPointerException or +// AbstractMethodError on entry) are either at call sites or +// otherwise assume that stack unwinding will be initiated, so +// caller saved registers were assumed volatile in the compiler. + +RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) { + // Information about frame layout at time of blocking runtime call. 
+ // Note that we only have to preserve callee-saved registers since + // the compilers are responsible for supplying a continuation point + // if they expect all registers to be preserved. + // n.b. riscv asserts that frame::arg_reg_save_area_bytes == 0 + assert_cond(runtime_entry != nullptr); + enum layout { + fp_off = 0, + fp_off2, + return_off, + return_off2, + framesize // inclusive of return address + }; + + const int insts_size = 1024; + const int locs_size = 64; + + ResourceMark rm; + const char* timer_msg = "SharedRuntime generate_throw_exception"; + TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); + + CodeBuffer code(name, insts_size, locs_size); + OopMapSet* oop_maps = new OopMapSet(); + MacroAssembler* masm = new MacroAssembler(&code); + assert_cond(oop_maps != nullptr && masm != nullptr); + + address start = __ pc(); + + // This is an inlined and slightly modified version of call_VM + // which has the ability to fetch the return PC out of + // thread-local storage and also sets up last_Java_sp slightly + // differently than the real call_VM + + __ enter(); // Save FP and RA before call + + assert(is_even(framesize / 2), "sp not 16-byte aligned"); + + // ra and fp are already in place + __ addi(sp, fp, 0 - ((unsigned)framesize << LogBytesPerInt)); // prolog + + int frame_complete = __ pc() - start; + + // Set up last_Java_sp and last_Java_fp + address the_pc = __ pc(); + __ set_last_Java_frame(sp, fp, the_pc, t0); + + // Call runtime + __ mv(c_rarg0, xthread); + BLOCK_COMMENT("call runtime_entry"); + __ rt_call(runtime_entry); + + // Generate oop map + OopMap* map = new OopMap(framesize, 0); + assert_cond(map != nullptr); + + oop_maps->add_gc_map(the_pc - start, map); + + __ reset_last_Java_frame(true); + + __ leave(); + + // check for pending exceptions +#ifdef ASSERT + Label L; + __ ld(t0, Address(xthread, Thread::pending_exception_offset())); + __ bnez(t0, L); + __ should_not_reach_here(); + __ bind(L); +#endif // ASSERT + __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry())); + + // codeBlob framesize is in words (not VMRegImpl::slot_size) + RuntimeStub* stub = + RuntimeStub::new_runtime_stub(name, + &code, + frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, false); + assert(stub != nullptr, "create runtime stub fail!"); + return stub; +} + +#if INCLUDE_JFR + +static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) { + __ set_last_Java_frame(sp, fp, the_pc, t0); + __ mv(c_rarg0, thread); +} + +static void jfr_epilogue(MacroAssembler* masm) { + __ reset_last_Java_frame(true); +} +// For c2: c_rarg0 is junk, call to runtime to write a checkpoint. +// It returns a jobject handle to the event writer. +// The handle is dereferenced and the return value is the event writer oop. 
+RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { + enum layout { + fp_off, + fp_off2, + return_off, + return_off2, + framesize // inclusive of return address + }; + + int insts_size = 1024; + int locs_size = 64; + CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size); + OopMapSet* oop_maps = new OopMapSet(); + MacroAssembler* masm = new MacroAssembler(&code); + + address start = __ pc(); + __ enter(); + int frame_complete = __ pc() - start; + address the_pc = __ pc(); + jfr_prologue(the_pc, masm, xthread); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1); + + jfr_epilogue(masm); + __ resolve_global_jobject(x10, t0, t1); + __ leave(); + __ ret(); + + OopMap* map = new OopMap(framesize, 1); + oop_maps->add_gc_map(the_pc - start, map); + + RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) + RuntimeStub::new_runtime_stub("jfr_write_checkpoint", &code, frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, false); + return stub; +} + +// For c2: call to return a leased buffer. +RuntimeStub* SharedRuntime::generate_jfr_return_lease() { + enum layout { + fp_off, + fp_off2, + return_off, + return_off2, + framesize // inclusive of return address + }; + + int insts_size = 1024; + int locs_size = 64; + CodeBuffer code("jfr_return_lease", insts_size, locs_size); + OopMapSet* oop_maps = new OopMapSet(); + MacroAssembler* masm = new MacroAssembler(&code); + + address start = __ pc(); + __ enter(); + int frame_complete = __ pc() - start; + address the_pc = __ pc(); + jfr_prologue(the_pc, masm, xthread); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1); + + jfr_epilogue(masm); + __ leave(); + __ ret(); + + OopMap* map = new OopMap(framesize, 1); + oop_maps->add_gc_map(the_pc - start, map); + + RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) + RuntimeStub::new_runtime_stub("jfr_return_lease", &code, frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, false); + return stub; +} + +#endif // INCLUDE_JFR diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp index 6a2c6c7d6c9..f214c489557 100644 --- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp +++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp @@ -3774,7 +3774,7 @@ class StubGenerator: public StubCodeGenerator { Label thaw_success; // t1 contains the size of the frames to thaw, 0 if overflow or no more frames __ bnez(t1, thaw_success); - __ la(t0, RuntimeAddress(StubRoutines::throw_StackOverflowError_entry())); + __ la(t0, RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry())); __ jr(t0); __ bind(thaw_success); @@ -5834,97 +5834,6 @@ static const int64_t right_3_bits = right_n_bits(3); return start; } -#if INCLUDE_JFR - - static void jfr_prologue(address the_pc, MacroAssembler* _masm, Register thread) { - __ set_last_Java_frame(sp, fp, the_pc, t0); - __ mv(c_rarg0, thread); - } - - static void jfr_epilogue(MacroAssembler* _masm) { - __ reset_last_Java_frame(true); - } - // For c2: c_rarg0 is junk, call to runtime to write a checkpoint. - // It returns a jobject handle to the event writer. - // The handle is dereferenced and the return value is the event writer oop. 
- static RuntimeStub* generate_jfr_write_checkpoint() { - enum layout { - fp_off, - fp_off2, - return_off, - return_off2, - framesize // inclusive of return address - }; - - int insts_size = 1024; - int locs_size = 64; - CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size); - OopMapSet* oop_maps = new OopMapSet(); - MacroAssembler* masm = new MacroAssembler(&code); - MacroAssembler* _masm = masm; - - address start = __ pc(); - __ enter(); - int frame_complete = __ pc() - start; - address the_pc = __ pc(); - jfr_prologue(the_pc, _masm, xthread); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1); - - jfr_epilogue(_masm); - __ resolve_global_jobject(x10, t0, t1); - __ leave(); - __ ret(); - - OopMap* map = new OopMap(framesize, 1); - oop_maps->add_gc_map(the_pc - start, map); - - RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) - RuntimeStub::new_runtime_stub("jfr_write_checkpoint", &code, frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, false); - return stub; - } - - // For c2: call to return a leased buffer. - static RuntimeStub* generate_jfr_return_lease() { - enum layout { - fp_off, - fp_off2, - return_off, - return_off2, - framesize // inclusive of return address - }; - - int insts_size = 1024; - int locs_size = 64; - CodeBuffer code("jfr_return_lease", insts_size, locs_size); - OopMapSet* oop_maps = new OopMapSet(); - MacroAssembler* masm = new MacroAssembler(&code); - MacroAssembler* _masm = masm; - - address start = __ pc(); - __ enter(); - int frame_complete = __ pc() - start; - address the_pc = __ pc(); - jfr_prologue(the_pc, _masm, xthread); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1); - - jfr_epilogue(_masm); - __ leave(); - __ ret(); - - OopMap* map = new OopMap(framesize, 1); - oop_maps->add_gc_map(the_pc - start, map); - - RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) - RuntimeStub::new_runtime_stub("jfr_return_lease", &code, frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, false); - return stub; - } - -#endif // INCLUDE_JFR - // exception handler for upcall stubs address generate_upcall_stub_exception_handler() { StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler"); @@ -5939,114 +5848,6 @@ static const int64_t right_3_bits = right_n_bits(3); return start; } - // Continuation point for throwing of implicit exceptions that are - // not handled in the current activation. Fabricates an exception - // oop and initiates normal exception dispatching in this - // frame. Since we need to preserve callee-saved values (currently - // only for C2, but done for C1 as well) we need a callee-saved oop - // map and therefore have to make these stubs into RuntimeStubs - // rather than BufferBlobs. If the compiler needs all registers to - // be preserved between the fault point and the exception handler - // then it must assume responsibility for that in - // AbstractCompiler::continuation_for_implicit_null_exception or - // continuation_for_implicit_division_by_zero_exception. All other - // implicit exceptions (e.g., NullPointerException or - // AbstractMethodError on entry) are either at call sites or - // otherwise assume that stack unwinding will be initiated, so - // caller saved registers were assumed volatile in the compiler. 
- -#undef __ -#define __ masm-> - - address generate_throw_exception(const char* name, - address runtime_entry, - Register arg1 = noreg, - Register arg2 = noreg) { - // Information about frame layout at time of blocking runtime call. - // Note that we only have to preserve callee-saved registers since - // the compilers are responsible for supplying a continuation point - // if they expect all registers to be preserved. - // n.b. riscv asserts that frame::arg_reg_save_area_bytes == 0 - assert_cond(runtime_entry != nullptr); - enum layout { - fp_off = 0, - fp_off2, - return_off, - return_off2, - framesize // inclusive of return address - }; - - const int insts_size = 1024; - const int locs_size = 64; - - CodeBuffer code(name, insts_size, locs_size); - OopMapSet* oop_maps = new OopMapSet(); - MacroAssembler* masm = new MacroAssembler(&code); - assert_cond(oop_maps != nullptr && masm != nullptr); - - address start = __ pc(); - - // This is an inlined and slightly modified version of call_VM - // which has the ability to fetch the return PC out of - // thread-local storage and also sets up last_Java_sp slightly - // differently than the real call_VM - - __ enter(); // Save FP and RA before call - - assert(is_even(framesize / 2), "sp not 16-byte aligned"); - - // ra and fp are already in place - __ addi(sp, fp, 0 - ((unsigned)framesize << LogBytesPerInt)); // prolog - - int frame_complete = __ pc() - start; - - // Set up last_Java_sp and last_Java_fp - address the_pc = __ pc(); - __ set_last_Java_frame(sp, fp, the_pc, t0); - - // Call runtime - if (arg1 != noreg) { - assert(arg2 != c_rarg1, "clobbered"); - __ mv(c_rarg1, arg1); - } - if (arg2 != noreg) { - __ mv(c_rarg2, arg2); - } - __ mv(c_rarg0, xthread); - BLOCK_COMMENT("call runtime_entry"); - __ rt_call(runtime_entry); - - // Generate oop map - OopMap* map = new OopMap(framesize, 0); - assert_cond(map != nullptr); - - oop_maps->add_gc_map(the_pc - start, map); - - __ reset_last_Java_frame(true); - - __ leave(); - - // check for pending exceptions -#ifdef ASSERT - Label L; - __ ld(t0, Address(xthread, Thread::pending_exception_offset())); - __ bnez(t0, L); - __ should_not_reach_here(); - __ bind(L); -#endif // ASSERT - __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry())); - - // codeBlob framesize is in words (not VMRegImpl::slot_size) - RuntimeStub* stub = - RuntimeStub::new_runtime_stub(name, - &code, - frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, false); - assert(stub != nullptr, "create runtime stub fail!"); - return stub->entry_point(); - } - #undef __ // Initialization @@ -6071,16 +5872,6 @@ static const int64_t right_3_bits = right_n_bits(3); // is referenced by megamorphic call StubRoutines::_catch_exception_entry = generate_catch_exception(); - // Build this early so it's available for the interpreter. 
- StubRoutines::_throw_StackOverflowError_entry = - generate_throw_exception("StackOverflowError throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime::throw_StackOverflowError)); - StubRoutines::_throw_delayed_StackOverflowError_entry = - generate_throw_exception("delayed StackOverflowError throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime::throw_delayed_StackOverflowError)); - if (UseCRC32Intrinsics) { // set table address before stub generation which use it StubRoutines::_crc_table_adr = (address)StubRoutines::riscv::_crc_table; @@ -6093,42 +5884,14 @@ static const int64_t right_3_bits = right_n_bits(3); StubRoutines::_cont_thaw = generate_cont_thaw(); StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier(); StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception(); - - JFR_ONLY(generate_jfr_stubs();) } -#if INCLUDE_JFR - void generate_jfr_stubs() { - StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint(); - StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point(); - StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease(); - StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point(); - } -#endif // INCLUDE_JFR - void generate_final_stubs() { // support for verify_oop (must happen after universe_init) if (VerifyOops) { StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); } - StubRoutines::_throw_AbstractMethodError_entry = - generate_throw_exception("AbstractMethodError throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime:: - throw_AbstractMethodError)); - - StubRoutines::_throw_IncompatibleClassChangeError_entry = - generate_throw_exception("IncompatibleClassChangeError throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime:: - throw_IncompatibleClassChangeError)); - - StubRoutines::_throw_NullPointerException_at_call_entry = - generate_throw_exception("NullPointerException at call throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime:: - throw_NullPointerException_at_call)); // arraycopy stubs used by compilers generate_arraycopy_stubs(); diff --git a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp index f01945bc6a3..1f32488777d 100644 --- a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp +++ b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp @@ -658,8 +658,8 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) { // Note: the restored frame is not necessarily interpreted. // Use the shared runtime version of the StackOverflowError. 
- assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated"); - __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry())); + assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated"); + __ far_jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry())); // all done with frame size check __ bind(after_frame_check); diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp index b72b36eef53..a233934405f 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp @@ -2751,7 +2751,7 @@ void MacroAssembler::reserved_stack_check(Register return_pc) { pop_frame(); restore_return_pc(); - load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry()); + load_const_optimized(Z_R1, SharedRuntime::throw_delayed_StackOverflowError_entry()); // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc. z_br(Z_R1); diff --git a/src/hotspot/cpu/s390/methodHandles_s390.cpp b/src/hotspot/cpu/s390/methodHandles_s390.cpp index ef8722f2499..b42822a6eee 100644 --- a/src/hotspot/cpu/s390/methodHandles_s390.cpp +++ b/src/hotspot/cpu/s390/methodHandles_s390.cpp @@ -180,8 +180,8 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth __ z_br(target); __ bind(L_no_such_method); - assert(StubRoutines::throw_AbstractMethodError_entry() != nullptr, "not yet generated!"); - __ load_const_optimized(target, StubRoutines::throw_AbstractMethodError_entry()); + assert(SharedRuntime::throw_AbstractMethodError_entry() != nullptr, "not yet generated!"); + __ load_const_optimized(target, SharedRuntime::throw_AbstractMethodError_entry()); __ z_br(target); } @@ -543,7 +543,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, __ bind(L_no_such_interface); // Throw exception. - __ load_const_optimized(Z_R1, StubRoutines::throw_IncompatibleClassChangeError_entry()); + __ load_const_optimized(Z_R1, SharedRuntime::throw_IncompatibleClassChangeError_entry()); __ z_br(Z_R1); break; } diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp index 641b3712a17..364ef948d91 100644 --- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp +++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp @@ -3010,6 +3010,85 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha } +// Continuation point for throwing of implicit exceptions that are +// not handled in the current activation. Fabricates an exception +// oop and initiates normal exception dispatching in this +// frame. Only callee-saved registers are preserved (through the +// normal RegisterMap handling). If the compiler +// needs all registers to be preserved between the fault point and +// the exception handler then it must assume responsibility for that +// in AbstractCompiler::continuation_for_implicit_null_exception or +// continuation_for_implicit_division_by_zero_exception. All other +// implicit exceptions (e.g., NullPointerException or +// AbstractMethodError on entry) are either at call sites or +// otherwise assume that stack unwinding will be initiated, so +// caller saved registers were assumed volatile in the compiler. + +// Note that we generate only this stub into a RuntimeStub, because +// it needs to be properly traversed and ignored during GC, so we +// change the meaning of the "__" macro within this method. 
+ +// Note: the routine set_pc_not_at_call_for_caller in +// SharedRuntime.cpp requires that this code be generated into a +// RuntimeStub. + +RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) { + + int insts_size = 256; + int locs_size = 0; + + ResourceMark rm; + const char* timer_msg = "SharedRuntime generate_throw_exception"; + TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); + + CodeBuffer code(name, insts_size, locs_size); + MacroAssembler* masm = new MacroAssembler(&code); + int framesize_in_bytes; + address start = __ pc(); + + __ save_return_pc(); + framesize_in_bytes = __ push_frame_abi160(0); + + address frame_complete_pc = __ pc(); + + // Note that we always have a runtime stub frame on the top of stack at this point. + __ get_PC(Z_R1); + __ set_last_Java_frame(/*sp*/Z_SP, /*pc*/Z_R1); + + // Do the call. + BLOCK_COMMENT("call runtime_entry"); + __ call_VM_leaf(runtime_entry, Z_thread); + + __ reset_last_Java_frame(); + +#ifdef ASSERT + // Make sure that this code is only executed if there is a pending exception. + { Label L; + __ z_lg(Z_R0, + in_bytes(Thread::pending_exception_offset()), + Z_thread); + __ z_ltgr(Z_R0, Z_R0); + __ z_brne(L); + __ stop("SharedRuntime::throw_exception: no pending exception"); + __ bind(L); + } +#endif + + __ pop_frame(); + __ restore_return_pc(); + + __ load_const_optimized(Z_R1, StubRoutines::forward_exception_entry()); + __ z_br(Z_R1); + + RuntimeStub* stub = + RuntimeStub::new_runtime_stub(name, &code, + frame_complete_pc - start, + framesize_in_bytes/wordSize, + nullptr /*oop_maps*/, false); + + return stub; +} + //------------------------------Montgomery multiplication------------------------ // @@ -3263,3 +3342,18 @@ extern "C" int SpinPause() { return 0; } + +#if INCLUDE_JFR +RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { + if (!Continuations::enabled()) return nullptr; + Unimplemented(); + return nullptr; +} + +RuntimeStub* SharedRuntime::generate_jfr_return_lease() { + if (!Continuations::enabled()) return nullptr; + Unimplemented(); + return nullptr; +} + +#endif // INCLUDE_JFR diff --git a/src/hotspot/cpu/s390/stubGenerator_s390.cpp b/src/hotspot/cpu/s390/stubGenerator_s390.cpp index 556c0d4703d..d878731cca5 100644 --- a/src/hotspot/cpu/s390/stubGenerator_s390.cpp +++ b/src/hotspot/cpu/s390/stubGenerator_s390.cpp @@ -572,89 +572,6 @@ class StubGenerator: public StubCodeGenerator { #undef pending_exception_offset } - // Continuation point for throwing of implicit exceptions that are - // not handled in the current activation. Fabricates an exception - // oop and initiates normal exception dispatching in this - // frame. Only callee-saved registers are preserved (through the - // normal RegisterMap handling). If the compiler - // needs all registers to be preserved between the fault point and - // the exception handler then it must assume responsibility for that - // in AbstractCompiler::continuation_for_implicit_null_exception or - // continuation_for_implicit_division_by_zero_exception. All other - // implicit exceptions (e.g., NullPointerException or - // AbstractMethodError on entry) are either at call sites or - // otherwise assume that stack unwinding will be initiated, so - // caller saved registers were assumed volatile in the compiler. - - // Note that we generate only this stub into a RuntimeStub, because - // it needs to be properly traversed and ignored during GC, so we - // change the meaning of the "__" macro within this method. 
- - // Note: the routine set_pc_not_at_call_for_caller in - // SharedRuntime.cpp requires that this code be generated into a - // RuntimeStub. -#undef __ -#define __ masm-> - - address generate_throw_exception(const char* name, address runtime_entry, - bool restore_saved_exception_pc, - Register arg1 = noreg, Register arg2 = noreg) { - assert_different_registers(arg1, Z_R0_scratch); // would be destroyed by push_frame() - assert_different_registers(arg2, Z_R0_scratch); // would be destroyed by push_frame() - - int insts_size = 256; - int locs_size = 0; - CodeBuffer code(name, insts_size, locs_size); - MacroAssembler* masm = new MacroAssembler(&code); - int framesize_in_bytes; - address start = __ pc(); - - __ save_return_pc(); - framesize_in_bytes = __ push_frame_abi160(0); - - address frame_complete_pc = __ pc(); - if (restore_saved_exception_pc) { - __ unimplemented("StubGenerator::throw_exception", 74); - } - - // Note that we always have a runtime stub frame on the top of stack at this point. - __ get_PC(Z_R1); - __ set_last_Java_frame(/*sp*/Z_SP, /*pc*/Z_R1); - - // Do the call. - BLOCK_COMMENT("call runtime_entry"); - __ call_VM_leaf(runtime_entry, Z_thread, arg1, arg2); - - __ reset_last_Java_frame(); - -#ifdef ASSERT - // Make sure that this code is only executed if there is a pending exception. - { Label L; - __ z_lg(Z_R0, - in_bytes(Thread::pending_exception_offset()), - Z_thread); - __ z_ltgr(Z_R0, Z_R0); - __ z_brne(L); - __ stop("StubRoutines::throw_exception: no pending exception"); - __ bind(L); - } -#endif - - __ pop_frame(); - __ restore_return_pc(); - - __ load_const_optimized(Z_R1, StubRoutines::forward_exception_entry()); - __ z_br(Z_R1); - - RuntimeStub* stub = - RuntimeStub::new_runtime_stub(name, &code, - frame_complete_pc - start, - framesize_in_bytes/wordSize, - nullptr /*oop_maps*/, false); - - return stub->entry_point(); - } - #undef __ #ifdef PRODUCT #define __ _masm-> @@ -3121,21 +3038,6 @@ class StubGenerator: public StubCodeGenerator { return nullptr; } - #if INCLUDE_JFR - RuntimeStub* generate_jfr_write_checkpoint() { - if (!Continuations::enabled()) return nullptr; - Unimplemented(); - return nullptr; - } - - RuntimeStub* generate_jfr_return_lease() { - if (!Continuations::enabled()) return nullptr; - Unimplemented(); - return nullptr; - } - - #endif // INCLUDE_JFR - // exception handler for upcall stubs address generate_upcall_stub_exception_handler() { StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler"); @@ -3164,14 +3066,6 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address); StubRoutines::_catch_exception_entry = generate_catch_exception(); - // Build this early so it's available for the interpreter. - StubRoutines::_throw_StackOverflowError_entry = - generate_throw_exception("StackOverflowError throw_exception", - CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false); - StubRoutines::_throw_delayed_StackOverflowError_entry = - generate_throw_exception("delayed StackOverflowError throw_exception", - CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false); - //---------------------------------------------------------------------- // Entry points that are platform specific. 
@@ -3196,29 +3090,13 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_cont_thaw = generate_cont_thaw(); StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier(); StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception(); - - JFR_ONLY(generate_jfr_stubs();) } -#if INCLUDE_JFR - void generate_jfr_stubs() { - StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint(); - StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point(); - StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease(); - StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point(); - } -#endif // INCLUDE_JFR - void generate_final_stubs() { // Generates all stubs and initializes the entry points. StubRoutines::zarch::_partial_subtype_check = generate_partial_subtype_check(); - // These entry points require SharedInfo::stack0 to be set up in non-core builds. - StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false); - StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false); - StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false); - // Support for verify_oop (must happen after universe_init). StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine(); diff --git a/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp b/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp index 87025826400..c16e4449045 100644 --- a/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp +++ b/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp @@ -850,9 +850,9 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register frame_ // Note also that the restored frame is not necessarily interpreted. // Use the shared runtime version of the StackOverflowError. - assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated"); - AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry()); - __ load_absolute_address(tmp1, StubRoutines::throw_StackOverflowError_entry()); + assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated"); + AddressLiteral stub(SharedRuntime::throw_StackOverflowError_entry()); + __ load_absolute_address(tmp1, SharedRuntime::throw_StackOverflowError_entry()); __ z_br(tmp1); // If you get to here, then there is enough stack space. 
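The hunks above and below repeat one pattern across ppc, riscv, s390 and x86: the per-CPU StubGenerator copies of generate_throw_exception() and the JFR stubs are deleted, SharedRuntime now generates those RuntimeStubs itself, and every call site switches from StubRoutines::throw_*_entry() to SharedRuntime::throw_*_entry(). Below is a minimal, self-contained C++ sketch of that ownership pattern only; RuntimeStubLike, SharedRuntimeLike, generate_stubs() and the dummy entry address are simplified stand-ins invented for illustration, not the real HotSpot classes or the code added by this patch.

// Sketch of the "SharedRuntime owns the throw-exception blobs" pattern.
// All names here are illustrative stand-ins, not HotSpot types.
#include <cassert>
#include <cstdio>

typedef unsigned char* address;

struct RuntimeStubLike {
  const char* _name;
  address     _entry;
  address entry_point() const { return _entry; }
};

class SharedRuntimeLike {
  // One blob per implicit exception, generated once during stub init.
  static RuntimeStubLike* _throw_StackOverflowError_blob;

  // Platform code would emit real machine code here; the sketch just
  // fabricates a stub object with a dummy entry address.
  static RuntimeStubLike* generate_throw_exception(const char* name,
                                                   address runtime_entry) {
    static unsigned char dummy_code[16];
    (void)runtime_entry;
    return new RuntimeStubLike{name, dummy_code};
  }

 public:
  static void generate_stubs() {
    _throw_StackOverflowError_blob =
        generate_throw_exception("StackOverflowError throw_exception",
                                 /* runtime_entry = */ nullptr);
  }

  // Call sites (interpreter, method handles, continuations) now ask the
  // shared-runtime side for the entry instead of StubRoutines.
  static address throw_StackOverflowError_entry() {
    assert(_throw_StackOverflowError_blob != nullptr && "generated in wrong order");
    return _throw_StackOverflowError_blob->entry_point();
  }
};

RuntimeStubLike* SharedRuntimeLike::_throw_StackOverflowError_blob = nullptr;

int main() {
  SharedRuntimeLike::generate_stubs();
  std::printf("entry = %p\n",
              (void*)SharedRuntimeLike::throw_StackOverflowError_entry());
  return 0;
}

The design effect visible in the diff is that one shared owner generates and caches each blob exactly once, the duplicated per-architecture generator boilerplate disappears, and callers such as generate_stack_overflow_check() can assert the entry exists without depending on the ordering of StubGenerator phases.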
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp index a5ad19806ea..ba337751d19 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -1303,7 +1303,7 @@ void MacroAssembler::reserved_stack_check() { jcc(Assembler::below, no_reserved_zone_enabling); call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread); - jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry())); + jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry())); should_not_reach_here(); bind(no_reserved_zone_enabling); diff --git a/src/hotspot/cpu/x86/methodHandles_x86.cpp b/src/hotspot/cpu/x86/methodHandles_x86.cpp index 3a8c1048766..fd738b7333e 100644 --- a/src/hotspot/cpu/x86/methodHandles_x86.cpp +++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp @@ -159,7 +159,7 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth __ jmp(Address(method, entry_offset)); __ bind(L_no_such_method); - __ jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry())); + __ jump(RuntimeAddress(SharedRuntime::throw_AbstractMethodError_entry())); } void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, @@ -510,7 +510,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, if (iid == vmIntrinsics::_linkToInterface) { __ bind(L_incompatible_class_change_error); - __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry())); + __ jump(RuntimeAddress(SharedRuntime::throw_IncompatibleClassChangeError_entry())); } } } diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp index 85c9125a97d..273bbcc6525 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp @@ -43,6 +43,7 @@ #include "runtime/sharedRuntime.hpp" #include "runtime/signature.hpp" #include "runtime/stubRoutines.hpp" +#include "runtime/timerTrace.hpp" #include "runtime/vframeArray.hpp" #include "runtime/vm_version.hpp" #include "utilities/align.hpp" @@ -56,6 +57,12 @@ #define __ masm-> +#ifdef PRODUCT +#define BLOCK_COMMENT(str) /* nothing */ +#else +#define BLOCK_COMMENT(str) __ block_comment(str) +#endif // PRODUCT + const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size; class RegisterSaver { @@ -2631,3 +2638,207 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha // frame_size_words or bytes?? return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true); } + + //------------------------------------------------------------------------------------------------------------------------ + // Continuation point for throwing of implicit exceptions that are not handled in + // the current activation. Fabricates an exception oop and initiates normal + // exception dispatching in this frame. + // + // Previously the compiler (c2) allowed for callee save registers on Java calls. + // This is no longer true after adapter frames were removed but could possibly + // be brought back in the future if the interpreter code was reworked and it + // was deemed worthwhile. The comment below was left to describe what must + // happen here if callee saves were resurrected. As it stands now this stub + // could actually be a vanilla BufferBlob and have now oopMap at all. 
+ // Since it doesn't make much difference we've chosen to leave it the + // way it was in the callee save days and keep the comment. + + // If we need to preserve callee-saved values we need a callee-saved oop map and + // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs. + // If the compiler needs all registers to be preserved between the fault + // point and the exception handler then it must assume responsibility for that in + // AbstractCompiler::continuation_for_implicit_null_exception or + // continuation_for_implicit_division_by_zero_exception. All other implicit + // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are + // either at call sites or otherwise assume that stack unwinding will be initiated, + // so caller saved registers were assumed volatile in the compiler. +RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) { + + // Information about frame layout at time of blocking runtime call. + // Note that we only have to preserve callee-saved registers since + // the compilers are responsible for supplying a continuation point + // if they expect all registers to be preserved. + enum layout { + thread_off, // last_java_sp + arg1_off, + arg2_off, + rbp_off, // callee saved register + ret_pc, + framesize + }; + + int insts_size = 256; + int locs_size = 32; + + ResourceMark rm; + const char* timer_msg = "SharedRuntime generate_throw_exception"; + TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); + + CodeBuffer code(name, insts_size, locs_size); + OopMapSet* oop_maps = new OopMapSet(); + MacroAssembler* masm = new MacroAssembler(&code); + + address start = __ pc(); + + // This is an inlined and slightly modified version of call_VM + // which has the ability to fetch the return PC out of + // thread-local storage and also sets up last_Java_sp slightly + // differently than the real call_VM + Register java_thread = rbx; + __ get_thread(java_thread); + + __ enter(); // required for proper stackwalking of RuntimeStub frame + + // pc and rbp, already pushed + __ subptr(rsp, (framesize-2) * wordSize); // prolog + + // Frame is now completed as far as size and linkage. + + int frame_complete = __ pc() - start; + + // push java thread (becomes first argument of C function) + __ movptr(Address(rsp, thread_off * wordSize), java_thread); + // Set up last_Java_sp and last_Java_fp + __ set_last_Java_frame(java_thread, rsp, rbp, nullptr, noreg); + + // Call runtime + BLOCK_COMMENT("call runtime_entry"); + __ call(RuntimeAddress(runtime_entry)); + // Generate oop map + OopMap* map = new OopMap(framesize, 0); + oop_maps->add_gc_map(__ pc() - start, map); + + // restore the thread (cannot use the pushed argument since arguments + // may be overwritten by C code generated by an optimizing compiler); + // however can use the register value directly if it is callee saved. 
+ __ get_thread(java_thread); + + __ reset_last_Java_frame(java_thread, true); + + __ leave(); // required for proper stackwalking of RuntimeStub frame + + // check for pending exceptions +#ifdef ASSERT + Label L; + __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD); + __ jcc(Assembler::notEqual, L); + __ should_not_reach_here(); + __ bind(L); +#endif /* ASSERT */ + __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); + + + RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, framesize, oop_maps, false); + return stub; +} + +#if INCLUDE_JFR + +static void jfr_prologue(address the_pc, MacroAssembler* masm) { + Register java_thread = rdi; + __ get_thread(java_thread); + __ set_last_Java_frame(java_thread, rsp, rbp, the_pc, noreg); + __ movptr(Address(rsp, 0), java_thread); +} + +// The handle is dereferenced through a load barrier. +static void jfr_epilogue(MacroAssembler* masm) { + Register java_thread = rdi; + __ get_thread(java_thread); + __ reset_last_Java_frame(java_thread, true); +} + +// For c2: c_rarg0 is junk, call to runtime to write a checkpoint. +// It returns a jobject handle to the event writer. +// The handle is dereferenced and the return value is the event writer oop. +RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { + enum layout { + FPUState_off = 0, + rbp_off = FPUStateSizeInWords, + rdi_off, + rsi_off, + rcx_off, + rbx_off, + saved_argument_off, + saved_argument_off2, // 2nd half of double + framesize + }; + + int insts_size = 1024; + int locs_size = 64; + CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size); + OopMapSet* oop_maps = new OopMapSet(); + MacroAssembler* masm = new MacroAssembler(&code); + + address start = __ pc(); + __ enter(); + int frame_complete = __ pc() - start; + address the_pc = __ pc(); + jfr_prologue(the_pc, masm); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1); + jfr_epilogue(masm); + __ resolve_global_jobject(rax, rdi, rdx); + __ leave(); + __ ret(0); + + OopMap* map = new OopMap(framesize, 1); // rbp + oop_maps->add_gc_map(the_pc - start, map); + + RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) + RuntimeStub::new_runtime_stub("jfr_write_checkpoint", &code, frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, false); + return stub; +} + +// For c2: call to return a leased buffer. 
+RuntimeStub* SharedRuntime::generate_jfr_return_lease() { + enum layout { + FPUState_off = 0, + rbp_off = FPUStateSizeInWords, + rdi_off, + rsi_off, + rcx_off, + rbx_off, + saved_argument_off, + saved_argument_off2, // 2nd half of double + framesize + }; + + int insts_size = 1024; + int locs_size = 64; + CodeBuffer code("jfr_return_lease", insts_size, locs_size); + OopMapSet* oop_maps = new OopMapSet(); + MacroAssembler* masm = new MacroAssembler(&code); + + address start = __ pc(); + __ enter(); + int frame_complete = __ pc() - start; + address the_pc = __ pc(); + jfr_prologue(the_pc, masm); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1); + jfr_epilogue(masm); + __ leave(); + __ ret(0); + + OopMap* map = new OopMap(framesize, 1); // rbp + oop_maps->add_gc_map(the_pc - start, map); + + RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) + RuntimeStub::new_runtime_stub("jfr_return_lease", &code, frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, false); + return stub; +} + +#endif // INCLUDE_JFR diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp index b5362a9942c..05ec85ef09a 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp @@ -52,6 +52,7 @@ #include "runtime/sharedRuntime.hpp" #include "runtime/signature.hpp" #include "runtime/stubRoutines.hpp" +#include "runtime/timerTrace.hpp" #include "runtime/vframeArray.hpp" #include "runtime/vm_version.hpp" #include "utilities/align.hpp" @@ -70,6 +71,12 @@ #define __ masm-> +#ifdef PRODUCT +#define BLOCK_COMMENT(str) /* nothing */ +#else +#define BLOCK_COMMENT(str) __ block_comment(str) +#endif // PRODUCT + const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size; class RegisterSaver { @@ -3210,6 +3217,101 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true); } +// Continuation point for throwing of implicit exceptions that are +// not handled in the current activation. Fabricates an exception +// oop and initiates normal exception dispatching in this +// frame. Since we need to preserve callee-saved values (currently +// only for C2, but done for C1 as well) we need a callee-saved oop +// map and therefore have to make these stubs into RuntimeStubs +// rather than BufferBlobs. If the compiler needs all registers to +// be preserved between the fault point and the exception handler +// then it must assume responsibility for that in +// AbstractCompiler::continuation_for_implicit_null_exception or +// continuation_for_implicit_division_by_zero_exception. All other +// implicit exceptions (e.g., NullPointerException or +// AbstractMethodError on entry) are either at call sites or +// otherwise assume that stack unwinding will be initiated, so +// caller saved registers were assumed volatile in the compiler. +RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) { + // Information about frame layout at time of blocking runtime call. + // Note that we only have to preserve callee-saved registers since + // the compilers are responsible for supplying a continuation point + // if they expect all registers to be preserved. 
+ enum layout { + rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, + rbp_off2, + return_off, + return_off2, + framesize // inclusive of return address + }; + + int insts_size = 512; + int locs_size = 64; + + ResourceMark rm; + const char* timer_msg = "SharedRuntime generate_throw_exception"; + TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); + + CodeBuffer code(name, insts_size, locs_size); + OopMapSet* oop_maps = new OopMapSet(); + MacroAssembler* masm = new MacroAssembler(&code); + + address start = __ pc(); + + // This is an inlined and slightly modified version of call_VM + // which has the ability to fetch the return PC out of + // thread-local storage and also sets up last_Java_sp slightly + // differently than the real call_VM + + __ enter(); // required for proper stackwalking of RuntimeStub frame + + assert(is_even(framesize/2), "sp not 16-byte aligned"); + + // return address and rbp are already in place + __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog + + int frame_complete = __ pc() - start; + + // Set up last_Java_sp and last_Java_fp + address the_pc = __ pc(); + __ set_last_Java_frame(rsp, rbp, the_pc, rscratch1); + __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack + + // Call runtime + __ movptr(c_rarg0, r15_thread); + BLOCK_COMMENT("call runtime_entry"); + __ call(RuntimeAddress(runtime_entry)); + + // Generate oop map + OopMap* map = new OopMap(framesize, 0); + + oop_maps->add_gc_map(the_pc - start, map); + + __ reset_last_Java_frame(true); + + __ leave(); // required for proper stackwalking of RuntimeStub frame + + // check for pending exceptions +#ifdef ASSERT + Label L; + __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD); + __ jcc(Assembler::notEqual, L); + __ should_not_reach_here(); + __ bind(L); +#endif // ASSERT + __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); + + + // codeBlob framesize is in words (not VMRegImpl::slot_size) + RuntimeStub* stub = + RuntimeStub::new_runtime_stub(name, + &code, + frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, false); + return stub; +} + //------------------------------Montgomery multiplication------------------------ // @@ -3475,3 +3577,94 @@ void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints, reverse_words(m, (julong *)m_ints, longwords); } +#if INCLUDE_JFR + +// For c2: c_rarg0 is junk, call to runtime to write a checkpoint. +// It returns a jobject handle to the event writer. +// The handle is dereferenced and the return value is the event writer oop. +RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { + enum layout { + rbp_off, + rbpH_off, + return_off, + return_off2, + framesize // inclusive of return address + }; + + CodeBuffer code("jfr_write_checkpoint", 1024, 64); + MacroAssembler* masm = new MacroAssembler(&code); + address start = __ pc(); + + __ enter(); + address the_pc = __ pc(); + + int frame_complete = the_pc - start; + + __ set_last_Java_frame(rsp, rbp, the_pc, rscratch1); + __ movptr(c_rarg0, r15_thread); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1); + __ reset_last_Java_frame(true); + + // rax is jobject handle result, unpack and process it through a barrier. 
+ __ resolve_global_jobject(rax, r15_thread, c_rarg0); + + __ leave(); + __ ret(0); + + OopMapSet* oop_maps = new OopMapSet(); + OopMap* map = new OopMap(framesize, 1); + oop_maps->add_gc_map(frame_complete, map); + + RuntimeStub* stub = + RuntimeStub::new_runtime_stub(code.name(), + &code, + frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, + false); + return stub; +} + +// For c2: call to return a leased buffer. +RuntimeStub* SharedRuntime::generate_jfr_return_lease() { + enum layout { + rbp_off, + rbpH_off, + return_off, + return_off2, + framesize // inclusive of return address + }; + + CodeBuffer code("jfr_return_lease", 1024, 64); + MacroAssembler* masm = new MacroAssembler(&code); + address start = __ pc(); + + __ enter(); + address the_pc = __ pc(); + + int frame_complete = the_pc - start; + + __ set_last_Java_frame(rsp, rbp, the_pc, rscratch2); + __ movptr(c_rarg0, r15_thread); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1); + __ reset_last_Java_frame(true); + + __ leave(); + __ ret(0); + + OopMapSet* oop_maps = new OopMapSet(); + OopMap* map = new OopMap(framesize, 1); + oop_maps->add_gc_map(frame_complete, map); + + RuntimeStub* stub = + RuntimeStub::new_runtime_stub(code.name(), + &code, + frame_complete, + (framesize >> (LogBytesPerWord - LogBytesPerInt)), + oop_maps, + false); + return stub; +} + +#endif // INCLUDE_JFR + diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp index d8067a39177..de13772dcfb 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp @@ -3843,121 +3843,8 @@ class StubGenerator: public StubCodeGenerator { return start; } - public: - // Information about frame layout at time of blocking runtime call. - // Note that we only have to preserve callee-saved registers since - // the compilers are responsible for supplying a continuation point - // if they expect all registers to be preserved. - enum layout { - thread_off, // last_java_sp - arg1_off, - arg2_off, - rbp_off, // callee saved register - ret_pc, - framesize - }; - private: -#undef __ -#define __ masm-> - - //------------------------------------------------------------------------------------------------------------------------ - // Continuation point for throwing of implicit exceptions that are not handled in - // the current activation. Fabricates an exception oop and initiates normal - // exception dispatching in this frame. - // - // Previously the compiler (c2) allowed for callee save registers on Java calls. - // This is no longer true after adapter frames were removed but could possibly - // be brought back in the future if the interpreter code was reworked and it - // was deemed worthwhile. The comment below was left to describe what must - // happen here if callee saves were resurrected. As it stands now this stub - // could actually be a vanilla BufferBlob and have now oopMap at all. - // Since it doesn't make much difference we've chosen to leave it the - // way it was in the callee save days and keep the comment. - - // If we need to preserve callee-saved values we need a callee-saved oop map and - // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs. 
- // If the compiler needs all registers to be preserved between the fault - // point and the exception handler then it must assume responsibility for that in - // AbstractCompiler::continuation_for_implicit_null_exception or - // continuation_for_implicit_division_by_zero_exception. All other implicit - // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are - // either at call sites or otherwise assume that stack unwinding will be initiated, - // so caller saved registers were assumed volatile in the compiler. - address generate_throw_exception(const char* name, address runtime_entry, - Register arg1 = noreg, Register arg2 = noreg) { - - int insts_size = 256; - int locs_size = 32; - - CodeBuffer code(name, insts_size, locs_size); - OopMapSet* oop_maps = new OopMapSet(); - MacroAssembler* masm = new MacroAssembler(&code); - - address start = __ pc(); - - // This is an inlined and slightly modified version of call_VM - // which has the ability to fetch the return PC out of - // thread-local storage and also sets up last_Java_sp slightly - // differently than the real call_VM - Register java_thread = rbx; - __ get_thread(java_thread); - - __ enter(); // required for proper stackwalking of RuntimeStub frame - - // pc and rbp, already pushed - __ subptr(rsp, (framesize-2) * wordSize); // prolog - - // Frame is now completed as far as size and linkage. - - int frame_complete = __ pc() - start; - - // push java thread (becomes first argument of C function) - __ movptr(Address(rsp, thread_off * wordSize), java_thread); - if (arg1 != noreg) { - __ movptr(Address(rsp, arg1_off * wordSize), arg1); - } - if (arg2 != noreg) { - assert(arg1 != noreg, "missing reg arg"); - __ movptr(Address(rsp, arg2_off * wordSize), arg2); - } - - // Set up last_Java_sp and last_Java_fp - __ set_last_Java_frame(java_thread, rsp, rbp, nullptr, noreg); - - // Call runtime - BLOCK_COMMENT("call runtime_entry"); - __ call(RuntimeAddress(runtime_entry)); - // Generate oop map - OopMap* map = new OopMap(framesize, 0); - oop_maps->add_gc_map(__ pc() - start, map); - - // restore the thread (cannot use the pushed argument since arguments - // may be overwritten by C code generated by an optimizing compiler); - // however can use the register value directly if it is callee saved. - __ get_thread(java_thread); - - __ reset_last_Java_frame(java_thread, true); - - __ leave(); // required for proper stackwalking of RuntimeStub frame - - // check for pending exceptions -#ifdef ASSERT - Label L; - __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD); - __ jcc(Assembler::notEqual, L); - __ should_not_reach_here(); - __ bind(L); -#endif /* ASSERT */ - __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); - - - RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, framesize, oop_maps, false); - return stub->entry_point(); - } - - void create_control_words() { // Round to nearest, 53-bit mode, exceptions masked StubRoutines::x86::_fpu_cntrl_wrd_std = 0x027F; @@ -3997,109 +3884,6 @@ class StubGenerator: public StubCodeGenerator { return nullptr; } -#if INCLUDE_JFR - - static void jfr_prologue(address the_pc, MacroAssembler* masm) { - Register java_thread = rdi; - __ get_thread(java_thread); - __ set_last_Java_frame(java_thread, rsp, rbp, the_pc, noreg); - __ movptr(Address(rsp, 0), java_thread); - } - - // The handle is dereferenced through a load barrier. 
- static void jfr_epilogue(MacroAssembler* masm) { - Register java_thread = rdi; - __ get_thread(java_thread); - __ reset_last_Java_frame(java_thread, true); - } - - // For c2: c_rarg0 is junk, call to runtime to write a checkpoint. - // It returns a jobject handle to the event writer. - // The handle is dereferenced and the return value is the event writer oop. - static RuntimeStub* generate_jfr_write_checkpoint() { - enum layout { - FPUState_off = 0, - rbp_off = FPUStateSizeInWords, - rdi_off, - rsi_off, - rcx_off, - rbx_off, - saved_argument_off, - saved_argument_off2, // 2nd half of double - framesize - }; - - int insts_size = 1024; - int locs_size = 64; - CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size); - OopMapSet* oop_maps = new OopMapSet(); - MacroAssembler* masm = new MacroAssembler(&code); - MacroAssembler* _masm = masm; - - address start = __ pc(); - __ enter(); - int frame_complete = __ pc() - start; - address the_pc = __ pc(); - jfr_prologue(the_pc, _masm); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1); - jfr_epilogue(_masm); - __ resolve_global_jobject(rax, rdi, rdx); - __ leave(); - __ ret(0); - - OopMap* map = new OopMap(framesize, 1); // rbp - oop_maps->add_gc_map(the_pc - start, map); - - RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) - RuntimeStub::new_runtime_stub("jfr_write_checkpoint", &code, frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, false); - return stub; - } - - // For c2: call to return a leased buffer. - static RuntimeStub* generate_jfr_return_lease() { - enum layout { - FPUState_off = 0, - rbp_off = FPUStateSizeInWords, - rdi_off, - rsi_off, - rcx_off, - rbx_off, - saved_argument_off, - saved_argument_off2, // 2nd half of double - framesize - }; - - int insts_size = 1024; - int locs_size = 64; - CodeBuffer code("jfr_return_lease", insts_size, locs_size); - OopMapSet* oop_maps = new OopMapSet(); - MacroAssembler* masm = new MacroAssembler(&code); - MacroAssembler* _masm = masm; - - address start = __ pc(); - __ enter(); - int frame_complete = __ pc() - start; - address the_pc = __ pc(); - jfr_prologue(the_pc, _masm); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1); - jfr_epilogue(_masm); - __ leave(); - __ ret(0); - - OopMap* map = new OopMap(framesize, 1); // rbp - oop_maps->add_gc_map(the_pc - start, map); - - RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size) - RuntimeStub::new_runtime_stub("jfr_return_lease", &code, frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, false); - return stub; - } - -#endif // INCLUDE_JFR - //--------------------------------------------------------------------------- // Initialization @@ -4130,12 +3914,6 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::x86::_d2i_wrapper = generate_d2i_wrapper(T_INT, CAST_FROM_FN_PTR(address, SharedRuntime::d2i)); StubRoutines::x86::_d2l_wrapper = generate_d2i_wrapper(T_LONG, CAST_FROM_FN_PTR(address, SharedRuntime::d2l)); - // Build this early so it's available for the interpreter - StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", - CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); - StubRoutines::_throw_delayed_StackOverflowError_entry = generate_throw_exception("delayed StackOverflowError throw_exception", - CAST_FROM_FN_PTR(address, 
SharedRuntime::throw_delayed_StackOverflowError)); - if (UseCRC32Intrinsics) { // set table address before stub generation which use it StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table; @@ -4188,28 +3966,11 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_cont_thaw = generate_cont_thaw(); StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier(); StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception(); - - JFR_ONLY(generate_jfr_stubs();) } -#if INCLUDE_JFR - void generate_jfr_stubs() { - StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint(); - StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point(); - StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease(); - StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point(); - } -#endif // INCLUDE_JFR - void generate_final_stubs() { // Generates all stubs and initializes the entry points - // These entry points require SharedInfo::stack0 to be set up in non-core builds - // and need to be relocatable, so they each fabricate a RuntimeStub internally. - StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError)); - StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError)); - StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call)); - // support for verify_oop (must happen after universe_init) StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp index a7404b298f6..2bc4a0a9cba 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp @@ -45,9 +45,6 @@ #if INCLUDE_JVMCI #include "jvmci/jvmci_globals.hpp" #endif -#if INCLUDE_JFR -#include "jfr/support/jfrIntrinsics.hpp" -#endif // For a more detailed description of the stub routine structure // see the comment in stubRoutines.hpp @@ -3702,7 +3699,7 @@ address StubGenerator::generate_cont_thaw(const char* label, Continuation::thaw_ Label L_thaw_success; __ testptr(rbx, rbx); __ jccb(Assembler::notZero, L_thaw_success); - __ jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry())); + __ jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry())); __ bind(L_thaw_success); // Make room for the thawed frames and align the stack. @@ -3778,198 +3775,6 @@ address StubGenerator::generate_cont_returnBarrier_exception() { return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception); } -#if INCLUDE_JFR - -// For c2: c_rarg0 is junk, call to runtime to write a checkpoint. -// It returns a jobject handle to the event writer. -// The handle is dereferenced and the return value is the event writer oop. 
-RuntimeStub* StubGenerator::generate_jfr_write_checkpoint() { - enum layout { - rbp_off, - rbpH_off, - return_off, - return_off2, - framesize // inclusive of return address - }; - - CodeBuffer code("jfr_write_checkpoint", 1024, 64); - MacroAssembler* _masm = new MacroAssembler(&code); - address start = __ pc(); - - __ enter(); - address the_pc = __ pc(); - - int frame_complete = the_pc - start; - - __ set_last_Java_frame(rsp, rbp, the_pc, rscratch1); - __ movptr(c_rarg0, r15_thread); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1); - __ reset_last_Java_frame(true); - - // rax is jobject handle result, unpack and process it through a barrier. - __ resolve_global_jobject(rax, r15_thread, c_rarg0); - - __ leave(); - __ ret(0); - - OopMapSet* oop_maps = new OopMapSet(); - OopMap* map = new OopMap(framesize, 1); - oop_maps->add_gc_map(frame_complete, map); - - RuntimeStub* stub = - RuntimeStub::new_runtime_stub(code.name(), - &code, - frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, - false); - return stub; -} - -// For c2: call to return a leased buffer. -RuntimeStub* StubGenerator::generate_jfr_return_lease() { - enum layout { - rbp_off, - rbpH_off, - return_off, - return_off2, - framesize // inclusive of return address - }; - - CodeBuffer code("jfr_return_lease", 1024, 64); - MacroAssembler* _masm = new MacroAssembler(&code); - address start = __ pc(); - - __ enter(); - address the_pc = __ pc(); - - int frame_complete = the_pc - start; - - __ set_last_Java_frame(rsp, rbp, the_pc, rscratch2); - __ movptr(c_rarg0, r15_thread); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1); - __ reset_last_Java_frame(true); - - __ leave(); - __ ret(0); - - OopMapSet* oop_maps = new OopMapSet(); - OopMap* map = new OopMap(framesize, 1); - oop_maps->add_gc_map(frame_complete, map); - - RuntimeStub* stub = - RuntimeStub::new_runtime_stub(code.name(), - &code, - frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, - false); - return stub; -} - -#endif // INCLUDE_JFR - -// Continuation point for throwing of implicit exceptions that are -// not handled in the current activation. Fabricates an exception -// oop and initiates normal exception dispatching in this -// frame. Since we need to preserve callee-saved values (currently -// only for C2, but done for C1 as well) we need a callee-saved oop -// map and therefore have to make these stubs into RuntimeStubs -// rather than BufferBlobs. If the compiler needs all registers to -// be preserved between the fault point and the exception handler -// then it must assume responsibility for that in -// AbstractCompiler::continuation_for_implicit_null_exception or -// continuation_for_implicit_division_by_zero_exception. All other -// implicit exceptions (e.g., NullPointerException or -// AbstractMethodError on entry) are either at call sites or -// otherwise assume that stack unwinding will be initiated, so -// caller saved registers were assumed volatile in the compiler. -address StubGenerator::generate_throw_exception(const char* name, - address runtime_entry, - Register arg1, - Register arg2) { - // Information about frame layout at time of blocking runtime call. - // Note that we only have to preserve callee-saved registers since - // the compilers are responsible for supplying a continuation point - // if they expect all registers to be preserved. 
- enum layout { - rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, - rbp_off2, - return_off, - return_off2, - framesize // inclusive of return address - }; - - int insts_size = 512; - int locs_size = 64; - - CodeBuffer code(name, insts_size, locs_size); - OopMapSet* oop_maps = new OopMapSet(); - MacroAssembler* _masm = new MacroAssembler(&code); - - address start = __ pc(); - - // This is an inlined and slightly modified version of call_VM - // which has the ability to fetch the return PC out of - // thread-local storage and also sets up last_Java_sp slightly - // differently than the real call_VM - - __ enter(); // required for proper stackwalking of RuntimeStub frame - - assert(is_even(framesize/2), "sp not 16-byte aligned"); - - // return address and rbp are already in place - __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog - - int frame_complete = __ pc() - start; - - // Set up last_Java_sp and last_Java_fp - address the_pc = __ pc(); - __ set_last_Java_frame(rsp, rbp, the_pc, rscratch1); - __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack - - // Call runtime - if (arg1 != noreg) { - assert(arg2 != c_rarg1, "clobbered"); - __ movptr(c_rarg1, arg1); - } - if (arg2 != noreg) { - __ movptr(c_rarg2, arg2); - } - __ movptr(c_rarg0, r15_thread); - BLOCK_COMMENT("call runtime_entry"); - __ call(RuntimeAddress(runtime_entry)); - - // Generate oop map - OopMap* map = new OopMap(framesize, 0); - - oop_maps->add_gc_map(the_pc - start, map); - - __ reset_last_Java_frame(true); - - __ leave(); // required for proper stackwalking of RuntimeStub frame - - // check for pending exceptions -#ifdef ASSERT - Label L; - __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD); - __ jcc(Assembler::notEqual, L); - __ should_not_reach_here(); - __ bind(L); -#endif // ASSERT - __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); - - - // codeBlob framesize is in words (not VMRegImpl::slot_size) - RuntimeStub* stub = - RuntimeStub::new_runtime_stub(name, - &code, - frame_complete, - (framesize >> (LogBytesPerWord - LogBytesPerInt)), - oop_maps, false); - return stub->entry_point(); -} - // exception handler for upcall stubs address StubGenerator::generate_upcall_stub_exception_handler() { StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler"); @@ -4087,17 +3892,6 @@ void StubGenerator::generate_initial_stubs() { StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); - // Build this early so it's available for the interpreter. 
- StubRoutines::_throw_StackOverflowError_entry = - generate_throw_exception("StackOverflowError throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime:: - throw_StackOverflowError)); - StubRoutines::_throw_delayed_StackOverflowError_entry = - generate_throw_exception("delayed StackOverflowError throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime:: - throw_delayed_StackOverflowError)); if (UseCRC32Intrinsics) { // set table address before stub generation which use it StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table; @@ -4131,43 +3925,11 @@ void StubGenerator::generate_continuation_stubs() { StubRoutines::_cont_thaw = generate_cont_thaw(); StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier(); StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception(); - - JFR_ONLY(generate_jfr_stubs();) } -#if INCLUDE_JFR -void StubGenerator::generate_jfr_stubs() { - StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint(); - StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point(); - StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease(); - StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point(); -} -#endif - void StubGenerator::generate_final_stubs() { // Generates the rest of stubs and initializes the entry points - // These entry points require SharedInfo::stack0 to be set up in - // non-core builds and need to be relocatable, so they each - // fabricate a RuntimeStub internally. - StubRoutines::_throw_AbstractMethodError_entry = - generate_throw_exception("AbstractMethodError throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime:: - throw_AbstractMethodError)); - - StubRoutines::_throw_IncompatibleClassChangeError_entry = - generate_throw_exception("IncompatibleClassChangeError throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime:: - throw_IncompatibleClassChangeError)); - - StubRoutines::_throw_NullPointerException_at_call_entry = - generate_throw_exception("NullPointerException at call throw_exception", - CAST_FROM_FN_PTR(address, - SharedRuntime:: - throw_NullPointerException_at_call)); - // support for verify_oop (must happen after universe_init) if (VerifyOops) { StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp index 374679750a4..d65c681585d 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp @@ -586,16 +586,6 @@ class StubGenerator: public StubCodeGenerator { address generate_cont_returnBarrier(); address generate_cont_returnBarrier_exception(); -#if INCLUDE_JFR - void generate_jfr_stubs(); - // For c2: c_rarg0 is junk, call to runtime to write a checkpoint. - // It returns a jobject handle to the event writer. - // The handle is dereferenced and the return value is the event writer oop. - RuntimeStub* generate_jfr_write_checkpoint(); - // For c2: call to runtime to return a buffer lease. - RuntimeStub* generate_jfr_return_lease(); -#endif // INCLUDE_JFR - // Continuation point for throwing of implicit exceptions that are // not handled in the current activation. 
Fabricates an exception // oop and initiates normal exception dispatching in this diff --git a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp index 3b32d577d44..76ad498be0e 100644 --- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp +++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp @@ -545,8 +545,8 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) { // Note: the restored frame is not necessarily interpreted. // Use the shared runtime version of the StackOverflowError. - assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated"); - __ jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry())); + assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated"); + __ jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry())); // all done with frame size check __ bind(after_frame_check_pop); NOT_LP64(__ pop(rsi)); diff --git a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp index 986cee68512..454f8e5f632 100644 --- a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp +++ b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp @@ -89,7 +89,7 @@ JRT_LEAF(void, zero_stub()) ShouldNotCallThis(); JRT_END -static RuntimeStub* generate_empty_runtime_stub(const char* name) { +static RuntimeStub* generate_empty_runtime_stub() { return CAST_FROM_FN_PTR(RuntimeStub*,zero_stub); } @@ -101,7 +101,6 @@ static DeoptimizationBlob* generate_empty_deopt_blob() { return CAST_FROM_FN_PTR(DeoptimizationBlob*,zero_stub); } - void SharedRuntime::generate_deopt_blob() { _deopt_blob = generate_empty_deopt_blob(); } @@ -111,7 +110,11 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t } RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { - return generate_empty_runtime_stub("resolve_blob"); + return generate_empty_runtime_stub(); +} + +RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) { + return generate_empty_runtime_stub(); } int SharedRuntime::c_calling_convention(const BasicType *sig_bt, @@ -127,3 +130,15 @@ int SharedRuntime::vector_calling_convention(VMRegPair *regs, ShouldNotCallThis(); return 0; } + +#if INCLUDE_JFR +RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { + return nullptr; +} + +RuntimeStub* SharedRuntime::generate_jfr_return_lease() { + return nullptr; +} + +#endif // INCLUDE_JFR + diff --git a/src/hotspot/cpu/zero/stubGenerator_zero.cpp b/src/hotspot/cpu/zero/stubGenerator_zero.cpp index 5c7772021ea..b6905791e98 100644 --- a/src/hotspot/cpu/zero/stubGenerator_zero.cpp +++ b/src/hotspot/cpu/zero/stubGenerator_zero.cpp @@ -203,22 +203,6 @@ class StubGenerator: public StubCodeGenerator { void generate_final_stubs() { // Generates all stubs and initializes the entry points - // These entry points require SharedInfo::stack0 to be set up in - // non-core builds and need to be relocatable, so they each - // fabricate a RuntimeStub internally. 
- StubRoutines::_throw_AbstractMethodError_entry = - ShouldNotCallThisStub(); - - StubRoutines::_throw_NullPointerException_at_call_entry = - ShouldNotCallThisStub(); - - StubRoutines::_throw_StackOverflowError_entry = - ShouldNotCallThisStub(); - - // support for verify_oop (must happen after universe_init) - StubRoutines::_verify_oop_subroutine_entry = - ShouldNotCallThisStub(); - // arraycopy stubs used by compilers generate_arraycopy_stubs(); diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp index 8fdb96a3038..fa4b1c75c05 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp @@ -50,6 +50,7 @@ class CompilerToVM { static address SharedRuntime_deopt_blob_unpack_with_exception_in_tls; static address SharedRuntime_deopt_blob_uncommon_trap; static address SharedRuntime_polling_page_return_handler; + static address SharedRuntime_throw_delayed_StackOverflowError_entry; static address nmethod_entry_barrier; static int thread_disarmed_guard_value_offset; diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp index 2116133e56e..26c88abec0f 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp @@ -68,6 +68,7 @@ address CompilerToVM::Data::SharedRuntime_deopt_blob_unpack; address CompilerToVM::Data::SharedRuntime_deopt_blob_unpack_with_exception_in_tls; address CompilerToVM::Data::SharedRuntime_deopt_blob_uncommon_trap; address CompilerToVM::Data::SharedRuntime_polling_page_return_handler; +address CompilerToVM::Data::SharedRuntime_throw_delayed_StackOverflowError_entry; address CompilerToVM::Data::nmethod_entry_barrier; int CompilerToVM::Data::thread_disarmed_guard_value_offset; @@ -158,6 +159,7 @@ void CompilerToVM::Data::initialize(JVMCI_TRAPS) { SharedRuntime_deopt_blob_unpack_with_exception_in_tls = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls(); SharedRuntime_deopt_blob_uncommon_trap = SharedRuntime::deopt_blob()->uncommon_trap(); SharedRuntime_polling_page_return_handler = SharedRuntime::polling_page_return_handler_blob()->entry_point(); + SharedRuntime_throw_delayed_StackOverflowError_entry = SharedRuntime::throw_delayed_StackOverflowError_entry(); BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); if (bs_nm != nullptr) { diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp index 5870e49ac94..688691fb976 100644 --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -68,6 +68,8 @@ static_field(CompilerToVM::Data, SharedRuntime_deopt_blob_uncommon_trap, address) \ static_field(CompilerToVM::Data, SharedRuntime_polling_page_return_handler, \ address) \ + static_field(CompilerToVM::Data, SharedRuntime_throw_delayed_StackOverflowError_entry, \ + address) \ \ static_field(CompilerToVM::Data, nmethod_entry_barrier, address) \ static_field(CompilerToVM::Data, thread_disarmed_guard_value_offset, int) \ @@ -328,8 +330,6 @@ \ static_field(StubRoutines, _verify_oop_count, jint) \ \ - static_field(StubRoutines, _throw_delayed_StackOverflowError_entry, address) \ - \ static_field(StubRoutines, _jbyte_arraycopy, address) \ static_field(StubRoutines, _jshort_arraycopy, address) \ static_field(StubRoutines, _jint_arraycopy, address) \ diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp index 
542514b1f7e..eced285f8cb 100644 --- a/src/hotspot/share/opto/library_call.cpp +++ b/src/hotspot/share/opto/library_call.cpp @@ -3167,7 +3167,7 @@ bool LibraryCallKit::inline_native_jvm_commit() { // Make a runtime call, which can safepoint, to return the leased buffer. This updates both the JfrThreadLocal and the Java event writer oop. Node* call_return_lease = make_runtime_call(RC_NO_LEAF, OptoRuntime::void_void_Type(), - StubRoutines::jfr_return_lease(), + SharedRuntime::jfr_return_lease(), "return_lease", TypePtr::BOTTOM); Node* call_return_lease_control = _gvn.transform(new ProjNode(call_return_lease, TypeFunc::Control)); @@ -3366,7 +3366,7 @@ bool LibraryCallKit::inline_native_getEventWriter() { // The call also updates the native thread local thread id and the vthread with the current epoch. Node* call_write_checkpoint = make_runtime_call(RC_NO_LEAF, OptoRuntime::jfr_write_checkpoint_Type(), - StubRoutines::jfr_write_checkpoint(), + SharedRuntime::jfr_write_checkpoint(), "write_checkpoint", TypePtr::BOTTOM); Node* call_write_checkpoint_control = _gvn.transform(new ProjNode(call_write_checkpoint, TypeFunc::Control)); diff --git a/src/hotspot/share/runtime/init.cpp b/src/hotspot/share/runtime/init.cpp index 368f4e62c76..7f23aca0961 100644 --- a/src/hotspot/share/runtime/init.cpp +++ b/src/hotspot/share/runtime/init.cpp @@ -126,7 +126,10 @@ jint init_globals() { compilationPolicy_init(); codeCache_init(); VM_Version_init(); // depends on codeCache_init for emitting code + // stub routines in initial blob are referenced by later generated code initial_stubs_init(); + // stack overflow exception blob is referenced by the interpreter + SharedRuntime::generate_initial_stubs(); jint status = universe_init(); // dependent on codeCache_init and // initial_stubs_init and metaspace_init. 
if (status != JNI_OK) @@ -144,6 +147,9 @@ jint init_globals() { gc_barrier_stubs_init(); // depends on universe_init, must be before interpreter_init continuations_init(); // must precede continuation stub generation continuation_stubs_init(); // depends on continuations_init +#if INCLUDE_JFR + SharedRuntime::generate_jfr_stubs(); +#endif interpreter_init_stub(); // before methods get loaded accessFlags_init(); InterfaceSupport_init(); diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp index 0587032ec5c..f98d031a2cd 100644 --- a/src/hotspot/share/runtime/sharedRuntime.cpp +++ b/src/hotspot/share/runtime/sharedRuntime.cpp @@ -71,6 +71,7 @@ #include "runtime/stackWatermarkSet.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/synchronizer.inline.hpp" +#include "runtime/timerTrace.hpp" #include "runtime/vframe.inline.hpp" #include "runtime/vframeArray.hpp" #include "runtime/vm_version.hpp" @@ -88,23 +89,42 @@ #include "jfr/jfr.hpp" #endif -// Shared stub locations +// Shared runtime stub routines reside in their own unique blob with a +// single entry point + RuntimeStub* SharedRuntime::_wrong_method_blob; RuntimeStub* SharedRuntime::_wrong_method_abstract_blob; RuntimeStub* SharedRuntime::_ic_miss_blob; RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob; RuntimeStub* SharedRuntime::_resolve_virtual_call_blob; RuntimeStub* SharedRuntime::_resolve_static_call_blob; -address SharedRuntime::_resolve_static_call_entry; DeoptimizationBlob* SharedRuntime::_deopt_blob; SafepointBlob* SharedRuntime::_polling_page_vectors_safepoint_handler_blob; SafepointBlob* SharedRuntime::_polling_page_safepoint_handler_blob; SafepointBlob* SharedRuntime::_polling_page_return_handler_blob; +RuntimeStub* SharedRuntime::_throw_AbstractMethodError_blob; +RuntimeStub* SharedRuntime::_throw_IncompatibleClassChangeError_blob; +RuntimeStub* SharedRuntime::_throw_NullPointerException_at_call_blob; +RuntimeStub* SharedRuntime::_throw_StackOverflowError_blob; +RuntimeStub* SharedRuntime::_throw_delayed_StackOverflowError_blob; + +#if INCLUDE_JFR +RuntimeStub* SharedRuntime::_jfr_write_checkpoint_blob = nullptr; +RuntimeStub* SharedRuntime::_jfr_return_lease_blob = nullptr; +#endif + nmethod* SharedRuntime::_cont_doYield_stub; //----------------------------generate_stubs----------------------------------- +void SharedRuntime::generate_initial_stubs() { + // Build this early so it's available for the interpreter. 
+ _throw_StackOverflowError_blob = + generate_throw_exception("StackOverflowError throw_exception", + CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); +} + void SharedRuntime::generate_stubs() { _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method), "wrong_method_stub"); _wrong_method_abstract_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub"); @@ -112,7 +132,22 @@ void SharedRuntime::generate_stubs() { _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C), "resolve_opt_virtual_call"); _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call"); _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C), "resolve_static_call"); - _resolve_static_call_entry = _resolve_static_call_blob->entry_point(); + + _throw_delayed_StackOverflowError_blob = + generate_throw_exception("delayed StackOverflowError throw_exception", + CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError)); + + _throw_AbstractMethodError_blob = + generate_throw_exception("AbstractMethodError throw_exception", + CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError)); + + _throw_IncompatibleClassChangeError_blob = + generate_throw_exception("IncompatibleClassChangeError throw_exception", + CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError)); + + _throw_NullPointerException_at_call_blob = + generate_throw_exception("NullPointerException at call throw_exception", + CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call)); AdapterHandlerLibrary::initialize(); @@ -129,6 +164,19 @@ void SharedRuntime::generate_stubs() { generate_deopt_blob(); } +#if INCLUDE_JFR +//------------------------------generate jfr runtime stubs ------ +void SharedRuntime::generate_jfr_stubs() { + ResourceMark rm; + const char* timer_msg = "SharedRuntime generate_jfr_stubs"; + TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); + + _jfr_write_checkpoint_blob = generate_jfr_write_checkpoint(); + _jfr_return_lease_blob = generate_jfr_return_lease(); +} + +#endif // INCLUDE_JFR + #include // Implementation of SharedRuntime @@ -867,7 +915,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, // method stack banging. assert(current->deopt_mark() == nullptr, "no stack overflow from deopt blob/uncommon trap"); Events::log_exception(current, "StackOverflowError at " INTPTR_FORMAT, p2i(pc)); - return StubRoutines::throw_StackOverflowError_entry(); + return SharedRuntime::throw_StackOverflowError_entry(); } case IMPLICIT_NULL: { @@ -893,7 +941,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, // Assert that the signal comes from the expected location in stub code. 
assert(vt_stub->is_null_pointer_exception(pc), "obtained signal from unexpected location in stub code"); - return StubRoutines::throw_NullPointerException_at_call_entry(); + return SharedRuntime::throw_NullPointerException_at_call_entry(); } } else { CodeBlob* cb = CodeCache::find_blob(pc); @@ -914,7 +962,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, } Events::log_exception(current, "NullPointerException in code blob at " INTPTR_FORMAT, p2i(pc)); // There is no handler here, so we will simply unwind. - return StubRoutines::throw_NullPointerException_at_call_entry(); + return SharedRuntime::throw_NullPointerException_at_call_entry(); } // Otherwise, it's a compiled method. Consult its exception handlers. @@ -925,13 +973,13 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, // is not set up yet) => use return address pushed by // caller => don't push another return address Events::log_exception(current, "NullPointerException in IC check " INTPTR_FORMAT, p2i(pc)); - return StubRoutines::throw_NullPointerException_at_call_entry(); + return SharedRuntime::throw_NullPointerException_at_call_entry(); } if (nm->method()->is_method_handle_intrinsic()) { // exception happened inside MH dispatch code, similar to a vtable stub Events::log_exception(current, "NullPointerException in MH adapter " INTPTR_FORMAT, p2i(pc)); - return StubRoutines::throw_NullPointerException_at_call_entry(); + return SharedRuntime::throw_NullPointerException_at_call_entry(); } #ifndef PRODUCT @@ -1467,7 +1515,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* assert(callerFrame.is_compiled_frame(), "must be"); // Install exception and return forward entry. - address res = StubRoutines::throw_AbstractMethodError_entry(); + address res = SharedRuntime::throw_AbstractMethodError_entry(); JRT_BLOCK methodHandle callee(current, invoke.static_target(current)); if (!callee.is_null()) { @@ -2387,7 +2435,7 @@ void AdapterHandlerLibrary::initialize() { // AbstractMethodError for invalid invocations. 
address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub(); _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, nullptr), - StubRoutines::throw_AbstractMethodError_entry(), + SharedRuntime::throw_AbstractMethodError_entry(), wrong_method_abstract, wrong_method_abstract); _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size); diff --git a/src/hotspot/share/runtime/sharedRuntime.hpp b/src/hotspot/share/runtime/sharedRuntime.hpp index 9eec8e079ec..00a72f5db1e 100644 --- a/src/hotspot/share/runtime/sharedRuntime.hpp +++ b/src/hotspot/share/runtime/sharedRuntime.hpp @@ -54,7 +54,6 @@ class SharedRuntime: AllStatic { static RuntimeStub* _resolve_opt_virtual_call_blob; static RuntimeStub* _resolve_virtual_call_blob; static RuntimeStub* _resolve_static_call_blob; - static address _resolve_static_call_entry; static DeoptimizationBlob* _deopt_blob; @@ -64,6 +63,17 @@ class SharedRuntime: AllStatic { static nmethod* _cont_doYield_stub; + static RuntimeStub* _throw_AbstractMethodError_blob; + static RuntimeStub* _throw_IncompatibleClassChangeError_blob; + static RuntimeStub* _throw_NullPointerException_at_call_blob; + static RuntimeStub* _throw_StackOverflowError_blob; + static RuntimeStub* _throw_delayed_StackOverflowError_blob; + +#if INCLUDE_JFR + static RuntimeStub* _jfr_write_checkpoint_blob; + static RuntimeStub* _jfr_return_lease_blob; +#endif + #ifndef PRODUCT // Counters static int64_t _nof_megamorphic_calls; // total # of megamorphic calls (through vtable) @@ -73,9 +83,19 @@ class SharedRuntime: AllStatic { enum { POLL_AT_RETURN, POLL_AT_LOOP, POLL_AT_VECTOR_LOOP }; static SafepointBlob* generate_handler_blob(address call_ptr, int poll_type); static RuntimeStub* generate_resolve_blob(address destination, const char* name); - + static RuntimeStub* generate_throw_exception(const char* name, address runtime_entry); public: + static void generate_initial_stubs(void); static void generate_stubs(void); +#if INCLUDE_JFR + static void generate_jfr_stubs(void); + // For c2: c_rarg0 is junk, call to runtime to write a checkpoint. + // It returns a jobject handle to the event writer. + // The handle is dereferenced and the return value is the event writer oop. + static RuntimeStub* generate_jfr_write_checkpoint(); + // For c2: call to runtime to return a buffer lease. 
+ static RuntimeStub* generate_jfr_return_lease(); +#endif // max bytes for each dtrace string parameter enum { max_dtrace_string_size = 256 }; @@ -241,6 +261,18 @@ class SharedRuntime: AllStatic { return _cont_doYield_stub; } + // Implicit exceptions + static address throw_AbstractMethodError_entry() { return _throw_AbstractMethodError_blob->entry_point(); } + static address throw_IncompatibleClassChangeError_entry() { return _throw_IncompatibleClassChangeError_blob->entry_point(); } + static address throw_NullPointerException_at_call_entry() { return _throw_NullPointerException_at_call_blob->entry_point(); } + static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_blob->entry_point(); } + static address throw_delayed_StackOverflowError_entry() { return _throw_delayed_StackOverflowError_blob->entry_point(); } + +#if INCLUDE_JFR + static address jfr_write_checkpoint() { return _jfr_write_checkpoint_blob->entry_point(); } + static address jfr_return_lease() { return _jfr_return_lease_blob->entry_point(); } +#endif + // Counters #ifndef PRODUCT static address nof_megamorphic_calls_addr() { return (address)&_nof_megamorphic_calls; } diff --git a/src/hotspot/share/runtime/stubRoutines.cpp b/src/hotspot/share/runtime/stubRoutines.cpp index 773f8031e15..c13f64fca4b 100644 --- a/src/hotspot/share/runtime/stubRoutines.cpp +++ b/src/hotspot/share/runtime/stubRoutines.cpp @@ -61,11 +61,6 @@ address StubRoutines::_call_stub_entry = nullptr; address StubRoutines::_catch_exception_entry = nullptr; address StubRoutines::_forward_exception_entry = nullptr; -address StubRoutines::_throw_AbstractMethodError_entry = nullptr; -address StubRoutines::_throw_IncompatibleClassChangeError_entry = nullptr; -address StubRoutines::_throw_NullPointerException_at_call_entry = nullptr; -address StubRoutines::_throw_StackOverflowError_entry = nullptr; -address StubRoutines::_throw_delayed_StackOverflowError_entry = nullptr; jint StubRoutines::_verify_oop_count = 0; address StubRoutines::_verify_oop_subroutine_entry = nullptr; address StubRoutines::_atomic_xchg_entry = nullptr; @@ -191,11 +186,6 @@ address StubRoutines::_cont_thaw = nullptr; address StubRoutines::_cont_returnBarrier = nullptr; address StubRoutines::_cont_returnBarrierExc = nullptr; -JFR_ONLY(RuntimeStub* StubRoutines::_jfr_write_checkpoint_stub = nullptr;) -JFR_ONLY(address StubRoutines::_jfr_write_checkpoint = nullptr;) -JFR_ONLY(RuntimeStub* StubRoutines::_jfr_return_lease_stub = nullptr;) -JFR_ONLY(address StubRoutines::_jfr_return_lease = nullptr;) - address StubRoutines::_upcall_stub_exception_handler = nullptr; address StubRoutines::_lookup_secondary_supers_table_slow_path_stub = nullptr; diff --git a/src/hotspot/share/runtime/stubRoutines.hpp b/src/hotspot/share/runtime/stubRoutines.hpp index 762a6edf590..206a5ec2e99 100644 --- a/src/hotspot/share/runtime/stubRoutines.hpp +++ b/src/hotspot/share/runtime/stubRoutines.hpp @@ -36,7 +36,34 @@ // StubRoutines provides entry points to assembly routines used by // compiled code and the run-time system. Platform-specific entry -// points are defined in the platform-specific inner class. +// points are defined in the platform-specific inner class. Most +// routines have a single (main) entry point. However, a few routines +// do provide alternative entry points. +// +// Stub routines whose entries are advertised via class StubRoutines +// are generated in batches at well-defined stages during JVM init: +// initial stubs, continuation stubs, compiler stubs, final stubs. 
+// Each batch is embedded in a single, associated blob (an instance of +// BufferBlob) i.e. the blob to entry relationship is 1-m. +// +// Note that this contrasts with the much smaller number of stub +// routines generated via classes SharedRuntime, c1_Runtime1 and +// OptoRuntime. The latter routines are also generated at well-defined +// points during JVM init. However, each stub routine has its own +// unique blob (various subclasses of RuntimeBlob) i.e. the blob to +// entry relationship is 1-1. The difference arises because +// SharedRuntime routines may need to be relocatable or advertise +// properties such as a frame size via their blob. +// +// Staging of stub routine generation is needed in order to manage +// init dependencies between 1) stubs and other stubs or 2) stubs and +// other runtime components. For example, some exception throw stubs +// need to be generated before compiler stubs (such as the +// deoptimization stub) so that the latter can invoke the throw routine +// in bail-out code. Likewise, stubs that access objects (such as the +// object array copy stub) need to be created after initialization of +// some GC constants and generation of the GC barrier stubs they might +// need to invoke. // // Class scheme: // @@ -49,8 +76,7 @@ // | | // | | // stubRoutines.cpp stubRoutines_<arch>.cpp -// stubRoutines_<os_family>.cpp stubGenerator_<arch>.cpp -// stubRoutines_<os_arch>.cpp +// stubGenerator_<arch>.cpp // // Note 1: The important thing is a clean decoupling between stub // entry points (interfacing to the whole vm; i.e., 1-to-n @@ -75,6 +101,8 @@ // 3. add a public accessor function to the instance variable // 4. implement the corresponding generator function in the platform-dependent // stubGenerator_<arch>.cpp file and call the function in generate_all() of that file +// 5. ensure the entry is generated in the right blob to satisfy initialization +// dependencies between it and other stubs or runtime components.
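As a reading aid, a condensed sketch of the 1-1 blob-to-entry pattern that the comment above describes and that the SharedRuntime changes earlier in this series implement; this is not a literal excerpt, just the shape of the real code (the field, generator call, and accessor names below all come from the sharedRuntime.hpp/cpp hunks in this series):

```cpp
// Pattern used for the relocated throw stubs: one RuntimeStub blob per routine,
// generated in the stage that first needs it, advertised via an entry accessor.
class SharedRuntime: AllStatic {
  static RuntimeStub* _throw_StackOverflowError_blob;   // blob : entry point is 1-1

 public:
  static void generate_initial_stubs() {
    // built early so the interpreter's stack-banging code can reference it
    _throw_StackOverflowError_blob =
        generate_throw_exception("StackOverflowError throw_exception",
                                 CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
  }

  // call sites jump through the advertised entry point
  static address throw_StackOverflowError_entry() {
    return _throw_StackOverflowError_blob->entry_point();
  }
};
```

Call sites that previously used StubRoutines::throw_StackOverflowError_entry() (the interpreter stack-overflow check, continuation thaw, implicit-exception dispatch) now jump through this SharedRuntime accessor, as the call-site updates in the hunks above show.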
class UnsafeMemoryAccess : public CHeapObj { private: @@ -137,11 +165,6 @@ class StubRoutines: AllStatic { static address _call_stub_entry; static address _forward_exception_entry; static address _catch_exception_entry; - static address _throw_AbstractMethodError_entry; - static address _throw_IncompatibleClassChangeError_entry; - static address _throw_NullPointerException_at_call_entry; - static address _throw_StackOverflowError_entry; - static address _throw_delayed_StackOverflowError_entry; static address _atomic_xchg_entry; static address _atomic_cmpxchg_entry; @@ -269,11 +292,6 @@ class StubRoutines: AllStatic { static address _cont_returnBarrier; static address _cont_returnBarrierExc; - JFR_ONLY(static RuntimeStub* _jfr_write_checkpoint_stub;) - JFR_ONLY(static address _jfr_write_checkpoint;) - JFR_ONLY(static RuntimeStub* _jfr_return_lease_stub;) - JFR_ONLY(static address _jfr_return_lease;) - // Vector Math Routines static address _vector_f_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_SVML_OP]; static address _vector_d_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_SVML_OP]; @@ -329,12 +347,6 @@ class StubRoutines: AllStatic { // Exceptions static address forward_exception_entry() { return _forward_exception_entry; } - // Implicit exceptions - static address throw_AbstractMethodError_entry() { return _throw_AbstractMethodError_entry; } - static address throw_IncompatibleClassChangeError_entry(){ return _throw_IncompatibleClassChangeError_entry; } - static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; } - static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_entry; } - static address throw_delayed_StackOverflowError_entry() { return _throw_delayed_StackOverflowError_entry; } static address atomic_xchg_entry() { return _atomic_xchg_entry; } static address atomic_cmpxchg_entry() { return _atomic_cmpxchg_entry; } @@ -487,9 +499,6 @@ class StubRoutines: AllStatic { static address cont_returnBarrier() { return _cont_returnBarrier; } static address cont_returnBarrierExc(){return _cont_returnBarrierExc; } - JFR_ONLY(static address jfr_write_checkpoint() { return _jfr_write_checkpoint; }) - JFR_ONLY(static address jfr_return_lease() { return _jfr_return_lease; }) - static address upcall_stub_exception_handler() { assert(_upcall_stub_exception_handler != nullptr, "not implemented"); return _upcall_stub_exception_handler; From 6d430f24df9d599fe1e12c6b65117c02773ae5d8 Mon Sep 17 00:00:00 2001 From: Aleksey Shipilev Date: Mon, 19 Aug 2024 09:08:54 +0000 Subject: [PATCH 28/67] 8338314: JFR: Split JFRCheckpoint VM operation Reviewed-by: mgronlun, egahlin --- .../recorder/service/jfrRecorderService.cpp | 28 ++++++++++++------- .../recorder/service/jfrRecorderService.hpp | 2 ++ src/hotspot/share/runtime/vmOperation.hpp | 3 +- 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp index 0395b711c65..9f24bddcd3c 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp +++ b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp @@ -393,14 +393,22 @@ static u4 write_metadata(JfrChunkWriter& chunkwriter) { return invoke(wm); } -template -class JfrVMOperation : public VM_Operation { +class JfrSafepointClearVMOperation : public VM_Operation { private: - Instance& _instance; + JfrRecorderService& _instance; + public: + 
JfrSafepointClearVMOperation(JfrRecorderService& instance) : _instance(instance) {} + void doit() { _instance.safepoint_clear(); } + VMOp_Type type() const { return VMOp_JFRSafepointClear; } +}; + +class JfrSafepointWriteVMOperation : public VM_Operation { + private: + JfrRecorderService& _instance; public: - JfrVMOperation(Instance& instance) : _instance(instance) {} - void doit() { (_instance.*func)(); } - VMOp_Type type() const { return VMOp_JFRCheckpoint; } + JfrSafepointWriteVMOperation(JfrRecorderService& instance) : _instance(instance) {} + void doit() { _instance.safepoint_write(); } + VMOp_Type type() const { return VMOp_JFRSafepointWrite; } }; JfrRecorderService::JfrRecorderService() : @@ -470,9 +478,9 @@ void JfrRecorderService::pre_safepoint_clear() { } void JfrRecorderService::invoke_safepoint_clear() { - JfrVMOperation safepoint_task(*this); + JfrSafepointClearVMOperation op(*this); ThreadInVMfromNative transition(JavaThread::current()); - VMThread::execute(&safepoint_task); + VMThread::execute(&op); } void JfrRecorderService::safepoint_clear() { @@ -577,10 +585,10 @@ void JfrRecorderService::pre_safepoint_write() { } void JfrRecorderService::invoke_safepoint_write() { - JfrVMOperation safepoint_task(*this); + JfrSafepointWriteVMOperation op(*this); // can safepoint here ThreadInVMfromNative transition(JavaThread::current()); - VMThread::execute(&safepoint_task); + VMThread::execute(&op); } void JfrRecorderService::safepoint_write() { diff --git a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.hpp b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.hpp index 89c0437dd13..e5b4500afc0 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.hpp +++ b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.hpp @@ -35,6 +35,8 @@ class JfrStorage; class JfrStringPool; class JfrRecorderService : public StackObj { + friend class JfrSafepointClearVMOperation; + friend class JfrSafepointWriteVMOperation; private: JfrCheckpointManager& _checkpoint_manager; JfrChunkWriter& _chunkwriter; diff --git a/src/hotspot/share/runtime/vmOperation.hpp b/src/hotspot/share/runtime/vmOperation.hpp index 36aa6d3f28a..532a9231b70 100644 --- a/src/hotspot/share/runtime/vmOperation.hpp +++ b/src/hotspot/share/runtime/vmOperation.hpp @@ -87,7 +87,8 @@ template(HeapWalkOperation) \ template(HeapIterateOperation) \ template(ReportJavaOutOfMemory) \ - template(JFRCheckpoint) \ + template(JFRSafepointClear) \ + template(JFRSafepointWrite) \ template(ShenandoahFullGC) \ template(ShenandoahInitMark) \ template(ShenandoahFinalMarkStartEvac) \ From e07a5b66267156f55ee1c28579382990e58f15eb Mon Sep 17 00:00:00 2001 From: Erik Gahlin Date: Mon, 19 Aug 2024 10:42:58 +0000 Subject: [PATCH 29/67] 8338512: JFR: Revert changes to TestCodeSweeper Reviewed-by: mgronlun --- test/jdk/ProblemList.txt | 1 + .../jfr/event/compiler/TestCodeSweeper.java | 54 ++++++------------- 2 files changed, 17 insertions(+), 38 deletions(-) diff --git a/test/jdk/ProblemList.txt b/test/jdk/ProblemList.txt index 6cde7111383..bbf594f3bc7 100644 --- a/test/jdk/ProblemList.txt +++ b/test/jdk/ProblemList.txt @@ -743,6 +743,7 @@ jdk/incubator/vector/LoadJsvmlTest.java 8305390 windows- # jdk_jfr +jdk/jfr/event/compiler/TestCodeSweeper.java 8338127 generic-all jdk/jfr/event/runtime/TestResidentSetSizeEvent.java 8309846 aix-ppc64 jdk/jfr/jvm/TestWaste.java 8282427 generic-all diff --git a/test/jdk/jdk/jfr/event/compiler/TestCodeSweeper.java b/test/jdk/jdk/jfr/event/compiler/TestCodeSweeper.java index 
df95af5b9be..62fb137b1cf 100644 --- a/test/jdk/jdk/jfr/event/compiler/TestCodeSweeper.java +++ b/test/jdk/jdk/jfr/event/compiler/TestCodeSweeper.java @@ -27,11 +27,9 @@ import java.lang.reflect.Method; import java.time.Instant; import java.util.ArrayList; -import java.util.Collections; import java.util.List; -import jdk.jfr.Event; -import jdk.jfr.consumer.RecordingStream; +import jdk.jfr.Recording; import jdk.jfr.consumer.RecordedEvent; import jdk.test.lib.Asserts; import jdk.test.lib.jfr.EventNames; @@ -41,7 +39,7 @@ import jdk.test.whitebox.code.CodeBlob; /** - * Test for events: jdk.CodeCacheFull jdk.CompilationFailure + * Test for events: vm/code_cache/full vm/compiler/failure * * We verify that we should get at least one of each of the events listed above. * @@ -60,15 +58,13 @@ */ public class TestCodeSweeper { - static class ProvocationEvent extends Event { - } private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); private static final int COMP_LEVEL_SIMPLE = 1; private static final int COMP_LEVEL_FULL_OPTIMIZATION = 4; private static final int SIZE = 1; private static final String METHOD_NAME = "verifyFullEvent"; - private static final String EVENT_CODE_CACHE_FULL = EventNames.CodeCacheFull; - private static final String EVENT_COMPILATION_FAILURE = EventNames.CompilationFailure; + private static final String pathFull = EventNames.CodeCacheFull; + private static final String pathFailure = EventNames.CompilationFailure; public static final long SEGMENT_SIZE = WhiteBox.getWhiteBox().getUintxVMFlag("CodeCacheSegmentSize"); public static final long MIN_BLOCK_LENGTH = WhiteBox.getWhiteBox().getUintxVMFlag("CodeCacheMinBlockLength"); public static final long MIN_ALLOCATION = SEGMENT_SIZE * MIN_BLOCK_LENGTH; @@ -81,41 +77,26 @@ public static void main(String[] args) throws Throwable { System.out.println("This test will warn that the code cache is full."); System.out.println("That is expected and is the purpose of the test."); System.out.println("************************************************"); - List events = Collections.synchronizedList(new ArrayList<>()); - try (RecordingStream rs = new RecordingStream()) { - rs.setReuse(false); - rs.enable(EVENT_CODE_CACHE_FULL); - rs.enable(EVENT_COMPILATION_FAILURE); - rs.onEvent(EVENT_CODE_CACHE_FULL, events::add); - rs.onEvent(EVENT_COMPILATION_FAILURE, events::add); - rs.onEvent(ProvocationEvent.class.getName(), e -> { - if (!events.isEmpty()) { - rs.close(); - return; - } - // Retry if CodeCacheFull or CompilationFailure events weren't provoked - try { - provokeEvents(); - } catch (Exception ex) { - ex.printStackTrace(); - rs.close(); - } - }); - rs.startAsync(); - provokeEvents(); - rs.awaitTermination(); - } + + Recording r = new Recording(); + r.enable(pathFull); + r.enable(pathFailure); + r.start(); + provokeEvents(); + r.stop(); int countEventFull = 0; int countEventFailure = 0; + + List events = Events.fromRecording(r); Events.hasEvents(events); - for (RecordedEvent event : new ArrayList<>(events)) { + for (RecordedEvent event : events) { switch (event.getEventType().getName()) { - case EVENT_CODE_CACHE_FULL: + case pathFull: countEventFull++; verifyFullEvent(event); break; - case EVENT_COMPILATION_FAILURE: + case pathFailure: countEventFailure++; verifyFailureEvent(event); break; @@ -134,8 +115,6 @@ private static boolean canAllocate(double size, long maxSize, MemoryPoolMXBean b } private static void provokeEvents() throws NoSuchMethodException, InterruptedException { - System.out.println("provokeEvents()"); - ProvocationEvent 
provocationEvent = new ProvocationEvent(); // Prepare for later, since we don't want to trigger any compilation // setting this up. Method method = TestCodeSweeper.class.getDeclaredMethod(METHOD_NAME, new Class[] { RecordedEvent.class }); @@ -180,7 +159,6 @@ private static void provokeEvents() throws NoSuchMethodException, InterruptedExc for (Long blob : blobs) { WHITE_BOX.freeCodeBlob(blob); } - provocationEvent.commit(); } private static void verifyFullEvent(RecordedEvent event) throws Throwable { From 6ff6b0994380276e0096f7b55a0d659803344679 Mon Sep 17 00:00:00 2001 From: Renjith Kannath Pariyangad Date: Mon, 19 Aug 2024 12:40:35 +0000 Subject: [PATCH 30/67] 8290501: Typo in javax.swing.BoundedRangeModel documentation Reviewed-by: aivanov, prr, honkar --- .../share/classes/javax/swing/BoundedRangeModel.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/java.desktop/share/classes/javax/swing/BoundedRangeModel.java b/src/java.desktop/share/classes/javax/swing/BoundedRangeModel.java index bc6a84ac891..e6a9d08611d 100644 --- a/src/java.desktop/share/classes/javax/swing/BoundedRangeModel.java +++ b/src/java.desktop/share/classes/javax/swing/BoundedRangeModel.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ package javax.swing; -import javax.swing.event.*; +import javax.swing.event.ChangeListener; /** @@ -40,7 +40,7 @@ * range is value,value+extent. The inner range * must lie within the outer one, i.e. value must be * less than or equal to maximum and value+extent - * must greater than or equal to minimum, and maximum + * must be greater than or equal to minimum, and maximum * must be greater than or equal to minimum. * There are a few features of this model that one might find a little * surprising. These quirks exist for the convenience of the @@ -228,7 +228,7 @@ public interface BoundedRangeModel /** - * This method sets all of the model's data with a single method call. + * This method sets all the model's data with a single method call. * The method results in a single change event being generated. This is * convenient when you need to adjust all the model data simultaneously and * do not want individual change events to occur. 
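To make the BoundedRangeModel contract above concrete, here is a small usage sketch (illustrative only, not part of the patch) against the standard DefaultBoundedRangeModel implementation; it shows the invariant minimum <= value <= value + extent <= maximum being updated atomically by setRangeProperties with a single ChangeEvent:

```java
import javax.swing.DefaultBoundedRangeModel;

public class BoundedRangeExample {
    public static void main(String[] args) {
        // value=50, extent=10, min=0, max=100 satisfies min <= value <= value+extent <= max
        DefaultBoundedRangeModel model = new DefaultBoundedRangeModel(50, 10, 0, 100);

        // Count change events; setRangeProperties fires a single one for all fields.
        final int[] events = {0};
        model.addChangeListener(e -> events[0]++);

        // Adjust all the model data in one call -> one ChangeEvent, invariant preserved.
        model.setRangeProperties(20, 5, 0, 60, false);

        System.out.println("value=" + model.getValue()
                + " extent=" + model.getExtent()
                + " events=" + events[0]); // expected: value=20 extent=5 events=1
    }
}
```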
From f0fe31383aec652ad4e3cc4873cd3ff9b918fef7 Mon Sep 17 00:00:00 2001 From: Adam Sotona Date: Mon, 19 Aug 2024 12:57:17 +0000 Subject: [PATCH 31/67] 8338564: Remove obsolete AbstractNamedEntry::equals method Reviewed-by: liach --- .../jdk/internal/classfile/impl/AbstractPoolEntry.java | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractPoolEntry.java b/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractPoolEntry.java index c6e6c4dce57..c27968eecab 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractPoolEntry.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/AbstractPoolEntry.java @@ -534,15 +534,6 @@ public Utf8Entry name() { public String asInternalName() { return ref1.stringValue(); } - - @Override - public boolean equals(Object o) { - if (o == this) { return true; } - if (o instanceof AbstractNamedEntry ne) { - return tag == ne.tag() && name().equals(ref1()); - } - return false; - } } public static final class ClassEntryImpl extends AbstractNamedEntry implements ClassEntry { From 2766b09e29d7c1c31fdef20f016a181eedb2d429 Mon Sep 17 00:00:00 2001 From: Alan Bateman Date: Mon, 19 Aug 2024 12:57:29 +0000 Subject: [PATCH 32/67] 8338452: (dc) DatagramChannelImpl.blockingReceive with timeout may block indefinitely if all datagrams blocked by SecurityManager Reviewed-by: dfuchs --- .../classes/sun/nio/ch/DatagramChannelImpl.java | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/src/java.base/share/classes/sun/nio/ch/DatagramChannelImpl.java b/src/java.base/share/classes/sun/nio/ch/DatagramChannelImpl.java index 06e79f416ac..5fac688b311 100644 --- a/src/java.base/share/classes/sun/nio/ch/DatagramChannelImpl.java +++ b/src/java.base/share/classes/sun/nio/ch/DatagramChannelImpl.java @@ -683,14 +683,12 @@ void blockingReceive(DatagramPacket p, long nanos) throws IOException { } long startNanos = System.nanoTime(); + long remainingNanos = nanos; SocketAddress sender = null; try { SocketAddress remote = beginRead(true, false); boolean connected = (remote != null); do { - long remainingNanos = (nanos > 0) - ? 
nanos - (System.nanoTime() - startNanos) - : 0; ByteBuffer dst = tryBlockingReceive(connected, bufLength, remainingNanos); // if datagram received then get sender and copy to DatagramPacket @@ -711,8 +709,8 @@ void blockingReceive(DatagramPacket p, long nanos) throws IOException { } } - // copy bytes to the DatagramPacket, and set length and sender if (sender != null) { + // copy bytes to the DatagramPacket, and set length and sender synchronized (p) { // re-read p.bufLength in case DatagramPacket changed int len = Math.min(dst.limit(), DatagramPackets.getBufLength(p)); @@ -720,6 +718,14 @@ void blockingReceive(DatagramPacket p, long nanos) throws IOException { DatagramPackets.setLength(p, len); p.setSocketAddress(sender); } + } else { + // need to retry, adjusting timeout if needed + if (nanos > 0) { + remainingNanos = nanos - (System.nanoTime() - startNanos); + if (remainingNanos <= 0) { + throw new SocketTimeoutException("Receive timed out"); + } + } } } finally { Util.offerFirstTemporaryDirectBuffer(dst); @@ -746,6 +752,7 @@ void blockingReceive(DatagramPacket p, long nanos) throws IOException { private ByteBuffer tryBlockingReceive(boolean connected, int len, long nanos) throws IOException { + assert nanos >= 0; long startNanos = System.nanoTime(); ByteBuffer dst = Util.getTemporaryDirectBuffer(len); int n = -1; From 3ca359ad224b07f283c99eb43bed02eb93ef2dc7 Mon Sep 17 00:00:00 2001 From: Daniel Fuchs Date: Mon, 19 Aug 2024 13:47:40 +0000 Subject: [PATCH 33/67] 8335771: Improve stability of java/nio/channels/DatagramChannel tests Reviewed-by: alanb --- .../DatagramChannel/AdaptorMulticasting.java | 32 ++++-- .../DatagramChannel/AfterDisconnect.java | 99 +++++++++++++++---- .../nio/channels/DatagramChannel/Connect.java | 18 +++- .../ManySourcesAndTargets.java | 27 ++++- .../MulticastSendReceiveTests.java | 21 +++- .../channels/DatagramChannel/NotBound.java | 74 ++++++++++---- .../channels/DatagramChannel/Promiscuous.java | 43 ++++---- .../channels/DatagramChannel/ReceiveISA.java | 18 +++- .../DatagramChannel/SelectWhenRefused.java | 60 ++++++++--- .../DatagramChannel/SendReceiveMaxSize.java | 33 ++++++- .../nio/channels/DatagramChannel/Sender.java | 52 ++++++---- 11 files changed, 365 insertions(+), 112 deletions(-) diff --git a/test/jdk/java/nio/channels/DatagramChannel/AdaptorMulticasting.java b/test/jdk/java/nio/channels/DatagramChannel/AdaptorMulticasting.java index 0c8da1ff9d9..cab2bc76285 100644 --- a/test/jdk/java/nio/channels/DatagramChannel/AdaptorMulticasting.java +++ b/test/jdk/java/nio/channels/DatagramChannel/AdaptorMulticasting.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -48,6 +48,7 @@ import java.util.stream.Collectors; import static java.net.StandardSocketOptions.*; import static java.net.StandardProtocolFamily.*; +import static jdk.test.lib.NetworkConfiguration.isSameInterface; import jdk.test.lib.NetworkConfiguration; import jdk.test.lib.net.IPSupport; @@ -295,8 +296,8 @@ static void testNetworkInterface(MulticastSocket s, // setNetworkInterface s.setNetworkInterface(ni); - assertTrue(s.getNetworkInterface().equals(ni)); - assertTrue(s.getOption(IP_MULTICAST_IF).equals(ni)); + assertTrue(isSameInterface(s.getNetworkInterface(), ni)); + assertTrue(isSameInterface(s.getOption(IP_MULTICAST_IF), ni)); InetAddress address = s.getInterface(); assertTrue(ni.inetAddresses().filter(address::equals).findAny().isPresent()); @@ -315,8 +316,8 @@ static void testNetworkInterface(MulticastSocket s, // setOption(IP_MULTICAST_IF) s.setOption(IP_MULTICAST_IF, ni); - assertTrue(s.getOption(IP_MULTICAST_IF).equals(ni)); - assertTrue(s.getNetworkInterface().equals(ni)); + assertTrue(isSameInterface(s.getOption(IP_MULTICAST_IF), ni)); + assertTrue(isSameInterface(s.getNetworkInterface(), ni)); // bad values for IP_MULTICAST_IF assertThrows(IllegalArgumentException.class, @@ -412,7 +413,8 @@ static void testSendReceive(MulticastSocket s, InetAddress group) throws IOExcep assertTrue(s.getOption(IP_MULTICAST_IF) != null); SocketAddress target = new InetSocketAddress(group, s.getLocalPort()); - byte[] message = "hello".getBytes("UTF-8"); + String msg = "AdaptorMulticasting: " + System.nanoTime(); + byte[] message = msg.getBytes("UTF-8"); // send message to multicast group DatagramPacket p = new DatagramPacket(message, message.length); @@ -421,8 +423,22 @@ static void testSendReceive(MulticastSocket s, InetAddress group) throws IOExcep // receive message s.setSoTimeout(0); - p = new DatagramPacket(new byte[1024], 100); - s.receive(p); + while (true) { + p = new DatagramPacket(new byte[1024], 100); + s.receive(p); + if (p.getPort() == s.getLocalPort()) { + String str = new String(p.getData(), p.getOffset(), p.getLength(), "UTF-8"); + if (Arrays.equals(p.getData(), p.getOffset(), p.getLength(), message, 0, message.length)) { + System.out.format("Got expected message \"%s\" from %s%n", str, p.getSocketAddress()); + break; + } + System.out.println("Unexpected message received. Expected: " + msg); + System.out.println("Received message doesn't match - skipping: " + str); + } else { + System.out.println("Unexpected message received. Expected message from: " + s.getLocalAddress()); + System.out.println("Received message sender doesn't match - skipping: " + p.getSocketAddress()); + } + } assertTrue(p.getLength() == message.length); assertTrue(p.getPort() == s.getLocalPort()); diff --git a/test/jdk/java/nio/channels/DatagramChannel/AfterDisconnect.java b/test/jdk/java/nio/channels/DatagramChannel/AfterDisconnect.java index 6a80d686b30..de59984dae1 100644 --- a/test/jdk/java/nio/channels/DatagramChannel/AfterDisconnect.java +++ b/test/jdk/java/nio/channels/DatagramChannel/AfterDisconnect.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,7 @@ */ import java.io.IOException; +import java.net.BindException; import java.net.InetAddress; import java.net.Inet6Address; import java.net.InetSocketAddress; @@ -46,6 +47,7 @@ import java.nio.channels.Selector; import java.util.HashMap; import java.util.Map; +import java.util.function.Predicate; import org.testng.annotations.Test; import static org.testng.Assert.*; @@ -54,6 +56,38 @@ public class AfterDisconnect { + interface RetryableTest { + public void runTest() throws T; + } + + // retry the given lambda (RetryableTest) if an exception + // that satisfies the predicate (retryOn) is caught. + void testWithRetry(RetryableTest test, + Predicate retryOn, + int max) throws T { + for (int i=0; i < max; i++) { + try { + test.runTest(); + break; + } catch (Throwable t) { + if (i < max -1 && retryOn.test(t)) { + System.out.println("Got " + t + "; will retry"); + } else throw t; + } + } + } + + /** + * When calling {@link DatagramChannel#disconnect()} a {@link BindException} + * may occur. In which case we want to retry the test. + */ + class BindExceptionOnDisconnect extends BindException { + BindExceptionOnDisconnect(BindException x) { + super(x.getMessage()); + initCause(x); + } + } + @Test public void execute() throws IOException { IPSupport.throwSkippedExceptionIfNonOperational(); @@ -61,34 +95,41 @@ public void execute() throws IOException { InetAddress lb = InetAddress.getLoopbackAddress(); // test with default protocol family - try (DatagramChannel dc = DatagramChannel.open()) { - System.out.println("Test with default"); - dc.bind(new InetSocketAddress(lb, 0)); - test(dc); - test(dc); - } - - // test with IPv6 socket - if (IPSupport.hasIPv6()) { - System.out.println("Test with IPv6 socket"); - try (DatagramChannel dc = DatagramChannel.open(StandardProtocolFamily.INET6)) { + System.out.println("Test with default"); + testWithRetry(() -> { + try (DatagramChannel dc = DatagramChannel.open()) { dc.bind(new InetSocketAddress(lb, 0)); test(dc); test(dc); } + }, BindExceptionOnDisconnect.class::isInstance, 5); + + // test with IPv6 socket + if (IPSupport.hasIPv6()) { + System.out.println("Test with IPv6 socket"); + testWithRetry(() -> { + try (DatagramChannel dc = DatagramChannel.open(StandardProtocolFamily.INET6)) { + dc.bind(new InetSocketAddress(lb, 0)); + test(dc); + test(dc); + } + }, BindExceptionOnDisconnect.class::isInstance, 5); } // test with IPv4 socket if (IPSupport.hasIPv4() && !preferIPv6) { System.out.println("Test with IPv4 socket"); - try (DatagramChannel dc = DatagramChannel.open(StandardProtocolFamily.INET)) { - dc.bind(new InetSocketAddress(lb, 0)); - test(dc); - test(dc); - } + testWithRetry(() -> { + try (DatagramChannel dc = DatagramChannel.open(StandardProtocolFamily.INET)) { + dc.bind(new InetSocketAddress(lb, 0)); + test(dc); + test(dc); + } + }, BindExceptionOnDisconnect.class::isInstance, 5); } } + void test(DatagramChannel dc) throws IOException { testLocalAddress(dc); testSocketOptions(dc); @@ -111,7 +152,11 @@ void testLocalAddress(DatagramChannel dc) throws IOException { assertEquals(dc.getLocalAddress(), local); assertEquals(dc.getRemoteAddress(), remote); - dc.disconnect(); + try { + dc.disconnect(); + } catch (BindException x) { + throw new BindExceptionOnDisconnect(x); + } assertFalse(dc.isConnected()); assertEquals(dc.getLocalAddress(), local); assertTrue(dc.getRemoteAddress() == null); @@ -134,7 +179,11 @@ void testSocketOptions(DatagramChannel dc) throws IOException { 
Map, Object> map = options(dc); dc.connect(dc.getLocalAddress()); - dc.disconnect(); + try { + dc.disconnect(); + } catch (BindException x) { + throw new BindExceptionOnDisconnect(x); + } // check socket options have not changed assertEquals(map, options(dc)); @@ -168,7 +217,11 @@ void testSelectorRegistration(DatagramChannel dc) throws IOException { sel.selectNow(); dc.connect(dc.getLocalAddress()); - dc.disconnect(); + try { + dc.disconnect(); + } catch (BindException x) { + throw new BindExceptionOnDisconnect(x); + } // selection key should still be valid assertTrue(key.isValid()); @@ -210,7 +263,11 @@ void testMulticastGroups(DatagramChannel dc) throws IOException { MembershipKey key = dc.join(group, ni); dc.connect(dc.getLocalAddress()); - dc.disconnect(); + try { + dc.disconnect(); + } catch (BindException x) { + throw new BindExceptionOnDisconnect(x); + } // membership key should still be valid assertTrue(key.isValid()); diff --git a/test/jdk/java/nio/channels/DatagramChannel/Connect.java b/test/jdk/java/nio/channels/DatagramChannel/Connect.java index 082a3234cc5..41c172b3328 100644 --- a/test/jdk/java/nio/channels/DatagramChannel/Connect.java +++ b/test/jdk/java/nio/channels/DatagramChannel/Connect.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,8 @@ /* @test * @bug 4313882 7183800 + * @library /test/lib + * @build jdk.test.lib.Platform Connect * @run main/othervm Connect * @summary Test DatagramChannel's send and receive methods */ @@ -38,6 +40,8 @@ import java.util.concurrent.CompletionException; import java.util.stream.Stream; +import jdk.test.lib.Platform; + import static java.nio.charset.StandardCharsets.US_ASCII; public class Connect { @@ -114,9 +118,21 @@ public void run() { ByteBuffer bb = ByteBuffer.allocateDirect(MAX); bb.put(bytes); bb.flip(); + // When connecting an unbound datagram channel, the underlying + // socket will first be bound to the wildcard address. On macOS, + // the system may allocate the same port on which another socket + // is already bound with a more specific address. This may prevent + // datagrams directed at the connected socket to reach it. + // To avoid this, when on macOS, we preemptively bind `dc` to the + // specific address instead of letting it bind to the wildcard. + if (Platform.isOSX()) { + dc.bind(new InetSocketAddress(((InetSocketAddress)connectSocketAddress).getAddress(), 0)); + err.println("Initiator bound to: " + connectSocketAddress); + } err.println("Initiator connecting to: " + connectSocketAddress); dc.connect(connectSocketAddress); err.println("Initiator bound to: " + dc.getLocalAddress()); + assert !connectSocketAddress.equals(dc.getLocalAddress()); // Send a message err.println("Initiator attempting to write to Responder at " + connectSocketAddress); diff --git a/test/jdk/java/nio/channels/DatagramChannel/ManySourcesAndTargets.java b/test/jdk/java/nio/channels/DatagramChannel/ManySourcesAndTargets.java index ed4e9c0c02e..18a18fcba0a 100644 --- a/test/jdk/java/nio/channels/DatagramChannel/ManySourcesAndTargets.java +++ b/test/jdk/java/nio/channels/DatagramChannel/ManySourcesAndTargets.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2024, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,7 +24,7 @@ /* @test * @bug 8234805 8235193 * @summary Test DatagramChannel send/receive and that receive returns the expected - * sender address + * sender address. * @run main/othervm ManySourcesAndTargets * @run main/othervm -Djava.net.preferIPv4Stack=true ManySourcesAndTargets */ @@ -63,6 +63,7 @@ public static void main(String[] args) throws Exception { try (DatagramChannel reader = DatagramChannel.open()) { // bind reader to wildcard address so it can receive from any address reader.bind(new InetSocketAddress(0)); + System.out.println("\nReader bound to: " + reader.getLocalAddress()); for (InetAddress address : addresses) { System.out.format("%n-- %s --%n", address.getHostAddress()); @@ -75,6 +76,7 @@ public static void main(String[] args) throws Exception { try (DatagramChannel sender = DatagramChannel.open()) { // bind sender to wildcard address so it can send to any address sender.bind(new InetSocketAddress(0)); + System.out.println("\nSender bound to: " + sender.getLocalAddress()); for (InetAddress address : addresses) { System.out.format("%n-- %s --%n", address.getHostAddress()); @@ -97,6 +99,11 @@ static void testSend(int count, InetAddress address, DatagramChannel reader) thr sender.bind(new InetSocketAddress(address, 0)); SocketAddress local = sender.getLocalAddress(); + System.out.println("Sender bound to: " + local); + if (((InetSocketAddress)local).getPort() == remotePort) { + System.out.println("testSend: Sender and reader have same port: skipping"); + return; + } byte[] bytes = serialize(local); SocketAddress previousSource = null; @@ -105,6 +112,8 @@ static void testSend(int count, InetAddress address, DatagramChannel reader) thr sender.send(ByteBuffer.wrap(bytes), remote); ByteBuffer bb = ByteBuffer.allocate(1000); + System.out.format("testSend: reader waiting to receive at: %s%n", + reader.getLocalAddress()); SocketAddress source = reader.receive(bb); System.out.format("received datagram from %s%n", source); @@ -138,11 +147,18 @@ static void testReceive(int count, DatagramChannel sender, InetAddress address) SocketAddress remote = reader.getLocalAddress(); + System.out.println("Reader bound to: " + remote); + if (((InetSocketAddress)local).getPort() == ((InetSocketAddress)remote).getPort()) { + System.out.println("testReceive: Sender and reader have same port: skipping"); + return; + } for (int i = 0; i < count; i++) { System.out.format("send %s -> %s%n", local, remote); sender.send(ByteBuffer.allocate(32), remote); ByteBuffer bb = ByteBuffer.allocate(1000); + System.out.format("testReceive: reader waiting to receive at: %s%n", + reader.getLocalAddress()); SocketAddress source = reader.receive(bb); System.out.format("received datagram from %s%n", source); } @@ -165,7 +181,12 @@ private static SocketAddress deserialize(byte[] bytes) throws Exception { private static Optional networkInterface(InetAddress ia) { try { - return Optional.ofNullable(NetworkInterface.getByInetAddress(ia)); + NetworkInterface nif = NetworkInterface.getByInetAddress(ia); + if (nif != null) { + System.out.format("Selecting interface %s[%d]%n\twith addresses:%n\t%s%n", + nif.getDisplayName(), nif.getIndex(), nif.inetAddresses().toList()); + } + return Optional.ofNullable(nif); } catch (SocketException e) { return Optional.empty(); } diff --git a/test/jdk/java/nio/channels/DatagramChannel/MulticastSendReceiveTests.java 
b/test/jdk/java/nio/channels/DatagramChannel/MulticastSendReceiveTests.java index c1ea04ba216..87aedc8efd9 100644 --- a/test/jdk/java/nio/channels/DatagramChannel/MulticastSendReceiveTests.java +++ b/test/jdk/java/nio/channels/DatagramChannel/MulticastSendReceiveTests.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -99,15 +99,25 @@ static void receiveDatagram(DatagramChannel dc, ByteBuffer buf = ByteBuffer.allocateDirect(100); try { + long elapsed = 0; for (;;) { System.out.println("Waiting to receive message"); + long start = System.nanoTime(); sel.select(5*1000); + long waited = (System.nanoTime() - start) / 1000_000; + elapsed += waited; + buf.clear(); SocketAddress sa = dc.receive(buf); // no datagram received if (sa == null) { if (expectedSender != null) { - throw new RuntimeException("Expected message not received"); + if (elapsed > 4800) { + throw new RuntimeException("Expected message not received"); + } else { + sel.selectedKeys().clear(); + continue; + } } System.out.println("No message received (correct)"); return; @@ -123,8 +133,8 @@ static void receiveDatagram(DatagramChannel dc, int receivedId = -1; try { receivedId = Integer.parseInt(s); - System.out.format("Received message from %s (id=0x%x)\n", - sender, receivedId); + System.out.format("Received message from %s (id=0x%x, length=%s)\n", + sender, receivedId, bytes.length); } catch (NumberFormatException x) { System.out.format("Received message from %s (msg=%s)\n", sender, s); } @@ -142,7 +152,6 @@ static void receiveDatagram(DatagramChannel dc, } sel.selectedKeys().clear(); - buf.rewind(); } } finally { sel.close(); @@ -160,6 +169,8 @@ static void test(ProtocolFamily family, throws IOException { System.out.format("\nTest DatagramChannel to %s socket\n", family.name()); + System.out.format("With interface=%s[%s]%n\twith bound addresses:%n\t%s%n", + nif.getDisplayName(), nif.getIndex(), nif.inetAddresses().toList()); try (DatagramChannel dc = (family == UNSPEC) ? DatagramChannel.open() : DatagramChannel.open(family)) { dc.setOption(StandardSocketOptions.SO_REUSEADDR, true) diff --git a/test/jdk/java/nio/channels/DatagramChannel/NotBound.java b/test/jdk/java/nio/channels/DatagramChannel/NotBound.java index 4cd9a030336..bcd73591bf9 100644 --- a/test/jdk/java/nio/channels/DatagramChannel/NotBound.java +++ b/test/jdk/java/nio/channels/DatagramChannel/NotBound.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,19 +24,26 @@ /* @test * @bug 4512723 6621689 * @summary Test that connect/send/receive with unbound DatagramChannel causes - * the channel's socket to be bound to a local address + * the channel's socket to be bound to a local address. 
+ * @run main/othervm NotBound */ import java.net.*; import java.nio.ByteBuffer; import java.nio.channels.DatagramChannel; import java.io.IOException; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; public class NotBound { + static final CountDownLatch received = new CountDownLatch(1); + static void checkBound(DatagramChannel dc) throws IOException { if (dc.getLocalAddress() == null) throw new RuntimeException("Not bound??"); + System.out.println("Bound to: " + dc.getLocalAddress()); } // starts a thread to send a datagram to the given channel once the channel @@ -51,19 +58,44 @@ public void run() { Thread.sleep(50); local = (InetSocketAddress)dc.getLocalAddress(); } while (local == null); + System.out.format("receiver bound to: %s%n", local); - // send message to channel to wakeup receiver - DatagramChannel sender = DatagramChannel.open(); - try { - ByteBuffer bb = ByteBuffer.wrap("hello".getBytes()); - InetAddress lh = InetAddress.getLocalHost(); - SocketAddress target = - new InetSocketAddress(lh, local.getPort()); - sender.send(bb, target); - } finally { - sender.close(); + boolean isAnyLocal = local.getAddress().isAnyLocalAddress(); + int maxAttempts = 5; + int localPort = 0; + List llh = isAnyLocal + ? List.of(InetAddress.getLocalHost(), InetAddress.getLoopbackAddress()) + : List.of(local.getAddress()); + SocketAddress target = null; + for (int i = 0 ; i < maxAttempts ; i++) { + InetAddress lh = llh.get(i % llh.size()); + target = new InetSocketAddress(lh, local.getPort()); + // send message to channel to wakeup receiver + try (DatagramChannel sender = DatagramChannel.open()) { + ByteBuffer bb = ByteBuffer.wrap("NotBound: hello".getBytes()); + sender.send(bb, target); + System.out.format("Woke up receiver: sent datagram to %s from %s%n", + target, sender.getLocalAddress()); + localPort = ((InetSocketAddress)sender.getLocalAddress()).getPort(); + } + if (received.await(250, TimeUnit.MILLISECONDS)) { + // The datagram has been received: no need to continue + // sending + break; + } + // if sender port and destination port were identical, which + // could happen on some systems, the receiver might not receive + // the datagram. So in that case we try again, bailing out if + // we had to retry too many times + if (localPort == local.getPort()) { + System.out.println("Local port and peer port are identical. Retrying..."); + } else { + System.out.println("Datagram not received after 250ms. 
Retrying..."); + } + } + if (localPort == local.getPort()) { + System.out.println("Couldn't find a port to send to " + target); } - } catch (Exception x) { x.printStackTrace(); } @@ -77,14 +109,12 @@ public static void main(String[] args) throws IOException { // connect dc = DatagramChannel.open(); try { - DatagramChannel peer = DatagramChannel.open() - .bind(new InetSocketAddress(0)); - int peerPort = ((InetSocketAddress)(peer.getLocalAddress())).getPort(); - try { + System.out.println("Check that connect() binds the socket"); + try (DatagramChannel peer = DatagramChannel.open()) { + peer.bind(new InetSocketAddress(0)); + int peerPort = ((InetSocketAddress)(peer.getLocalAddress())).getPort(); dc.connect(new InetSocketAddress(InetAddress.getLocalHost(), peerPort)); checkBound(dc); - } finally { - peer.close(); } } finally { dc.close(); @@ -93,7 +123,8 @@ public static void main(String[] args) throws IOException { // send dc = DatagramChannel.open(); try { - ByteBuffer bb = ByteBuffer.wrap("ignore this".getBytes()); + System.out.println("Check that send() binds the socket"); + ByteBuffer bb = ByteBuffer.wrap("NotBound: ignore this".getBytes()); SocketAddress target = new InetSocketAddress(InetAddress.getLocalHost(), 5000); dc.send(bb, target); @@ -105,9 +136,11 @@ public static void main(String[] args) throws IOException { // receive (blocking) dc = DatagramChannel.open(); try { + System.out.println("Check that blocking receive() binds the socket"); ByteBuffer bb = ByteBuffer.allocateDirect(128); wakeupWhenBound(dc); SocketAddress sender = dc.receive(bb); + received.countDown(); if (sender == null) throw new RuntimeException("Sender should not be null"); checkBound(dc); @@ -118,6 +151,7 @@ public static void main(String[] args) throws IOException { // receive (non-blocking) dc = DatagramChannel.open(); try { + System.out.println("Check that non-blocking receive() binds the socket"); dc.configureBlocking(false); ByteBuffer bb = ByteBuffer.allocateDirect(128); SocketAddress sender = dc.receive(bb); diff --git a/test/jdk/java/nio/channels/DatagramChannel/Promiscuous.java b/test/jdk/java/nio/channels/DatagramChannel/Promiscuous.java index 8db9c60c0b5..a8a5772b87b 100644 --- a/test/jdk/java/nio/channels/DatagramChannel/Promiscuous.java +++ b/test/jdk/java/nio/channels/DatagramChannel/Promiscuous.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -71,9 +71,11 @@ static int sendDatagram(NetworkInterface nif, dc.setOption(StandardSocketOptions.IP_MULTICAST_IF, nif); byte[] msg = Integer.toString(id).getBytes("UTF-8"); ByteBuffer buf = ByteBuffer.wrap(msg); - System.out.format("Send message -> group %s (id=0x%x)\n", - group.getHostAddress(), id); + System.out.format("Send message -> group [%s]:%d (id=0x%x) nif:%s[%s]%n", + group.getHostAddress(), port, id, nif.getDisplayName(), nif.getIndex()); + System.out.format("bound address before send: %s%n", dc.getLocalAddress()); dc.send(buf, new InetSocketAddress(group, port)); + System.out.format("bound address after send: %s%n", dc.getLocalAddress()); } return id; } @@ -97,15 +99,26 @@ static void receiveDatagram(DatagramChannel dc, ByteBuffer buf = ByteBuffer.allocateDirect(100); try { + long elapsed = 0; for (;;) { System.out.println("Waiting to receive message"); + long start = System.nanoTime(); sel.select(5*1000); + long waited = (System.nanoTime() - start) / 1000_000; + elapsed += waited; + buf.clear(); SocketAddress sa = dc.receive(buf); // no datagram received if (sa == null) { if (datagramExpected) { - throw new RuntimeException("Expected message not received"); + if (elapsed > 4800) { + throw new RuntimeException("Expected message not received"); + } else { + sel.selectedKeys().clear(); + // We haven't waited long enough, + continue; + } } System.out.println("No message received (correct)"); return; @@ -121,8 +134,8 @@ static void receiveDatagram(DatagramChannel dc, int receivedId = -1; try { receivedId = Integer.parseInt(s); - System.out.format("Received message from %s (id=0x%x)\n", - sender, receivedId); + System.out.format("Received message from %s (id=0x%x, length=%s)\n", + sender, receivedId, bytes.length); } catch (NumberFormatException x) { System.out.format("Received message from %s (msg=%s)\n", sender, s); } @@ -140,7 +153,6 @@ static void receiveDatagram(DatagramChannel dc, } sel.selectedKeys().clear(); - buf.rewind(); } } finally { sel.close(); @@ -155,13 +167,14 @@ static void test(ProtocolFamily family, { System.out.format("%nTest family=%s%n", family.name()); + System.out.format("With interface=%s[%s]%n\twith bound addresses:%n\t%s%n", + nif.getDisplayName(), nif.getIndex(), nif.inetAddresses().toList()); - DatagramChannel dc1 = (family == UNSPEC) ? - DatagramChannel.open() : DatagramChannel.open(family); - DatagramChannel dc2 = (family == UNSPEC) ? - DatagramChannel.open() : DatagramChannel.open(family); + try (DatagramChannel dc1 = (family == UNSPEC) ? + DatagramChannel.open() : DatagramChannel.open(family); + DatagramChannel dc2 = (family == UNSPEC) ? + DatagramChannel.open() : DatagramChannel.open(family)) { - try { dc1.setOption(StandardSocketOptions.SO_REUSEADDR, true); dc2.setOption(StandardSocketOptions.SO_REUSEADDR, true); @@ -184,12 +197,8 @@ static void test(ProtocolFamily family, id = sendDatagram(nif, group2, port); - receiveDatagram(dc1, "dc1", false, id); receiveDatagram(dc2, "dc2", true, id); - - } finally { - dc1.close(); - dc2.close(); + receiveDatagram(dc1, "dc1", false, id); } } diff --git a/test/jdk/java/nio/channels/DatagramChannel/ReceiveISA.java b/test/jdk/java/nio/channels/DatagramChannel/ReceiveISA.java index 69df204e915..ef764c85d71 100644 --- a/test/jdk/java/nio/channels/DatagramChannel/ReceiveISA.java +++ b/test/jdk/java/nio/channels/DatagramChannel/ReceiveISA.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,10 +27,19 @@ * @summary Check that DatagramChannel.receive returns a new SocketAddress * when it receives a packet from the same source address but * different endpoint. + * @library /test/lib + * @build jdk.test.lib.NetworkConfiguration + * jdk.test.lib.Platform + * ReceiveISA + * @run main/othervm ReceiveISA + * */ import java.nio.*; import java.nio.channels.*; import java.net.*; + +import jdk.test.lib.Platform; + import static java.lang.System.out; public class ReceiveISA { @@ -44,10 +53,13 @@ public static void main(String args[]) throws Exception { DatagramChannel dc3 = DatagramChannel.open(); DatagramChannel dc4 = DatagramChannel.open()) { // client - dc3.socket().bind((SocketAddress) null); // bind server to any port + InetAddress lh = InetAddress.getLocalHost(); + InetSocketAddress dest = Platform.isOSX() + ? new InetSocketAddress(lh, 0) + : null; + dc3.socket().bind(dest); // bind server to any port // get server address - InetAddress lh = InetAddress.getLocalHost(); InetSocketAddress isa = new InetSocketAddress(lh, dc3.socket().getLocalPort()); ByteBuffer bb = ByteBuffer.allocateDirect(100); diff --git a/test/jdk/java/nio/channels/DatagramChannel/SelectWhenRefused.java b/test/jdk/java/nio/channels/DatagramChannel/SelectWhenRefused.java index d851e4f2b81..58ccf9d0b80 100644 --- a/test/jdk/java/nio/channels/DatagramChannel/SelectWhenRefused.java +++ b/test/jdk/java/nio/channels/DatagramChannel/SelectWhenRefused.java @@ -39,8 +39,8 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; public class SelectWhenRefused { - static final int MAX_TRIES = 3; - static final String GREETINGS_MESSAGE = "Greetings from SelectWhenRefused!"; + static final int MAX_TRIES = 10; + static final String GREETINGS_MESSAGE = System.nanoTime() + ": Greetings from SelectWhenRefused!"; @Test public void test() throws IOException { @@ -49,9 +49,39 @@ public void test() throws IOException { // datagram sent to this address should be refused SocketAddress refuser = new InetSocketAddress(InetAddress.getLocalHost(), port); + System.err.println("Refuser is: " + refuser); - DatagramChannel dc = DatagramChannel.open().bind(new InetSocketAddress(0)); + DatagramChannel dc = null; + for (int i=0; i < MAX_TRIES; i++) { + dc = DatagramChannel.open(); + try { + dc.bind(new InetSocketAddress(0)); + } catch (Throwable t) { + dc.close(); + throw t; + } + + // check the port assigned to dc + if (((InetSocketAddress)dc.getLocalAddress()).getPort() != port) { + // We got a good port. Do not retry + break; + } + + // We bound to the same port that the refuser is using, This will not + // work. Retry binding if possible. + if (i < MAX_TRIES - 1) { + // we will retry... + System.err.format("Refuser port has been reused by dc: %s, retrying...%n", + dc.getLocalAddress()); + } else { + // that was the last attempt... 
Skip the test + System.err.format("Skipping test: refuser port has been reused by dc: %s%n", + dc.getLocalAddress()); + return; + } + } dc1.close(); + assert dc != null; Selector sel = Selector.open(); try { @@ -88,7 +118,7 @@ public void test() throws IOException { } } catch (BindException e) { // Do nothing, some other test has used this port - System.out.println("Skipping test: refuser port has been reused: " + e); + System.err.println("Skipping test: refuser port has been reused: " + e); } finally { sel.close(); dc.close(); @@ -119,7 +149,9 @@ static boolean testNoPUEBeforeConnection(DatagramChannel dc, // BindException will be thrown if another service is using // our expected refuser port, cannot run just exit. - DatagramChannel.open().bind(refuser).close(); + try (DatagramChannel dc2 = DatagramChannel.open()) { + dc2.bind(refuser); + } throw new RuntimeException("Unexpected wakeup"); } return true; // test passed @@ -151,7 +183,7 @@ static boolean testPUEOnConnect(DatagramChannel dc, byte[] bytes = new byte[buf.remaining()]; buf.get(bytes); String message = new String(bytes); - System.out.format("received %s at %s from %s%n", message, dc.getLocalAddress(), sa); + System.err.format("received %s at %s from %s%n", message, dc.getLocalAddress(), sa); // If any received data contains the message from sendDatagram then throw exception if (message.contains(GREETINGS_MESSAGE)) { @@ -166,10 +198,12 @@ static boolean testPUEOnConnect(DatagramChannel dc, // BindException will be thrown if another service is using // our expected refuser port, cannot run just exit. - DatagramChannel.open().bind(refuser).close(); + try (DatagramChannel dc2 = DatagramChannel.open()) { + dc2.bind(refuser); + } throw new RuntimeException("PortUnreachableException not raised"); } catch (PortUnreachableException pue) { - System.out.println("Got expected PortUnreachableException " + pue); + System.err.println("Got expected PortUnreachableException " + pue); } } return true; // test passed @@ -215,16 +249,16 @@ static void sendDatagram(DatagramChannel dc, SocketAddress remote) * */ static boolean checkUnexpectedWakeup(Set selectedKeys) { - System.out.format("Received %d keys%n", selectedKeys.size()); + System.err.format("Received %d keys%n", selectedKeys.size()); for (SelectionKey key : selectedKeys) { if (!key.isValid() || !key.isReadable()) { - System.out.println("Invalid or unreadable key: " + key); + System.err.println("Invalid or unreadable key: " + key); continue; } try { - System.out.println("Attempting to read datagram from key: " + key); + System.err.println("Attempting to read datagram from key: " + key); DatagramChannel datagramChannel = (DatagramChannel) key.channel(); ByteBuffer buf = ByteBuffer.allocate(100); SocketAddress sa = datagramChannel.receive(buf); @@ -234,7 +268,7 @@ static boolean checkUnexpectedWakeup(Set selectedKeys) { byte[] bytes = new byte[buf.remaining()]; buf.get(bytes); String message = new String(bytes); - System.out.format("received %s at %s from %s%n", message, datagramChannel.getLocalAddress(), sa); + System.err.format("received %s at %s from %s%n", message, datagramChannel.getLocalAddress(), sa); // If any received data contains the message from sendDatagram then return false if (message.contains(GREETINGS_MESSAGE)) { @@ -243,7 +277,7 @@ static boolean checkUnexpectedWakeup(Set selectedKeys) { } } catch (IOException io) { - System.out.println("Unable to read from datagram " + io); + System.err.println("Unable to read from datagram " + io); } } return true; diff --git 
a/test/jdk/java/nio/channels/DatagramChannel/SendReceiveMaxSize.java b/test/jdk/java/nio/channels/DatagramChannel/SendReceiveMaxSize.java index 31407ecb493..8d74fd8a387 100644 --- a/test/jdk/java/nio/channels/DatagramChannel/SendReceiveMaxSize.java +++ b/test/jdk/java/nio/channels/DatagramChannel/SendReceiveMaxSize.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,7 @@ import jdk.test.lib.NetworkConfiguration; import jdk.test.lib.Platform; import jdk.test.lib.net.IPSupport; +import org.testng.Assert; import org.testng.annotations.BeforeTest; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @@ -46,6 +47,7 @@ import java.net.Inet6Address; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.SocketAddress; import java.nio.ByteBuffer; import java.nio.channels.DatagramChannel; import java.util.ArrayList; @@ -139,7 +141,9 @@ public void testSendReceiveMaxSize(DatagramChannelSupplier supplier, int capacit var addr = new InetSocketAddress(host, port); try (var sender = supplier.open()) { - sender.bind(null); + sender.bind(new InetSocketAddress(host, 0)); + System.out.format("testSendReceiveMaxSize: sender: %s -> receiver: %s%n", + sender.getLocalAddress(), receiver.getLocalAddress()); if (!Platform.isOSX()) { if (sender.getOption(SO_SNDBUF) < capacity) sender.setOption(SO_SNDBUF, capacity); @@ -150,7 +154,18 @@ public void testSendReceiveMaxSize(DatagramChannelSupplier supplier, int capacit var sendBuf = ByteBuffer.wrap(testData); sender.send(sendBuf, addr); var receiveBuf = ByteBuffer.allocate(capacity); - receiver.receive(receiveBuf); + SocketAddress src; + int count = 0; + do { + receiveBuf.clear(); + src = receiver.receive(receiveBuf); + if (sender.getLocalAddress().equals(src)) break; + System.out.println("step1: received unexpected datagram from: " + src); + System.out.println("\texpected: " + sender.getLocalAddress()); + if (++count > 10) { + throw new AssertionError("too many unexpected messages"); + } + } while (true); sendBuf.flip(); receiveBuf.flip(); @@ -167,7 +182,17 @@ public void testSendReceiveMaxSize(DatagramChannelSupplier supplier, int capacit sendBuf = ByteBuffer.wrap(testData); sender.send(sendBuf, addr); receiveBuf = ByteBuffer.allocate(capacity - 1); - receiver.receive(receiveBuf); + count = 0; + do { + receiveBuf.clear(); + src = receiver.receive(receiveBuf); + if (sender.getLocalAddress().equals(src)) break; + System.out.println("step1: received unexpected datagram from: " + src); + System.out.println("\texpected: " + sender.getLocalAddress()); + if (++count > 10) { + throw new AssertionError("too many unexpected messages"); + } + } while (true); sendBuf.flip(); receiveBuf.flip(); diff --git a/test/jdk/java/nio/channels/DatagramChannel/Sender.java b/test/jdk/java/nio/channels/DatagramChannel/Sender.java index 8ab4268e3a4..fcecdf9d79a 100644 --- a/test/jdk/java/nio/channels/DatagramChannel/Sender.java +++ b/test/jdk/java/nio/channels/DatagramChannel/Sender.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,9 @@ /* @test * @bug 4669040 8130394 * @summary Test DatagramChannel subsequent receives with no datagram ready + * @library /test/lib + * @build jdk.test.lib.Platform Sender + * @run main Sender * @author Mike McCloskey */ @@ -36,6 +39,8 @@ import java.nio.ByteOrder; import java.nio.channels.DatagramChannel; +import jdk.test.lib.Platform; + public class Sender { static PrintStream log = System.err; @@ -46,25 +51,26 @@ public static void main(String[] args) throws Exception { } static void test() throws Exception { - Server server = new Server(); - Client client = new Client(server.port()); + try (Server server = new Server()) { + Client client = new Client(server.port()); - Thread serverThread = new Thread(server); - serverThread.start(); + Thread serverThread = new Thread(server); + serverThread.start(); - Thread clientThread = new Thread(client); - clientThread.start(); + Thread clientThread = new Thread(client); + clientThread.start(); - serverThread.join(); - clientThread.join(); + serverThread.join(); + clientThread.join(); - server.throwException(); - client.throwException(); + server.throwException(); + client.throwException(); + } } public static class Client implements Runnable { final int port; - Exception e = null; + volatile Exception e = null; Client(int port) { this.port = port; @@ -76,14 +82,17 @@ void throwException() throws Exception { } public void run() { - try { - DatagramChannel dc = DatagramChannel.open(); + try (DatagramChannel dc = DatagramChannel.open()) { ByteBuffer bb = ByteBuffer.allocateDirect(12); bb.order(ByteOrder.BIG_ENDIAN); bb.putInt(1).putLong(1); bb.flip(); InetAddress address = InetAddress.getLocalHost(); InetSocketAddress isa = new InetSocketAddress(address, port); + if (Platform.isOSX()) { + // avoid binding on wildcard on macOS + dc.bind(new InetSocketAddress(address, 0)); + } dc.connect(isa); clientISA = dc.getLocalAddress(); dc.write(bb); @@ -93,12 +102,16 @@ public void run() { } } - public static class Server implements Runnable { + public static class Server implements Runnable, AutoCloseable { final DatagramChannel dc; - Exception e = null; + volatile Exception e = null; Server() throws IOException { - dc = DatagramChannel.open().bind(new InetSocketAddress(0)); + // avoid binding to wildcard address on macOS + InetSocketAddress lo = Platform.isOSX() + ? 
new InetSocketAddress(InetAddress.getLocalHost(), 0) + : new InetSocketAddress(0); + dc = DatagramChannel.open().bind(lo); } int port() { @@ -149,6 +162,11 @@ public void run() { e = ex; } } + + @Override + public void close() throws IOException { + dc.close(); + } } } From 6460b300487071bcf98f5ac70d9c0a6fd6b94083 Mon Sep 17 00:00:00 2001 From: Damon Nguyen Date: Mon, 19 Aug 2024 16:43:43 +0000 Subject: [PATCH 34/67] 8321140: Add comment to note difference in Metal's JButton margins Reviewed-by: honkar, aivanov --- .../swing/plaf/basic/BasicLookAndFeel.java | 4 +- .../swing/plaf/metal/MetalLookAndFeel.java | 51 ++++++++++++++----- 2 files changed, 42 insertions(+), 13 deletions(-) diff --git a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicLookAndFeel.java b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicLookAndFeel.java index 999e7f923c7..4041bfca714 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicLookAndFeel.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicLookAndFeel.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -727,6 +727,8 @@ public Object createValue(UIDefaults table) { "Button.highlight", controlLtHighlight, "Button.border", buttonBorder, "Button.margin", new InsetsUIResource(2, 14, 2, 14), + // The above margin has vastly larger horizontal values when + // compared to other look and feels that don't rely on these values "Button.textIconGap", 4, "Button.textShiftOffset", zero, "Button.focusInputMap", new UIDefaults.LazyInputMap(new Object[] { diff --git a/src/java.desktop/share/classes/javax/swing/plaf/metal/MetalLookAndFeel.java b/src/java.desktop/share/classes/javax/swing/plaf/metal/MetalLookAndFeel.java index 98747bbd2dc..e46091c523c 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/metal/MetalLookAndFeel.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/metal/MetalLookAndFeel.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,28 +25,53 @@ package javax.swing.plaf.metal; -import java.awt.*; +import java.awt.Color; +import java.awt.Component; +import java.awt.Container; +import java.awt.Frame; +import java.awt.Insets; +import java.awt.Toolkit; +import java.awt.Window; import java.beans.PropertyChangeEvent; import java.beans.PropertyChangeListener; -import javax.swing.plaf.*; -import javax.swing.*; -import javax.swing.plaf.basic.*; -import javax.swing.text.DefaultEditorKit; - -import java.awt.Color; import java.lang.ref.ReferenceQueue; import java.lang.ref.WeakReference; - import java.security.AccessController; -import sun.awt.*; +import javax.swing.ButtonModel; +import javax.swing.DefaultButtonModel; +import javax.swing.Icon; +import javax.swing.ImageIcon; +import javax.swing.JComponent; +import javax.swing.JDialog; +import javax.swing.JFrame; +import javax.swing.JRootPane; +import javax.swing.JTextField; +import javax.swing.JToggleButton; +import javax.swing.LayoutStyle; +import javax.swing.LookAndFeel; +import javax.swing.SwingConstants; +import javax.swing.SwingUtilities; +import javax.swing.UIDefaults; +import javax.swing.UIManager; +import javax.swing.plaf.BorderUIResource; +import javax.swing.plaf.ColorUIResource; +import javax.swing.plaf.FontUIResource; +import javax.swing.plaf.InsetsUIResource; +import javax.swing.plaf.UIResource; +import javax.swing.plaf.basic.BasicLookAndFeel; +import javax.swing.text.DefaultEditorKit; + +import sun.awt.AppContext; +import sun.awt.OSInfo; +import sun.awt.SunToolkit; import sun.security.action.GetPropertyAction; import sun.swing.DefaultLayoutStyle; -import static javax.swing.UIDefaults.LazyValue; - import sun.swing.SwingAccessor; import sun.swing.SwingUtilities2; +import static javax.swing.UIDefaults.LazyValue; + /** * The Java Look and Feel, otherwise known as Metal. *

@@ -782,6 +807,8 @@ protected void initComponentDefaults(UIDefaults table) { "SPACE", "pressed", "released SPACE", "released" }), + // Button default margin is (2, 14, 2, 14), defined in + // BasicLookAndFeel via "Button.margin" UI property. "CheckBox.disabledText", inactiveControlTextColor, "Checkbox.select", controlShadow, From c7690c34c2d7bff11501188266b7be7a486c1bd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hannes=20Walln=C3=B6fer?= Date: Mon, 19 Aug 2024 17:47:25 +0000 Subject: [PATCH 35/67] 8338190: TOC vertical offsets not updated when document size changes Reviewed-by: jjg --- .../doclets/formats/html/resources/script.js.template | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/script.js.template b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/script.js.template index 71ef8476708..633f453bc43 100644 --- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/script.js.template +++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/script.js.template @@ -461,7 +461,7 @@ document.addEventListener("DOMContentLoaded", function(e) { }) } // Resize handler - function handleResize(e) { + new ResizeObserver((entries) => { if (expanded) { if (windowWidth !== window.innerWidth) { collapse(); @@ -475,7 +475,5 @@ document.addEventListener("DOMContentLoaded", function(e) { handleScroll(); } setTopMargin(); - } - window.addEventListener("orientationchange", handleResize); - window.addEventListener("resize", handleResize); + }).observe(document.body); }); From 55851a312baaea5af14c04fb1b436313fe0deac7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hannes=20Walln=C3=B6fer?= Date: Mon, 19 Aug 2024 18:05:37 +0000 Subject: [PATCH 36/67] 8281533: Odd "preview" label in link/linkplain Reviewed-by: jjg --- .../doclets/formats/html/HtmlLinkFactory.java | 88 +++++++++++-------- .../formats/html/taglets/LinkTaglet.java | 1 + .../doclet/testPreview/TestPreview.java | 17 +++- .../doclet/testPreview/api/preview/Core.java | 9 +- 4 files changed, 73 insertions(+), 42 deletions(-) diff --git a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlLinkFactory.java b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlLinkFactory.java index 8e0c010dd1a..494d5e22d6e 100644 --- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlLinkFactory.java +++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/HtmlLinkFactory.java @@ -52,6 +52,7 @@ import jdk.javadoc.internal.html.Content; import jdk.javadoc.internal.html.ContentBuilder; import jdk.javadoc.internal.html.Entity; +import jdk.javadoc.internal.html.HtmlId; import jdk.javadoc.internal.html.HtmlTag; import jdk.javadoc.internal.html.HtmlTree; import jdk.javadoc.internal.html.Text; @@ -296,26 +297,12 @@ protected Content getClassLink(HtmlLinkInfo linkInfo) { Content link = new ContentBuilder(); if (utils.isIncluded(typeElement)) { if (configuration.isGeneratedDoc(typeElement) && !utils.hasHiddenTag(typeElement)) { - DocPath filename = getPath(linkInfo); + DocPath fileName = getPath(linkInfo); if (linkInfo.linkToSelf() || typeElement != m_writer.getCurrentPageElement()) { link.add(m_writer.links.createLink( - filename.fragment(linkInfo.getFragment()), - label, - linkInfo.getStyle(), - title)); - Content spacer = Text.EMPTY; - if (flags.contains(ElementFlag.PREVIEW)) { - 
link.add(HtmlTree.SUP(m_writer.links.createLink( - filename.fragment(m_writer.htmlIds.forPreviewSection(previewTarget).name()), - m_writer.contents.previewMark))); - spacer = Entity.NO_BREAK_SPACE; - } - if (flags.contains(ElementFlag.RESTRICTED)) { - link.add(spacer); - link.add(HtmlTree.SUP(m_writer.links.createLink( - filename.fragment(m_writer.htmlIds.forRestrictedSection(restrictedTarget).name()), - m_writer.contents.restrictedMark))); - } + fileName.fragment(linkInfo.getFragment()), + label, linkInfo.getStyle(), title)); + addSuperscript(link, flags, fileName, null, previewTarget, restrictedTarget); return link; } } @@ -325,38 +312,61 @@ protected Content getClassLink(HtmlLinkInfo linkInfo) { label, linkInfo.getStyle(), true); if (crossLink != null) { link.add(crossLink); - Content spacer = Text.EMPTY; - if (flags.contains(ElementFlag.PREVIEW)) { - link.add(HtmlTree.SUP(m_writer.getCrossClassLink( - typeElement, - m_writer.htmlIds.forPreviewSection(previewTarget).name(), - m_writer.contents.previewMark, - null, false))); - spacer = Entity.NO_BREAK_SPACE; - } - if (flags.contains(ElementFlag.RESTRICTED)) { - link.add(spacer); - link.add(HtmlTree.SUP(m_writer.getCrossClassLink( - typeElement, - m_writer.htmlIds.forRestrictedSection(restrictedTarget).name(), - m_writer.contents.restrictedMark, - null, false))); - } + addSuperscript(link, flags, null, typeElement, previewTarget, restrictedTarget); return link; } } // Can't link so just write label. link.add(label); + addSuperscript(link, flags, null, null, previewTarget, restrictedTarget); + return link; + } + + /** + * Adds PREVIEW and RESTRICTED superscript labels. Depending on the parameter values, + * labels will be formatted as local or external links or plain text. + * + * @param content the content to add to + * @param flags the flags + * @param fileName file name to link to, or null if no local link target + * @param typeElement external type to link to, or null if no external link + * @param previewTarget preview link target element + * @param restrictedTarget restricted link target element + */ + private void addSuperscript(Content content, Set flags, DocPath fileName, TypeElement typeElement, + Element previewTarget, ExecutableElement restrictedTarget) { Content spacer = Text.EMPTY; if (flags.contains(ElementFlag.PREVIEW)) { - link.add(HtmlTree.SUP(m_writer.contents.previewMark)); + content.add(HtmlTree.SUP(getSuperscript(fileName, typeElement, + m_writer.htmlIds.forPreviewSection(previewTarget), + m_writer.contents.previewMark))); spacer = Entity.NO_BREAK_SPACE; } if (flags.contains(ElementFlag.RESTRICTED)) { - link.add(spacer); - link.add(HtmlTree.SUP(m_writer.contents.restrictedMark)); + content.add(spacer); + content.add(HtmlTree.SUP(getSuperscript(fileName, typeElement, + m_writer.htmlIds.forRestrictedSection(restrictedTarget), + m_writer.contents.restrictedMark))); + } + } + + /** + * Returns PREVIEW or RESTRICTED superscript as either local or external link or as plain text. 
+ * + * @param fileName local file name to link to, or null if no local link target + * @param typeElement external type to link to, or null if no external link + * @param id the id fragment to link to + * @param label the label content + * @return superscript content + */ + private Content getSuperscript(DocPath fileName, TypeElement typeElement, HtmlId id, Content label) { + if (fileName != null) { + return m_writer.links.createLink(fileName.fragment(id.name()), label); + } else if (typeElement != null) { + return (m_writer.getCrossClassLink(typeElement, id.name(), label, null, false)); + } else { + return label; } - return link; } /** diff --git a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/taglets/LinkTaglet.java b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/taglets/LinkTaglet.java index b16a1490b63..15e88da5bae 100644 --- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/taglets/LinkTaglet.java +++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/taglets/LinkTaglet.java @@ -224,6 +224,7 @@ Content linkSeeReferenceOutput(Element holder, labelContent = plainOrCode(isPlain, Text.of(utils.getSimpleName(refClass))); } return htmlWriter.getLink(new HtmlLinkInfo(config, HtmlLinkInfo.Kind.PLAIN, refClass) + .skipPreview(isPlain) .label(labelContent)); } else if (refMem == null) { // This is a fragment reference since refClass and refFragment are not null but refMem is null. diff --git a/test/langtools/jdk/javadoc/doclet/testPreview/TestPreview.java b/test/langtools/jdk/javadoc/doclet/testPreview/TestPreview.java index a59e68d25a0..369312d690a 100644 --- a/test/langtools/jdk/javadoc/doclet/testPreview/TestPreview.java +++ b/test/langtools/jdk/javadoc/doclet/testPreview/TestPreview.java @@ -24,7 +24,7 @@ /* * @test * @bug 8250768 8261976 8277300 8282452 8287597 8325325 8325874 8297879 - * 8331947 + * 8331947 8281533 * @summary test generated docs for items declared using preview * @library ../../lib * @modules jdk.javadoc/jdk.javadoc.internal.tool @@ -156,7 +156,20 @@ public void testPreviewAPIJavadoc() {

  • java.base
  • preview
  • Core
  • - """); + """, + """ + """, + """ +
  • CoreRecord<\ + /a>PREVIEW<\ + /li> +
  • core record
  • """); // 8331947: Support preview features without JEP should not be included in Preview API page checkOutput("preview-list.html", false, "supportMethod"); diff --git a/test/langtools/jdk/javadoc/doclet/testPreview/api/preview/Core.java b/test/langtools/jdk/javadoc/doclet/testPreview/api/preview/Core.java index d2cf31c2e89..1b23d202285 100644 --- a/test/langtools/jdk/javadoc/doclet/testPreview/api/preview/Core.java +++ b/test/langtools/jdk/javadoc/doclet/testPreview/api/preview/Core.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,13 @@ import jdk.internal.javac.PreviewFeature; import jdk.internal.javac.PreviewFeature.Feature; +/** + * Preview feature. Links: {@link CoreRecord}, {@link CoreRecord core record}, + * {@linkplain CoreRecord}, {@linkplain CoreRecord core record}. + * + * @see CoreRecord + * @see CoreRecord core record + */ @PreviewFeature(feature=Feature.TEST) public class Core { } From 68d1f5c33bf3f64f44f8a10c2f9e4007cfd07d2b Mon Sep 17 00:00:00 2001 From: Chen Liang Date: Tue, 20 Aug 2024 05:43:04 +0000 Subject: [PATCH 37/67] 8338543: ClassBuilder withMethod builders should cache the method type symbol Reviewed-by: asotona --- .../classes/java/lang/classfile/ClassBuilder.java | 5 +---- .../internal/classfile/impl/ChainedClassBuilder.java | 10 ++++++++++ .../internal/classfile/impl/DirectClassBuilder.java | 8 ++++++++ 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/src/java.base/share/classes/java/lang/classfile/ClassBuilder.java b/src/java.base/share/classes/java/lang/classfile/ClassBuilder.java index 7ad6e94df3f..98371c9e15a 100644 --- a/src/java.base/share/classes/java/lang/classfile/ClassBuilder.java +++ b/src/java.base/share/classes/java/lang/classfile/ClassBuilder.java @@ -276,10 +276,7 @@ default ClassBuilder withMethodBody(String name, MethodTypeDesc descriptor, int methodFlags, Consumer handler) { - return withMethodBody(constantPool().utf8Entry(name), - constantPool().utf8Entry(descriptor), - methodFlags, - handler); + return withMethod(name, descriptor, methodFlags, mb -> mb.withCode(handler)); } /** diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/ChainedClassBuilder.java b/src/java.base/share/classes/jdk/internal/classfile/impl/ChainedClassBuilder.java index d31debf6f35..50c1590e8a2 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/ChainedClassBuilder.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/ChainedClassBuilder.java @@ -24,6 +24,7 @@ */ package jdk.internal.classfile.impl; +import java.lang.constant.MethodTypeDesc; import java.util.function.Consumer; import java.lang.classfile.*; @@ -78,6 +79,15 @@ public ClassBuilder withMethod(Utf8Entry name, Utf8Entry descriptor, int flags, return this; } + @Override + public ClassBuilder withMethod(String name, MethodTypeDesc descriptor, int flags, Consumer handler) { + var mb = new BufferedMethodBuilder(terminal.constantPool, terminal.context, + constantPool().utf8Entry(name), constantPool().utf8Entry(descriptor), flags, null); + mb.mDesc = descriptor; + consumer.accept(mb.run(handler).toModel()); + return this; + } + @Override public ClassBuilder transformMethod(MethodModel method, MethodTransform transform) { BufferedMethodBuilder builder = new 
BufferedMethodBuilder(terminal.constantPool, terminal.context, diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/DirectClassBuilder.java b/src/java.base/share/classes/jdk/internal/classfile/impl/DirectClassBuilder.java index 21fde0f6002..0d61895fe9f 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/DirectClassBuilder.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/DirectClassBuilder.java @@ -26,6 +26,7 @@ package jdk.internal.classfile.impl; import java.lang.constant.ConstantDescs; +import java.lang.constant.MethodTypeDesc; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -106,6 +107,13 @@ public ClassBuilder withMethod(Utf8Entry name, .run(handler)); } + @Override + public ClassBuilder withMethod(String name, MethodTypeDesc descriptor, int flags, Consumer handler) { + var method = new DirectMethodBuilder(constantPool, context, constantPool.utf8Entry(name), constantPool.utf8Entry(descriptor), flags, null); + method.mDesc = descriptor; + return withMethod(method.run(handler)); + } + @Override public ClassBuilder transformMethod(MethodModel method, MethodTransform transform) { DirectMethodBuilder builder = new DirectMethodBuilder(constantPool, context, method.methodName(), From 9775d57168695dc0d758e017fe5069d93d593f3e Mon Sep 17 00:00:00 2001 From: Stefan Karlsson Date: Tue, 20 Aug 2024 06:15:56 +0000 Subject: [PATCH 38/67] 8338139: {ClassLoading,Memory}MXBean::isVerbose methods are inconsistent with their setVerbose methods Co-authored-by: David Holmes Reviewed-by: lmesnik, dcubed, dholmes --- .../share/services/classLoadingService.cpp | 18 +++- .../share/services/classLoadingService.hpp | 4 +- src/hotspot/share/services/memoryService.cpp | 17 +++- src/hotspot/share/services/memoryService.hpp | 4 +- .../TestVerboseClassLoading.java | 84 +++++++++++++++++++ .../MemoryMXBean/TestVerboseMemory.java | 79 +++++++++++++++++ 6 files changed, 200 insertions(+), 6 deletions(-) create mode 100644 test/jdk/java/lang/management/ClassLoadingMXBean/TestVerboseClassLoading.java create mode 100644 test/jdk/java/lang/management/MemoryMXBean/TestVerboseMemory.java diff --git a/src/hotspot/share/services/classLoadingService.cpp b/src/hotspot/share/services/classLoadingService.cpp index 2df8d12278d..09da45dc079 100644 --- a/src/hotspot/share/services/classLoadingService.cpp +++ b/src/hotspot/share/services/classLoadingService.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -128,6 +128,22 @@ bool ClassLoadingService::set_verbose(bool verbose) { return verbose; } +bool ClassLoadingService::get_verbose() { + for (LogTagSet* ts = LogTagSet::first(); ts != nullptr; ts = ts->next()) { + // set_verbose looks for a non-exact match for class+load, + // so look for all tag sets that match class+load* + if (ts->contains(LogTag::_class) && + ts->contains(LogTag::_load)) { + LogLevelType l = ts->level_for(LogConfiguration::StdoutLog); + if (l != LogLevel::Info && l != LogLevel::Debug && l != LogLevel::Trace) { + return false; + } + } + } + + return true; +} + // Caller to this function must own Management_lock void ClassLoadingService::reset_trace_class_unloading() { assert(Management_lock->owned_by_self(), "Must own the Management_lock"); diff --git a/src/hotspot/share/services/classLoadingService.hpp b/src/hotspot/share/services/classLoadingService.hpp index f9db3da5091..3aeb3f556a5 100644 --- a/src/hotspot/share/services/classLoadingService.hpp +++ b/src/hotspot/share/services/classLoadingService.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,6 +53,7 @@ class ClassLoadingService : public AllStatic { public: static void init() NOT_MANAGEMENT_RETURN; static bool set_verbose(bool verbose) NOT_MANAGEMENT_RETURN_(false); + static bool get_verbose() NOT_MANAGEMENT_RETURN_(false); static void reset_trace_class_unloading() NOT_MANAGEMENT_RETURN; static jlong loaded_class_count() NOT_MANAGEMENT_RETURN_(0L); static jlong unloaded_class_count() NOT_MANAGEMENT_RETURN_(0L); @@ -63,7 +64,6 @@ class ClassLoadingService : public AllStatic { static jlong loaded_shared_class_bytes() NOT_MANAGEMENT_RETURN_(0L); static jlong unloaded_shared_class_bytes() NOT_MANAGEMENT_RETURN_(0L); static jlong class_method_data_size() NOT_MANAGEMENT_RETURN_(0L); - static bool get_verbose() { return log_is_enabled(Info, class, load); } static void notify_class_loaded(InstanceKlass* k, bool shared_class) NOT_MANAGEMENT_RETURN; diff --git a/src/hotspot/share/services/memoryService.cpp b/src/hotspot/share/services/memoryService.cpp index 21b773e204e..de30bee12bd 100644 --- a/src/hotspot/share/services/memoryService.cpp +++ b/src/hotspot/share/services/memoryService.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -202,6 +202,21 @@ bool MemoryService::set_verbose(bool verbose) { return verbose; } +bool MemoryService::get_verbose() { + for (LogTagSet* ts = LogTagSet::first(); ts != nullptr; ts = ts->next()) { + // set_verbose only sets gc and not gc*, so check for an exact match + const bool is_gc_exact_match = ts->contains(LogTag::_gc) && ts->ntags() == 1; + if (is_gc_exact_match) { + LogLevelType l = ts->level_for(LogConfiguration::StdoutLog); + if (l == LogLevel::Info || l == LogLevel::Debug || l == LogLevel::Trace) { + return true; + } + } + } + + return false; +} + Handle MemoryService::create_MemoryUsage_obj(MemoryUsage usage, TRAPS) { InstanceKlass* ik = Management::java_lang_management_MemoryUsage_klass(CHECK_NH); diff --git a/src/hotspot/share/services/memoryService.hpp b/src/hotspot/share/services/memoryService.hpp index 2d28f25c695..d10c3d2e9d9 100644 --- a/src/hotspot/share/services/memoryService.hpp +++ b/src/hotspot/share/services/memoryService.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -106,8 +106,8 @@ class MemoryService : public AllStatic { GCCause::Cause cause, bool allMemoryPoolsAffected, const char* notificationMessage = nullptr); - static bool get_verbose() { return log_is_enabled(Info, gc); } static bool set_verbose(bool verbose); + static bool get_verbose(); // Create an instance of java/lang/management/MemoryUsage static Handle create_MemoryUsage_obj(MemoryUsage usage, TRAPS); diff --git a/test/jdk/java/lang/management/ClassLoadingMXBean/TestVerboseClassLoading.java b/test/jdk/java/lang/management/ClassLoadingMXBean/TestVerboseClassLoading.java new file mode 100644 index 00000000000..4e865f24b29 --- /dev/null +++ b/test/jdk/java/lang/management/ClassLoadingMXBean/TestVerboseClassLoading.java @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8338139 + * @summary Basic unit test of ClassLoadingMXBean.set/isVerbose() when + * related unified logging is enabled. 
+ * + * @run main/othervm -Xlog:class+load=trace:file=vm.log TestVerboseClassLoading false + * @run main/othervm -Xlog:class+load=debug:file=vm.log TestVerboseClassLoading false + * @run main/othervm -Xlog:class+load=info:file=vm.log TestVerboseClassLoading false + * + * @run main/othervm -Xlog:class+load=trace TestVerboseClassLoading false + * @run main/othervm -Xlog:class+load=debug TestVerboseClassLoading false + * @run main/othervm -Xlog:class+load=info TestVerboseClassLoading false + * @run main/othervm -Xlog:class+load=warning TestVerboseClassLoading false + * @run main/othervm -Xlog:class+load=error TestVerboseClassLoading false + * @run main/othervm -Xlog:class+load=off TestVerboseClassLoading false + * + * @run main/othervm -Xlog:class+load*=trace TestVerboseClassLoading true + * @run main/othervm -Xlog:class+load*=debug TestVerboseClassLoading true + * @run main/othervm -Xlog:class+load*=info TestVerboseClassLoading true + * @run main/othervm -Xlog:class+load*=warning TestVerboseClassLoading false + * @run main/othervm -Xlog:class+load*=error TestVerboseClassLoading false + * @run main/othervm -Xlog:class+load*=off TestVerboseClassLoading false + * + * @run main/othervm -Xlog:class+load*=info,class+load+cause=trace TestVerboseClassLoading true + * @run main/othervm -Xlog:class+load*=info,class+load+cause=debug TestVerboseClassLoading true + * @run main/othervm -Xlog:class+load*=info,class+load+cause=info TestVerboseClassLoading true + * @run main/othervm -Xlog:class+load*=info,class+load+cause=warning TestVerboseClassLoading false + * @run main/othervm -Xlog:class+load*=info,class+load+cause=error TestVerboseClassLoading false + * @run main/othervm -Xlog:class+load*=info,class+load+cause=off TestVerboseClassLoading false + * + * @run main/othervm -Xlog:all=trace:file=vm.log TestVerboseClassLoading false + */ + +import java.lang.management.ManagementFactory; +import java.lang.management.ClassLoadingMXBean; + +public class TestVerboseClassLoading { + + public static void main(String[] args) throws Exception { + ClassLoadingMXBean mxBean = ManagementFactory.getClassLoadingMXBean(); + boolean expected = Boolean.parseBoolean(args[0]); + boolean initial = mxBean.isVerbose(); + if (expected != initial) { + throw new Error("Initial verbosity setting was unexpectedly " + initial); + } + mxBean.setVerbose(false); + if (mxBean.isVerbose()) { + throw new Error("Verbosity was still enabled"); + } + mxBean.setVerbose(true); + if (!mxBean.isVerbose()) { + throw new Error("Verbosity was still disabled"); + } + // Turn off again as a double-check and also to avoid excessive logging + mxBean.setVerbose(false); + if (mxBean.isVerbose()) { + throw new Error("Verbosity was still enabled"); + } + } +} diff --git a/test/jdk/java/lang/management/MemoryMXBean/TestVerboseMemory.java b/test/jdk/java/lang/management/MemoryMXBean/TestVerboseMemory.java new file mode 100644 index 00000000000..7d34c45036b --- /dev/null +++ b/test/jdk/java/lang/management/MemoryMXBean/TestVerboseMemory.java @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
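TestVerboseClassLoading above and TestVerboseMemory below pin down the new isVerbose behaviour through jtreg. For a quick manual check outside the harness, a standalone probe such as the following can be run with different -Xlog settings and its output compared against the flags given on the command line; the class name is arbitrary:

    import java.lang.management.ClassLoadingMXBean;
    import java.lang.management.ManagementFactory;
    import java.lang.management.MemoryMXBean;

    // Try e.g. `java -Xlog:gc VerboseProbe` or `java -Xlog:class+load=info VerboseProbe`.
    public class VerboseProbe {
        public static void main(String[] args) {
            ClassLoadingMXBean classLoading = ManagementFactory.getClassLoadingMXBean();
            MemoryMXBean memory = ManagementFactory.getMemoryMXBean();
            System.out.println("class loading verbose: " + classLoading.isVerbose());
            System.out.println("memory (gc) verbose:   " + memory.isVerbose());
        }
    }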
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8338139 + * @summary Basic unit test of TestVerboseMemory.set/isVerbose() when + * related unified logging is enabled. + * + * @run main/othervm -Xlog:gc=trace:file=vm.log TestVerboseMemory false + * @run main/othervm -Xlog:gc=debug:file=vm.log TestVerboseMemory false + * @run main/othervm -Xlog:gc=info:file=vm.log TestVerboseMemory false + * + * @run main/othervm -Xlog:gc=off TestVerboseMemory false + * @run main/othervm -Xlog:gc=error TestVerboseMemory false + * @run main/othervm -Xlog:gc=warning TestVerboseMemory false + * + * @run main/othervm -Xlog:gc=info TestVerboseMemory true + * @run main/othervm -Xlog:gc=trace TestVerboseMemory true + * @run main/othervm -Xlog:gc=debug TestVerboseMemory true + * + * @run main/othervm -Xlog:gc*=info TestVerboseMemory true + * @run main/othervm -Xlog:gc*=debug TestVerboseMemory true + * @run main/othervm -Xlog:gc*=trace TestVerboseMemory true + * + * @run main/othervm -Xlog:gc=info,gc+init=off TestVerboseMemory true + * @run main/othervm -Xlog:gc=off,gc+init=info TestVerboseMemory false + * @run main/othervm -Xlog:gc,gc+init TestVerboseMemory true + * + * @run main/othervm -Xlog:all=trace:file=vm.log TestVerboseMemory false + */ + +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; + +public class TestVerboseMemory { + + public static void main(String[] args) throws Exception { + MemoryMXBean mxBean = ManagementFactory.getMemoryMXBean(); + boolean expected = Boolean.parseBoolean(args[0]); + boolean initial = mxBean.isVerbose(); + if (expected != initial) { + throw new Error("Initial verbosity setting was unexpectedly " + initial); + } + mxBean.setVerbose(false); + if (mxBean.isVerbose()) { + throw new Error("Verbosity was still enabled"); + } + mxBean.setVerbose(true); + if (!mxBean.isVerbose()) { + throw new Error("Verbosity was still disabled"); + } + // Turn off again as a double-check and also to avoid excessive logging + mxBean.setVerbose(false); + if (mxBean.isVerbose()) { + throw new Error("Verbosity was still enabled"); + } + } +} From b9d49dcef22ab81a087d890bbac0329a5244a2ef Mon Sep 17 00:00:00 2001 From: Aleksey Shipilev Date: Tue, 20 Aug 2024 08:40:45 +0000 Subject: [PATCH 39/67] 8337981: ShenandoahHeap::is_in should check for alive regions Reviewed-by: rkennke, wkemper --- .../share/gc/shenandoah/shenandoahAsserts.cpp | 46 ++++++++++++------- .../share/gc/shenandoah/shenandoahAsserts.hpp | 16 +++---- .../shenandoahCollectionSet.inline.hpp | 4 +- .../gc/shenandoah/shenandoahConcurrentGC.cpp | 2 +- .../shenandoahForwarding.inline.hpp | 2 +- .../share/gc/shenandoah/shenandoahHeap.cpp | 15 ++++-- .../share/gc/shenandoah/shenandoahHeap.hpp | 2 + .../gc/shenandoah/shenandoahMarkBitMap.cpp | 2 +- .../shenandoah/shenandoahMarkingContext.hpp | 4 +- 
.../shenandoahMarkingContext.inline.hpp | 12 ++++- .../shenandoahReferenceProcessor.cpp | 31 +++++++++---- .../gc/shenandoah/shenandoahVerifier.cpp | 35 ++++++++------ 12 files changed, 111 insertions(+), 60 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp index 8235b59b80e..1539c7c2c5d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp @@ -37,7 +37,7 @@ void print_raw_memory(ShenandoahMessageBuffer &msg, void* loc) { // should be in heap, in known committed region, within that region. ShenandoahHeap* heap = ShenandoahHeap::heap(); - if (!heap->is_in(loc)) return; + if (!heap->is_in_reserved(loc)) return; ShenandoahHeapRegion* r = heap->heap_region_containing(loc); if (r != nullptr && r->is_committed()) { @@ -77,7 +77,7 @@ void ShenandoahAsserts::print_obj(ShenandoahMessageBuffer& msg, oop obj) { void ShenandoahAsserts::print_non_obj(ShenandoahMessageBuffer& msg, void* loc) { ShenandoahHeap* heap = ShenandoahHeap::heap(); - if (heap->is_in(loc)) { + if (heap->is_in_reserved(loc)) { msg.append(" inside Java heap\n"); ShenandoahHeapRegion *r = heap->heap_region_containing(loc); stringStream ss; @@ -96,7 +96,7 @@ void ShenandoahAsserts::print_non_obj(ShenandoahMessageBuffer& msg, void* loc) { void ShenandoahAsserts::print_obj_safe(ShenandoahMessageBuffer& msg, void* loc) { ShenandoahHeap* heap = ShenandoahHeap::heap(); msg.append(" " PTR_FORMAT " - safe print, no details\n", p2i(loc)); - if (heap->is_in(loc)) { + if (heap->is_in_reserved(loc)) { ShenandoahHeapRegion* r = heap->heap_region_containing(loc); if (r != nullptr) { stringStream ss; @@ -113,7 +113,7 @@ void ShenandoahAsserts::print_failure(SafeLevel level, oop obj, void* interior_l ShenandoahHeap* heap = ShenandoahHeap::heap(); ResourceMark rm; - bool loc_in_heap = (loc != nullptr && heap->is_in(loc)); + bool loc_in_heap = (loc != nullptr && heap->is_in_reserved(loc)); ShenandoahMessageBuffer msg("%s; %s\n\n", phase, label); @@ -166,22 +166,22 @@ void ShenandoahAsserts::print_failure(SafeLevel level, oop obj, void* interior_l report_vm_error(file, line, msg.buffer()); } -void ShenandoahAsserts::assert_in_heap(void* interior_loc, oop obj, const char *file, int line) { +void ShenandoahAsserts::assert_in_heap_bounds(void* interior_loc, oop obj, const char *file, int line) { ShenandoahHeap* heap = ShenandoahHeap::heap(); - if (!heap->is_in(obj)) { - print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_in_heap failed", - "oop must point to a heap address", + if (!heap->is_in_reserved(obj)) { + print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_in_heap_bounds failed", + "oop must be in heap bounds", file, line); } } -void ShenandoahAsserts::assert_in_heap_or_null(void* interior_loc, oop obj, const char *file, int line) { +void ShenandoahAsserts::assert_in_heap_bounds_or_null(void* interior_loc, oop obj, const char *file, int line) { ShenandoahHeap* heap = ShenandoahHeap::heap(); - if (obj != nullptr && !heap->is_in(obj)) { - print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_in_heap_or_null failed", - "oop must point to a heap address", + if (obj != nullptr && !heap->is_in_reserved(obj)) { + print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_in_heap_bounds_or_null failed", + "oop must be in heap bounds", file, line); } } @@ -191,9 +191,9 @@ void 
ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char* // Step 1. Check that obj is correct. // After this step, it is safe to call heap_region_containing(). - if (!heap->is_in(obj)) { + if (!heap->is_in_reserved(obj)) { print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_correct failed", - "oop must point to a heap address", + "oop must be in heap bounds", file, line); } @@ -210,6 +210,12 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char* file,line); } + if (!heap->is_in(obj)) { + print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_correct failed", + "Object should be in active region area", + file, line); + } + oop fwd = ShenandoahForwarding::get_forwardee_raw_unchecked(obj); if (obj != fwd) { @@ -223,9 +229,9 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char* } // Step 2. Check that forwardee is correct - if (!heap->is_in(fwd)) { + if (!heap->is_in_reserved(fwd)) { print_failure(_safe_oop, obj, interior_loc, nullptr, "Shenandoah assert_correct failed", - "Forwardee must point to a heap address", + "Forwardee must be in heap bounds", file, line); } @@ -236,9 +242,15 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char* } // Step 3. Check that forwardee points to correct region + if (!heap->is_in(fwd)) { + print_failure(_safe_oop, obj, interior_loc, nullptr, "Shenandoah assert_correct failed", + "Forwardee should be in active region area", + file, line); + } + if (heap->heap_region_index_containing(fwd) == heap->heap_region_index_containing(obj)) { print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_correct failed", - "Non-trivial forwardee should in another region", + "Non-trivial forwardee should be in another region", file, line); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp index c730eafb89d..154edebcf3e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp @@ -53,8 +53,8 @@ class ShenandoahAsserts { static void print_rp_failure(const char *label, BoolObjectClosure* actual, const char *file, int line); - static void assert_in_heap(void* interior_loc, oop obj, const char* file, int line); - static void assert_in_heap_or_null(void* interior_loc, oop obj, const char* file, int line); + static void assert_in_heap_bounds(void* interior_loc, oop obj, const char* file, int line); + static void assert_in_heap_bounds_or_null(void* interior_loc, oop obj, const char* file, int line); static void assert_in_correct_region(void* interior_loc, oop obj, const char* file, int line); static void assert_correct(void* interior_loc, oop obj, const char* file, int line); @@ -74,10 +74,10 @@ class ShenandoahAsserts { static void assert_heaplocked_or_safepoint(const char* file, int line); #ifdef ASSERT -#define shenandoah_assert_in_heap(interior_loc, obj) \ - ShenandoahAsserts::assert_in_heap(interior_loc, obj, __FILE__, __LINE__) -#define shenandoah_assert_in_heap_or_null(interior_loc, obj) \ - ShenandoahAsserts::assert_in_heap_or_null(interior_loc, obj, __FILE__, __LINE__) +#define shenandoah_assert_in_heap_bounds(interior_loc, obj) \ + ShenandoahAsserts::assert_in_heap_bounds(interior_loc, obj, __FILE__, __LINE__) +#define shenandoah_assert_in_heap_bounds_or_null(interior_loc, obj) \ + ShenandoahAsserts::assert_in_heap_bounds_or_null(interior_loc, obj, __FILE__, __LINE__) 
#define shenandoah_assert_in_correct_region(interior_loc, obj) \ ShenandoahAsserts::assert_in_correct_region(interior_loc, obj, __FILE__, __LINE__) @@ -164,8 +164,8 @@ class ShenandoahAsserts { #define shenandoah_assert_heaplocked_or_safepoint() \ ShenandoahAsserts::assert_heaplocked_or_safepoint(__FILE__, __LINE__) #else -#define shenandoah_assert_in_heap(interior_loc, obj) -#define shenandoah_assert_in_heap_or_null(interior_loc, obj) +#define shenandoah_assert_in_heap_bounds(interior_loc, obj) +#define shenandoah_assert_in_heap_bounds_or_null(interior_loc, obj) #define shenandoah_assert_in_correct_region(interior_loc, obj) #define shenandoah_assert_correct_if(interior_loc, obj, condition) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp index 6eb026561e4..cceebae0b1c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp @@ -41,12 +41,12 @@ bool ShenandoahCollectionSet::is_in(ShenandoahHeapRegion* r) const { } bool ShenandoahCollectionSet::is_in(oop p) const { - shenandoah_assert_in_heap_or_null(nullptr, p); + shenandoah_assert_in_heap_bounds_or_null(nullptr, p); return is_in_loc(cast_from_oop(p)); } bool ShenandoahCollectionSet::is_in_loc(void* p) const { - assert(p == nullptr || _heap->is_in(p), "Must be in the heap"); + assert(p == nullptr || _heap->is_in_reserved(p), "Must be in the heap"); uintx index = ((uintx) p) >> _region_size_bytes_shift; // no need to subtract the bottom of the heap from p, // _biased_cset_map is biased diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 0301ef422a6..0e18b591037 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -718,7 +718,7 @@ void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) { const oop obj = RawAccess<>::oop_load(p); if (!CompressedOops::is_null(obj)) { if (!_mark_context->is_marked(obj)) { - shenandoah_assert_correct(p, obj); + // Note: The obj is dead here. Do not touch it, just clear. 
ShenandoahHeap::atomic_clear_oop(p, obj); } else if (_evac_in_progress && _heap->in_collection_set(obj)) { oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp index cf69eb67e47..01294f9c890 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp @@ -32,7 +32,7 @@ #include "runtime/javaThread.hpp" inline oop ShenandoahForwarding::get_forwardee_raw(oop obj) { - shenandoah_assert_in_heap(nullptr, obj); + shenandoah_assert_in_heap_bounds(nullptr, obj); return get_forwardee_raw_unchecked(obj); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 6f5fce53f85..a587cc417e3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -730,9 +730,18 @@ size_t ShenandoahHeap::initial_capacity() const { } bool ShenandoahHeap::is_in(const void* p) const { - HeapWord* heap_base = (HeapWord*) base(); - HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions(); - return p >= heap_base && p < last_region_end; + if (is_in_reserved(p)) { + if (is_full_gc_move_in_progress()) { + // Full GC move is running, we do not have a consistent region + // information yet. But we know the pointer is in heap. + return true; + } + // Now check if we point to a live section in active region. + ShenandoahHeapRegion* r = heap_region_containing(p); + return (r->is_active() && p < r->top()); + } else { + return false; + } } void ShenandoahHeap::maybe_uncommit(double shrink_before, size_t shrink_until) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp index 81b1c3df6f4..1ee1f9dfc88 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp @@ -493,6 +493,8 @@ class ShenandoahHeap : public CollectedHeap, public ShenandoahSpaceInfo { public: bool is_maximal_no_gc() const override shenandoah_not_implemented_return(false); + // Check the pointer is in active part of Java heap. + // Use is_in_reserved to check if object is within heap bounds. 
bool is_in(const void* p) const override; bool requires_barriers(stackChunkOop obj) const override; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp index 30389b4e95c..fa2e15a98c7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp @@ -122,7 +122,7 @@ void ShenandoahMarkBitMap::clear_range_large(MemRegion mr) { #ifdef ASSERT void ShenandoahMarkBitMap::check_mark(HeapWord* addr) const { - assert(ShenandoahHeap::heap()->is_in(addr), + assert(ShenandoahHeap::heap()->is_in_reserved(addr), "Trying to access bitmap " PTR_FORMAT " for address " PTR_FORMAT " not in the heap.", p2i(this), p2i(addr)); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp index d58117c02e2..62baf3e61ea 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp @@ -63,8 +63,10 @@ class ShenandoahMarkingContext : public CHeapObj { inline bool mark_weak(oop obj); // Simple versions of marking accessors, to be used outside of marking (e.g. no possible concurrent updates) - inline bool is_marked(oop) const; + inline bool is_marked(oop obj) const; + inline bool is_marked(HeapWord* raw_obj) const; inline bool is_marked_strong(oop obj) const; + inline bool is_marked_strong(HeapWord* raw_obj) const; inline bool is_marked_weak(oop obj) const; inline HeapWord* get_next_marked_addr(HeapWord* addr, HeapWord* limit) const; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp index 34b8288f476..1ba3caf26b7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp @@ -38,11 +38,19 @@ inline bool ShenandoahMarkingContext::mark_weak(oop obj) { } inline bool ShenandoahMarkingContext::is_marked(oop obj) const { - return allocated_after_mark_start(obj) || _mark_bit_map.is_marked(cast_from_oop(obj)); + return is_marked(cast_from_oop(obj)); +} + +inline bool ShenandoahMarkingContext::is_marked(HeapWord* raw_obj) const { + return allocated_after_mark_start(raw_obj) || _mark_bit_map.is_marked(raw_obj); } inline bool ShenandoahMarkingContext::is_marked_strong(oop obj) const { - return allocated_after_mark_start(obj) || _mark_bit_map.is_marked_strong(cast_from_oop(obj)); + return is_marked_strong(cast_from_oop(obj)); +} + +inline bool ShenandoahMarkingContext::is_marked_strong(HeapWord* raw_obj) const { + return allocated_after_mark_start(raw_obj) || _mark_bit_map.is_marked_strong(raw_obj); } inline bool ShenandoahMarkingContext::is_marked_weak(oop obj) const { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp index f395119d46a..42c8d0ad271 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp @@ -83,10 +83,21 @@ static volatile T* reference_referent_addr(oop reference) { return (volatile T*)java_lang_ref_Reference::referent_addr_raw(reference); } +inline oop reference_coop_decode_raw(narrowOop v) { + return CompressedOops::is_null(v) ? 
nullptr : CompressedOops::decode_raw(v); +} + +inline oop reference_coop_decode_raw(oop v) { + return v; +} + +// Raw referent, it can be dead. You cannot treat it as oop without additional safety +// checks, this is why it is HeapWord*. The decoding uses a special-case inlined +// CompressedOops::decode method that bypasses normal oop-ness checks. template -static oop reference_referent(oop reference) { - T heap_oop = Atomic::load(reference_referent_addr(reference)); - return CompressedOops::decode(heap_oop); +static HeapWord* reference_referent_raw(oop reference) { + T raw_oop = Atomic::load(reference_referent_addr(reference)); + return cast_from_oop(reference_coop_decode_raw(raw_oop)); } static void reference_clear_referent(oop reference) { @@ -278,8 +289,8 @@ bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType template bool ShenandoahReferenceProcessor::should_drop(oop reference, ReferenceType type) const { - const oop referent = reference_referent(reference); - if (referent == nullptr) { + HeapWord* raw_referent = reference_referent_raw(reference); + if (raw_referent == nullptr) { // Reference has been cleared, by a call to Reference.enqueue() // or Reference.clear() from the application, which means we // should drop the reference. @@ -289,9 +300,9 @@ bool ShenandoahReferenceProcessor::should_drop(oop reference, ReferenceType type // Check if the referent is still alive, in which case we should // drop the reference. if (type == REF_PHANTOM) { - return ShenandoahHeap::heap()->complete_marking_context()->is_marked(referent); + return ShenandoahHeap::heap()->complete_marking_context()->is_marked(raw_referent); } else { - return ShenandoahHeap::heap()->complete_marking_context()->is_marked_strong(referent); + return ShenandoahHeap::heap()->complete_marking_context()->is_marked_strong(raw_referent); } } @@ -303,7 +314,7 @@ void ShenandoahReferenceProcessor::make_inactive(oop reference, ReferenceType ty // next field. An application can't call FinalReference.enqueue(), so there is // no race to worry about when setting the next field. 
assert(reference_next(reference) == nullptr, "Already inactive"); - assert(ShenandoahHeap::heap()->marking_context()->is_marked(reference_referent(reference)), "only make inactive final refs with alive referents"); + assert(ShenandoahHeap::heap()->marking_context()->is_marked(reference_referent_raw(reference)), "only make inactive final refs with alive referents"); reference_set_next(reference, reference); } else { // Clear referent @@ -376,8 +387,8 @@ oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) { log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type)); #ifdef ASSERT - oop referent = reference_referent(reference); - assert(referent == nullptr || ShenandoahHeap::heap()->marking_context()->is_marked(referent), + HeapWord* raw_referent = reference_referent_raw(reference); + assert(raw_referent == nullptr || ShenandoahHeap::heap()->marking_context()->is_marked(raw_referent), "only drop references with alive referents"); #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 23da3d7f637..4834ecba543 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -51,13 +51,6 @@ static bool is_instance_ref_klass(Klass* k) { return k->is_instance_klass() && InstanceKlass::cast(k)->reference_type() != REF_NONE; } -class ShenandoahIgnoreReferenceDiscoverer : public ReferenceDiscoverer { -public: - virtual bool discover_reference(oop obj, ReferenceType type) { - return true; - } -}; - class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { private: const char* _phase; @@ -68,6 +61,7 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { ShenandoahLivenessData* _ld; void* _interior_loc; oop _loc; + ReferenceIterationMode _ref_mode; public: ShenandoahVerifyOopClosure(ShenandoahVerifierStack* stack, MarkBitMap* map, ShenandoahLivenessData* ld, @@ -82,10 +76,20 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { _loc(nullptr) { if (options._verify_marked == ShenandoahVerifier::_verify_marked_complete_except_references || options._verify_marked == ShenandoahVerifier::_verify_marked_disable) { - set_ref_discoverer_internal(new ShenandoahIgnoreReferenceDiscoverer()); + // Unknown status for Reference.referent field. Do not touch it, it might be dead. + // Normally, barriers would prevent us from seeing the dead referents, but verifier + // runs with barriers disabled. + _ref_mode = DO_FIELDS_EXCEPT_REFERENT; + } else { + // Otherwise do all fields. + _ref_mode = DO_FIELDS; } } + ReferenceIterationMode reference_iteration_mode() override { + return _ref_mode; + } + private: void check(ShenandoahAsserts::SafeLevel level, oop obj, bool test, const char* label) { if (!test) { @@ -119,8 +123,8 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { // that failure report would not try to touch something that was not yet verified to be // safe to process. 
- check(ShenandoahAsserts::_safe_unknown, obj, _heap->is_in(obj), - "oop must be in heap"); + check(ShenandoahAsserts::_safe_unknown, obj, _heap->is_in_reserved(obj), + "oop must be in heap bounds"); check(ShenandoahAsserts::_safe_unknown, obj, is_object_aligned(obj), "oop must be aligned"); @@ -177,8 +181,8 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { ShenandoahHeapRegion* fwd_reg = nullptr; if (obj != fwd) { - check(ShenandoahAsserts::_safe_oop, obj, _heap->is_in(fwd), - "Forwardee must be in heap"); + check(ShenandoahAsserts::_safe_oop, obj, _heap->is_in_reserved(fwd), + "Forwardee must be in heap bounds"); check(ShenandoahAsserts::_safe_oop, obj, !CompressedOops::is_null(fwd), "Forwardee is set"); check(ShenandoahAsserts::_safe_oop, obj, is_object_aligned(fwd), @@ -195,6 +199,9 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { fwd_reg = _heap->heap_region_containing(fwd); + check(ShenandoahAsserts::_safe_oop, obj, fwd_reg->is_active(), + "Forwardee should be in active region"); + // Verify that forwardee is not in the dead space: check(ShenandoahAsserts::_safe_oop, obj, !fwd_reg->is_humongous(), "Should have no humongous forwardees"); @@ -324,8 +331,8 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { _loc = nullptr; } - virtual void do_oop(oop* p) { do_oop_work(p); } - virtual void do_oop(narrowOop* p) { do_oop_work(p); } + virtual void do_oop(oop* p) override { do_oop_work(p); } + virtual void do_oop(narrowOop* p) override { do_oop_work(p); } }; class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure { From 89ca5b6fbd82f00375b4f96b2f3526078088d3f9 Mon Sep 17 00:00:00 2001 From: Amit Kumar Date: Tue, 20 Aug 2024 09:54:20 +0000 Subject: [PATCH 40/67] 8338365: [PPC64, s390] Out-of-bounds array access in secondary_super_cache Reviewed-by: mdoerr, aph, rrich --- src/hotspot/cpu/ppc/macroAssembler_ppc.cpp | 6 ++---- src/hotspot/cpu/s390/macroAssembler_s390.cpp | 4 ++-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp index c7cf678b49e..3b48b4020cc 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp @@ -2170,7 +2170,6 @@ do { \ (result == R8_ARG6 || result == noreg), "registers must match ppc64.ad"); \ } while(0) -// Return true: we succeeded in generating this code void MacroAssembler::lookup_secondary_supers_table(Register r_sub_klass, Register r_super_klass, Register temp1, @@ -2292,9 +2291,8 @@ void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_kl // The bitmap is full to bursting. // Implicit invariant: BITMAP_FULL implies (length > 0) - assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), ""); - cmpdi(CCR0, r_bitmap, -1); - beq(CCR0, L_huge); + cmpwi(CCR0, r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2); + bgt(CCR0, L_huge); // NB! Our caller has checked bits 0 and 1 in the bitmap. The // current slot (at secondary_supers[r_array_index]) has not yet diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp index a233934405f..b31d08f9fde 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp @@ -3320,8 +3320,8 @@ void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_kl NearLabel L_huge; // The bitmap is full to bursting. 
- z_cghi(r_bitmap, Klass::SECONDARY_SUPERS_BITMAP_FULL); - z_bre(L_huge); + z_chi(r_array_length, Klass::SECONDARY_SUPERS_BITMAP_FULL - 2); + z_brh(L_huge); // NB! Our caller has checked bits 0 and 1 in the bitmap. The // current slot (at secondary_supers[r_array_index]) has not yet From 7933e45cda7e3eaeabd3b3fa81492ade8e1cc2dc Mon Sep 17 00:00:00 2001 From: Matthias Baesken Date: Tue, 20 Aug 2024 10:43:16 +0000 Subject: [PATCH 41/67] 8338550: Do libubsan1 installation in test container only if requested Reviewed-by: sgehwolf --- .../jdk/test/lib/containers/docker/DockerTestUtils.java | 9 +++++---- .../jdk/test/lib/containers/docker/DockerfileConfig.java | 5 +++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/test/lib/jdk/test/lib/containers/docker/DockerTestUtils.java b/test/lib/jdk/test/lib/containers/docker/DockerTestUtils.java index c87796da47e..4a8915d4631 100644 --- a/test/lib/jdk/test/lib/containers/docker/DockerTestUtils.java +++ b/test/lib/jdk/test/lib/containers/docker/DockerTestUtils.java @@ -321,10 +321,11 @@ private static String limitLines(String buffer, int nrOfLines) { private static void generateDockerFile(Path dockerfile, String baseImage, String baseImageVersion) throws Exception { - String template = - "FROM %s:%s\n" + - "RUN apt-get install libubsan1\n" + - "COPY /jdk /jdk\n" + + String template = "FROM %s:%s\n"; + if (baseImage.contains("ubuntu") && DockerfileConfig.isUbsan()) { + template += "RUN apt-get update && apt-get install -y libubsan1\n"; + } + template = template + "COPY /jdk /jdk\n" + "ENV JAVA_HOME=/jdk\n" + "CMD [\"/bin/bash\"]\n"; String dockerFileStr = String.format(template, baseImage, baseImageVersion); diff --git a/test/lib/jdk/test/lib/containers/docker/DockerfileConfig.java b/test/lib/jdk/test/lib/containers/docker/DockerfileConfig.java index 9d73ad185f1..caa4a5a76cc 100644 --- a/test/lib/jdk/test/lib/containers/docker/DockerfileConfig.java +++ b/test/lib/jdk/test/lib/containers/docker/DockerfileConfig.java @@ -37,6 +37,11 @@ // Note: base image version should not be an empty string. Use "latest" to get the latest version. public class DockerfileConfig { + + public static boolean isUbsan() { + return Boolean.getBoolean("jdk.test.docker.image.isUbsan"); + } + public static String getBaseImageName() { String name = System.getProperty("jdk.test.docker.image.name"); if (name != null) { From 01d03e07c7642e148e4e17848d28686858ea37a7 Mon Sep 17 00:00:00 2001 From: Darragh Clarke Date: Tue, 20 Aug 2024 11:10:18 +0000 Subject: [PATCH 42/67] 8324209: Check implementation of Expect: 100-continue in the java.net.http.HttpClient Reviewed-by: dfuchs, jpai --- .../jdk/internal/net/http/Exchange.java | 75 ++++++++++++------- .../jdk/internal/net/http/ExchangeImpl.java | 10 +++ .../classes/jdk/internal/net/http/Stream.java | 22 +++++- .../net/httpclient/ExpectContinueTest.java | 41 +++++++--- 4 files changed, 109 insertions(+), 39 deletions(-) diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/Exchange.java b/src/java.net.http/share/classes/jdk/internal/net/http/Exchange.java index e643b05422a..eb30dc85e9c 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/Exchange.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/Exchange.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,8 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.function.Function; import java.net.http.HttpClient; import java.net.http.HttpHeaders; @@ -453,32 +455,55 @@ private CompletableFuture checkFor407(ExchangeImpl ex, Throwable t, // for the 100-Continue response private CompletableFuture expectContinue(ExchangeImpl ex) { assert request.expectContinue(); + + long responseTimeoutMillis = 5000; + if (request.timeout().isPresent()) { + final long timeoutMillis = request.timeout().get().toMillis(); + responseTimeoutMillis = Math.min(responseTimeoutMillis, timeoutMillis); + } + return ex.getResponseAsync(parentExecutor) + .completeOnTimeout(null, responseTimeoutMillis, TimeUnit.MILLISECONDS) .thenCompose((Response r1) -> { - Log.logResponse(r1::toString); - int rcode = r1.statusCode(); - if (rcode == 100) { - Log.logTrace("Received 100-Continue: sending body"); - if (debug.on()) debug.log("Received 100-Continue for %s", r1); - CompletableFuture cf = - exchImpl.sendBodyAsync() - .thenCompose(exIm -> exIm.getResponseAsync(parentExecutor)); - cf = wrapForUpgrade(cf); - cf = wrapForLog(cf); - return cf; - } else { - Log.logTrace("Expectation failed: Received {0}", - rcode); - if (debug.on()) debug.log("Expect-Continue failed (%d) for: %s", rcode, r1); - if (upgrading && rcode == 101) { - IOException failed = new IOException( - "Unable to handle 101 while waiting for 100"); - return MinimalFuture.failedFuture(failed); - } - exchImpl.expectContinueFailed(rcode); - return MinimalFuture.completedFuture(r1); - } - }); + // The response will only be null if there was a timeout + // send body regardless + if (r1 == null) { + if (debug.on()) + debug.log("Setting ExpectTimeoutRaised and sending request body"); + exchImpl.setExpectTimeoutRaised(); + CompletableFuture cf = + exchImpl.sendBodyAsync() + .thenCompose(exIm -> exIm.getResponseAsync(parentExecutor)); + cf = wrapForUpgrade(cf); + cf = wrapForLog(cf); + return cf; + } + + Log.logResponse(r1::toString); + int rcode = r1.statusCode(); + if (rcode == 100) { + Log.logTrace("Received 100-Continue: sending body"); + if (debug.on()) + debug.log("Received 100-Continue for %s", r1); + CompletableFuture cf = + exchImpl.sendBodyAsync() + .thenCompose(exIm -> exIm.getResponseAsync(parentExecutor)); + cf = wrapForUpgrade(cf); + cf = wrapForLog(cf); + return cf; + } else { + Log.logTrace("Expectation failed: Received {0}", rcode); + if (debug.on()) + debug.log("Expect-Continue failed (%d) for: %s", rcode, r1); + if (upgrading && rcode == 101) { + IOException failed = new IOException( + "Unable to handle 101 while waiting for 100"); + return MinimalFuture.failedFuture(failed); + } + exchImpl.expectContinueFailed(rcode); + return MinimalFuture.completedFuture(r1); + } + }); } // After sending the request headers, if no ProxyAuthorizationRequired diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/ExchangeImpl.java b/src/java.net.http/share/classes/jdk/internal/net/http/ExchangeImpl.java index 404f970cc59..f393b021cd4 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/ExchangeImpl.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/ExchangeImpl.java @@ -58,6 +58,8 @@ abstract class ExchangeImpl { final Exchange exchange; + private volatile boolean expectTimeoutRaised; + // this 
will be set to true only when the peer explicitly states (through a GOAWAY frame or // a relevant error code in reset frame) that the corresponding stream (id) wasn't processed private volatile boolean unprocessedByPeer; @@ -71,6 +73,14 @@ final Exchange getExchange() { return exchange; } + final void setExpectTimeoutRaised() { + expectTimeoutRaised = true; + } + + final boolean expectTimeoutRaised() { + return expectTimeoutRaised; + } + HttpClientImpl client() { return exchange.client(); } diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/Stream.java b/src/java.net.http/share/classes/jdk/internal/net/http/Stream.java index 1a007e82adc..45633622923 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/Stream.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/Stream.java @@ -1200,11 +1200,17 @@ CompletableFuture getResponseAsync(Executor executor) { try { if (!response_cfs.isEmpty()) { // This CompletableFuture was created by completeResponse(). - // it will be already completed. - cf = response_cfs.remove(0); + // it will be already completed, unless the expect continue + // timeout fired + cf = response_cfs.get(0); + if (cf.isDone()) { + cf = response_cfs.remove(0); + } + // if we find a cf here it should be already completed. // finding a non completed cf should not happen. just assert it. - assert cf.isDone() : "Removing uncompleted response: could cause code to hang!"; + assert cf.isDone() || request.expectContinue && expectTimeoutRaised() + : "Removing uncompleted response: could cause code to hang!"; } else { // getResponseAsync() is called first. Create a CompletableFuture // that will be completed by completeResponse() when @@ -1239,7 +1245,7 @@ void completeResponse(Response resp) { int cfs_len = response_cfs.size(); for (int i=0; i resp, boolean exceptionally, Throwable testThrowable) { + private void verifyRequest(String path, int expectedStatusCode, HttpResponse resp, boolean exceptionally, Throwable testThrowable) { + if (!exceptionally) { + err.printf("Response code %s received for path %s %n", resp.statusCode(), path); + } if (exceptionally && testThrowable != null) { - err.println(testThrowable); + err.println("Finished exceptionally Test throwable: " + testThrowable); assertEquals(IOException.class, testThrowable.getClass()); } else if (exceptionally) { throw new TestException("Expected case to finish with an IOException but testException is null"); } else if (resp != null) { assertEquals(resp.statusCode(), expectedStatusCode); - err.println("Request completed successfully"); + err.println("Request completed successfully for path " + path); err.println("Response Headers: " + resp.headers()); err.println("Response Status Code: " + resp.statusCode()); } From 686eb233d59ab72e872b8dc32cb14bf74519efc5 Mon Sep 17 00:00:00 2001 From: Jaikiran Pai Date: Tue, 20 Aug 2024 12:28:56 +0000 Subject: [PATCH 43/67] 8336817: Several methods on DatagramSocket and MulticastSocket do not specify behaviour when already closed or connected Reviewed-by: dfuchs, alanb --- .../classes/java/net/DatagramSocket.java | 69 +++++++++++-------- .../classes/java/net/MulticastSocket.java | 29 ++++---- 2 files changed, 57 insertions(+), 41 deletions(-) diff --git a/src/java.base/share/classes/java/net/DatagramSocket.java b/src/java.base/share/classes/java/net/DatagramSocket.java index 9e94d1d8b62..655b00bb8b6 100644 --- a/src/java.base/share/classes/java/net/DatagramSocket.java +++ b/src/java.base/share/classes/java/net/DatagramSocket.java @@ -1,5 +1,5 @@ /* - * 
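The Exchange.expectContinue changes above bound the wait for an interim 100 (Continue) response: the response future is completed with null after five seconds, or after the request timeout if that is shorter, and the request body is then sent anyway. A hedged client-side sketch of a request that exercises this path; the URI is a placeholder for a server that delays or never sends its interim response:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;
    import java.time.Duration;

    public class ExpectContinueProbe {
        public static void main(String[] args) throws Exception {
            try (HttpClient client = HttpClient.newHttpClient()) {
                HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:8080/upload"))
                        .expectContinue(true)                          // send "Expect: 100-continue"
                        .timeout(Duration.ofSeconds(3))                // also bounds the wait for the interim response
                        .POST(HttpRequest.BodyPublishers.ofString("payload"))
                        .build();
                // If no "100 Continue" arrives in time, the body is still sent and this
                // call completes with whatever final response the server produces.
                HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
                System.out.println(response.statusCode());
            }
        }
    }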
Copyright (c) 1995, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1995, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -395,7 +395,7 @@ public DatagramSocket(int port, InetAddress laddr) throws SocketException { * * @param addr The address and port to bind to. * @throws SocketException if any error happens during the bind, or if the - * socket is already bound. + * socket is already bound or is closed. * @throws SecurityException if a security manager exists and its * {@code checkListen} method doesn't allow the operation. * @throws IllegalArgumentException if addr is a SocketAddress subclass @@ -422,6 +422,11 @@ public void bind(SocketAddress addr) throws SocketException { * call to send or receive may throw a PortUnreachableException. Note, * there is no guarantee that the exception will be thrown. * + *
<p>
<p>
    If this socket is already connected, then this method will attempt to + * connect to the given address. If this connect fails then the state of + * this socket is unknown - it may or may not be connected to the address + * that it was previously connected to. + * *
<p>
    If a security manager has been installed then it is invoked to check * access to the remote address. Specifically, if the given {@code address} * is a {@link InetAddress#isMulticastAddress multicast address}, @@ -461,7 +466,7 @@ public void bind(SocketAddress addr) throws SocketException { * not permit access to the given remote address * * @throws UncheckedIOException - * may be thrown if connect fails, for example, if the + * if the port is 0 or connect fails, for example, if the * destination address is non-routable * * @see #disconnect @@ -484,6 +489,11 @@ public void connect(InetAddress address, int port) { * have not been {@linkplain #receive(DatagramPacket) received} before invoking * this method, may be discarded. * + *
<p>
    If this socket is already connected, then this method will attempt to + * connect to the given address. If this connect fails then the state of + * this socket is unknown - it may or may not be connected to the address + * that it was previously connected to. + * * @param addr The remote address. * * @throws SocketException @@ -643,7 +653,7 @@ public SocketAddress getLocalSocketAddress() { * * @param p the {@code DatagramPacket} to be sent. * - * @throws IOException if an I/O error occurs. + * @throws IOException if an I/O error occurs, or the socket is closed. * @throws SecurityException if a security manager exists and its * {@code checkMulticast} or {@code checkConnect} * method doesn't allow the send. @@ -702,7 +712,7 @@ public void send(DatagramPacket p) throws IOException { * * @param p the {@code DatagramPacket} into which to place * the incoming data. - * @throws IOException if an I/O error occurs. + * @throws IOException if an I/O error occurs, or the socket is closed. * @throws SocketTimeoutException if setSoTimeout was previously called * and the timeout has expired. * @throws PortUnreachableException may be thrown if the socket is connected @@ -770,7 +780,8 @@ public int getLocalPort() { * operation to have effect. * * @param timeout the specified timeout in milliseconds. - * @throws SocketException if there is an error in the underlying protocol, such as an UDP error. + * @throws SocketException if there is an error in the underlying protocol, + * such as an UDP error, or the socket is closed. * @throws IllegalArgumentException if {@code timeout} is negative * @since 1.1 * @see #getSoTimeout() @@ -784,7 +795,8 @@ public void setSoTimeout(int timeout) throws SocketException { * option is disabled (i.e., timeout of infinity). * * @return the setting for SO_TIMEOUT - * @throws SocketException if there is an error in the underlying protocol, such as an UDP error. + * @throws SocketException if there is an error in the underlying protocol, + * such as an UDP error, or the socket is closed. * @since 1.1 * @see #setSoTimeout(int) */ @@ -820,8 +832,8 @@ public int getSoTimeout() throws SocketException { * @param size the size to which to set the send buffer * size, in bytes. This value must be greater than 0. * - * @throws SocketException if there is an error - * in the underlying protocol, such as an UDP error. + * @throws SocketException if there is an error in the underlying protocol, + * such as an UDP error, or the socket is closed. * @throws IllegalArgumentException if the value is 0 or is * negative. * @see #getSendBufferSize() @@ -841,8 +853,8 @@ public void setSendBufferSize(int size) throws SocketException { * getOption(StandardSocketOptions.SO_SNDBUF)}. * * @return the value of the SO_SNDBUF option for this {@code DatagramSocket} - * @throws SocketException if there is an error in - * the underlying protocol, such as an UDP error. + * @throws SocketException if there is an error in the underlying protocol, + * such as an UDP error, or the socket is closed. * @see #setSendBufferSize * @see StandardSocketOptions#SO_SNDBUF * @since 1.2 @@ -878,8 +890,8 @@ public int getSendBufferSize() throws SocketException { * @param size the size to which to set the receive buffer * size, in bytes. This value must be greater than 0. * - * @throws SocketException if there is an error in - * the underlying protocol, such as an UDP error. + * @throws SocketException if there is an error in the underlying protocol, + * such as an UDP error, or the socket is closed. 
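The recurring "or the socket is closed" wording above makes explicit what was previously unspecified: once a DatagramSocket has been closed, the option getters and setters fail with SocketException rather than behaving in an undefined way. A small sketch of the now-documented behaviour; binding to an ephemeral port is arbitrary:

    import java.net.DatagramSocket;
    import java.net.SocketException;

    public class ClosedSocketProbe {
        public static void main(String[] args) throws Exception {
            DatagramSocket socket = new DatagramSocket(0);   // bind to an ephemeral local port
            socket.close();
            try {
                socket.setSoTimeout(1_000);                  // specified above to fail once the socket is closed
                System.out.println("unexpected: no exception");
            } catch (SocketException expected) {
                System.out.println("as specified: " + expected.getMessage());
            }
        }
    }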
* @throws IllegalArgumentException if the value is 0 or is * negative. * @see #getReceiveBufferSize() @@ -899,7 +911,8 @@ public void setReceiveBufferSize(int size) throws SocketException { * getOption(StandardSocketOptions.SO_RCVBUF)}. * * @return the value of the SO_RCVBUF option for this {@code DatagramSocket} - * @throws SocketException if there is an error in the underlying protocol, such as an UDP error. + * @throws SocketException if there is an error in the underlying protocol, + * such as an UDP error, or the socket is closed. * @see #setReceiveBufferSize(int) * @see StandardSocketOptions#SO_RCVBUF * @since 1.2 @@ -959,8 +972,8 @@ public void setReuseAddress(boolean on) throws SocketException { * getOption(StandardSocketOptions.SO_REUSEADDR)}. * * @return a {@code boolean} indicating whether or not SO_REUSEADDR is enabled. - * @throws SocketException if there is an error - * in the underlying protocol, such as an UDP error. + * @throws SocketException if there is an error in the underlying protocol, + * such as an UDP error, or the socket is closed. * @since 1.4 * @see #setReuseAddress(boolean) * @see StandardSocketOptions#SO_REUSEADDR @@ -983,9 +996,8 @@ public boolean getReuseAddress() throws SocketException { * @param on * whether or not to have broadcast turned on. * - * @throws SocketException - * if there is an error in the underlying protocol, such as an UDP - * error. + * @throws SocketException if there is an error in the underlying protocol, + * such as an UDP error, or the socket is closed. * * @since 1.4 * @see #getBroadcast() @@ -1003,8 +1015,8 @@ public void setBroadcast(boolean on) throws SocketException { * getOption(StandardSocketOptions.SO_BROADCAST)}. * * @return a {@code boolean} indicating whether or not SO_BROADCAST is enabled. - * @throws SocketException if there is an error - * in the underlying protocol, such as an UDP error. + * @throws SocketException if there is an error in the underlying protocol, + * such as an UDP error, or the socket is closed. * @since 1.4 * @see #setBroadcast(boolean) * @see StandardSocketOptions#SO_BROADCAST @@ -1049,8 +1061,8 @@ public boolean getBroadcast() throws SocketException { * setOption(StandardSocketOptions.IP_TOS, tc)}. * * @param tc an {@code int} value for the bitset. - * @throws SocketException if there is an error setting the - * traffic class or type-of-service + * @throws SocketException if there is an error setting the traffic class or type-of-service, + * or the socket is closed. * @since 1.4 * @see #getTrafficClass * @see StandardSocketOptions#IP_TOS @@ -1074,8 +1086,8 @@ public void setTrafficClass(int tc) throws SocketException { * getOption(StandardSocketOptions.IP_TOS)}. * * @return the traffic class or type-of-service already set - * @throws SocketException if there is an error obtaining the - * traffic class or type-of-service value. + * @throws SocketException if there is an error obtaining the traffic class + * or type-of-service value, or the socket is closed. * @since 1.4 * @see #setTrafficClass(int) * @see StandardSocketOptions#IP_TOS @@ -1092,6 +1104,9 @@ public int getTrafficClass() throws SocketException { * *
<p>
    If this socket has an associated channel then the channel is closed * as well. + * + *
<p>
    Once closed, several of the methods defined by this class will throw + * an exception if invoked on the closed socket. */ public void close() { delegate().close(); @@ -1299,7 +1314,7 @@ public Set> supportedOptions() { * datagram packets, or {@code null}. * @throws IOException if there is an error joining, or when the address * is not a multicast address, or the platform does not support - * multicasting + * multicasting, or the socket is closed * @throws SecurityException if a security manager exists and its * {@code checkMulticast} method doesn't allow the join. * @throws IllegalArgumentException if mcastaddr is {@code null} or is a @@ -1343,7 +1358,7 @@ public void joinGroup(SocketAddress mcastaddr, NetworkInterface netIf) * is unspecified: any interface may be selected or the operation * may fail with a {@code SocketException}. * @throws IOException if there is an error leaving or when the address - * is not a multicast address. + * is not a multicast address, or the socket is closed. * @throws SecurityException if a security manager exists and its * {@code checkMulticast} method doesn't allow the operation. * @throws IllegalArgumentException if mcastaddr is {@code null} or is a diff --git a/src/java.base/share/classes/java/net/MulticastSocket.java b/src/java.base/share/classes/java/net/MulticastSocket.java index 9a9d57118c0..46757a6b407 100644 --- a/src/java.base/share/classes/java/net/MulticastSocket.java +++ b/src/java.base/share/classes/java/net/MulticastSocket.java @@ -221,7 +221,7 @@ public MulticastSocket(SocketAddress bindaddr) throws IOException { * * @param ttl the time-to-live * @throws IOException if an I/O exception occurs - * while setting the default time-to-live value + * while setting the default time-to-live value, or the socket is closed. * @deprecated use the {@link #setTimeToLive(int)} method instead, which uses * int instead of byte as the type for ttl. * @see #getTTL() @@ -250,7 +250,7 @@ public void setTTL(byte ttl) throws IOException { * * @throws IOException * if an I/O exception occurs while setting the - * default time-to-live value + * default time-to-live value, or the socket is closed. * * @see #getTimeToLive() * @see StandardSocketOptions#IP_MULTICAST_TTL @@ -265,7 +265,7 @@ public void setTimeToLive(int ttl) throws IOException { * the socket. * * @throws IOException if an I/O exception occurs - * while getting the default time-to-live value + * while getting the default time-to-live value, or the socket is closed. * @return the default time-to-live value * @deprecated use the {@link #getTimeToLive()} method instead, * which returns an int instead of a byte. @@ -285,7 +285,7 @@ public byte getTTL() throws IOException { * getOption(StandardSocketOptions.IP_MULTICAST_TTL)}. * * @throws IOException if an I/O exception occurs while - * getting the default time-to-live value + * getting the default time-to-live value, or the socket is closed. * @return the default time-to-live value * @see #setTimeToLive(int) * @see StandardSocketOptions#IP_MULTICAST_TTL @@ -311,7 +311,7 @@ public int getTimeToLive() throws IOException { * @param mcastaddr is the multicast address to join * @throws IOException if there is an error joining, * or when the address is not a multicast address, - * or the platform does not support multicasting + * or the platform does not support multicasting, or the socket is closed. * @throws SecurityException if a security manager exists and its * {@code checkMulticast} method doesn't allow the join. 
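The deprecation notes in this file steer callers toward the overloads that take an explicit NetworkInterface (see the @deprecated tags that follow). A hedged sketch of the preferred join/leave form; the group address, port, and interface name are placeholders:

    import java.net.InetAddress;
    import java.net.InetSocketAddress;
    import java.net.MulticastSocket;
    import java.net.NetworkInterface;

    public class JoinGroupSketch {
        public static void main(String[] args) throws Exception {
            InetSocketAddress group = new InetSocketAddress(InetAddress.getByName("230.0.0.1"), 4446);
            NetworkInterface nif = NetworkInterface.getByName("eth0");   // placeholder; may be null if no such interface
            try (MulticastSocket socket = new MulticastSocket(4446)) {
                socket.joinGroup(group, nif);    // interface is stated explicitly, unlike joinGroup(InetAddress)
                // ... receive datagrams here ...
                socket.leaveGroup(group, nif);
            }
        }
    }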
* @deprecated This method does not accept the network interface on @@ -339,7 +339,7 @@ public void joinGroup(InetAddress mcastaddr) throws IOException { * * @param mcastaddr is the multicast address to leave * @throws IOException if there is an error leaving - * or when the address is not a multicast address. + * or when the address is not a multicast address, or the socket is closed. * @throws SecurityException if a security manager exists and its * {@code checkMulticast} method doesn't allow the operation. * @deprecated This method does not accept the network interface on which @@ -393,7 +393,7 @@ public void leaveGroup(SocketAddress mcastaddr, NetworkInterface netIf) * * @param inf the InetAddress * @throws SocketException if there is an error in - * the underlying protocol, such as a TCP error. + * the underlying protocol, such as a TCP error, or the socket is closed. * @deprecated The InetAddress may not uniquely identify * the network interface. Use * {@link #setNetworkInterface(NetworkInterface)} instead. @@ -413,7 +413,7 @@ public void setInterface(InetAddress inf) throws SocketException { * or if no interface has been set, an {@code InetAddress} * representing any local address. * @throws SocketException if there is an error in the - * underlying protocol, such as a TCP error. + * underlying protocol, such as a TCP error, or the socket is closed. * @deprecated The network interface may not be uniquely identified by * the InetAddress returned. * Use {@link #getNetworkInterface()} instead. @@ -434,7 +434,7 @@ public InetAddress getInterface() throws SocketException { * * @param netIf the interface * @throws SocketException if there is an error in - * the underlying protocol, such as a TCP error. + * the underlying protocol, such as a TCP error, or the socket is closed. * @see #getNetworkInterface() * @see StandardSocketOptions#IP_MULTICAST_IF * @since 1.4 @@ -454,7 +454,7 @@ public void setNetworkInterface(NetworkInterface netIf) * getOption(StandardSocketOptions.IP_MULTICAST_IF)}. * * @throws SocketException if there is an error in - * the underlying protocol, such as a TCP error. + * the underlying protocol, such as a TCP error, or the socket is closed. * @return The multicast {@code NetworkInterface} currently set. A placeholder * NetworkInterface is returned when there is no interface set; it has * a single InetAddress to represent any local address. @@ -476,7 +476,8 @@ public NetworkInterface getNetworkInterface() throws SocketException { * verify what loopback mode is set to should call * {@link #getLoopbackMode()} * @param disable {@code true} to disable the LoopbackMode - * @throws SocketException if an error occurs while setting the value + * @throws SocketException if an error occurs while setting the value, or + * the socket is closed. * @since 1.4 * @deprecated Use {@link #setOption(SocketOption, Object)} with * {@link java.net.StandardSocketOptions#IP_MULTICAST_LOOP} @@ -493,7 +494,8 @@ public void setLoopbackMode(boolean disable) throws SocketException { /** * Get the setting for local loopback of multicast datagrams. * - * @throws SocketException if an error occurs while getting the value + * @throws SocketException if an error occurs while getting the value, or + * the socket is closed. * @return true if the LoopbackMode has been disabled * @since 1.4 * @deprecated Use {@link #getOption(SocketOption)} with @@ -534,8 +536,7 @@ public boolean getLoopbackMode() throws SocketException { * @param ttl optional time to live for multicast packet. * default ttl is 1. 
* - * @throws IOException is raised if an error occurs i.e - * error while setting ttl. + * @throws IOException if an I/O error occurs, or the socket is closed. * @throws SecurityException if a security manager exists and its * {@code checkMulticast} or {@code checkConnect} * method doesn't allow the send. From bc2700b7dc6039e2a17124a0ca32780383c4a43f Mon Sep 17 00:00:00 2001 From: George Adams Date: Tue, 20 Aug 2024 14:38:31 +0000 Subject: [PATCH 44/67] 8282944: GHA: Add Alpine Linux x86_64 pre-integration check Reviewed-by: ihse --- .github/actions/config/action.yml | 2 +- .github/workflows/build-alpine-linux.yml | 112 +++++++++++++++++++++++ .github/workflows/main.yml | 38 +++++++- make/conf/github-actions.conf | 4 + 4 files changed, 153 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/build-alpine-linux.yml diff --git a/.github/actions/config/action.yml b/.github/actions/config/action.yml index 5f648ffc022..931988accc3 100644 --- a/.github/actions/config/action.yml +++ b/.github/actions/config/action.yml @@ -41,6 +41,6 @@ runs: id: read-config run: | # Extract value from configuration file - value="$(grep -h ${{ inputs.var }}= make/conf/github-actions.conf | cut -d '=' -f 2-)" + value="$(grep -h '^${{ inputs.var }}'= make/conf/github-actions.conf | cut -d '=' -f 2-)" echo "value=$value" >> $GITHUB_OUTPUT shell: bash diff --git a/.github/workflows/build-alpine-linux.yml b/.github/workflows/build-alpine-linux.yml new file mode 100644 index 00000000000..ac5870ca675 --- /dev/null +++ b/.github/workflows/build-alpine-linux.yml @@ -0,0 +1,112 @@ +# +# Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. 
+# + +name: 'Build (alpine-linux)' + +on: + workflow_call: + inputs: + platform: + required: true + type: string + extra-conf-options: + required: false + type: string + make-target: + required: false + type: string + default: 'product-bundles test-bundles' + debug-levels: + required: false + type: string + default: '[ "debug", "release" ]' + apk-extra-packages: + required: false + type: string + configure-arguments: + required: false + type: string + make-arguments: + required: false + type: string + +jobs: + build-linux: + name: build + runs-on: ubuntu-22.04 + container: + image: alpine:3.20 + + strategy: + fail-fast: false + matrix: + debug-level: ${{ fromJSON(inputs.debug-levels) }} + include: + - debug-level: debug + flags: --with-debug-level=fastdebug + suffix: -debug+ + + steps: + - name: 'Checkout the JDK source' + uses: actions/checkout@v4 + + - name: 'Install toolchain and dependencies' + run: | + apk update + apk add alpine-sdk alsa-lib-dev autoconf bash cups-dev cups-libs fontconfig-dev freetype-dev grep libx11-dev libxext-dev libxrandr-dev libxrender-dev libxt-dev libxtst-dev linux-headers wget zip ${{ inputs.apk-extra-packages }} + + - name: 'Get the BootJDK' + id: bootjdk + uses: ./.github/actions/get-bootjdk + with: + platform: alpine-linux-x64 + + - name: 'Configure' + run: > + bash configure + --with-conf-name=${{ inputs.platform }} + ${{ matrix.flags }} + --with-version-opt=${GITHUB_ACTOR}-${GITHUB_SHA} + --with-boot-jdk=${{ steps.bootjdk.outputs.path }} + --with-zlib=system + --with-jmod-compress=zip-1 + ${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || ( + echo "Dumping config.log:" && + cat config.log && + exit 1) + + - name: 'Build' + id: build + uses: ./.github/actions/do-build + with: + make-target: '${{ inputs.make-target }} ${{ inputs.make-arguments }}' + platform: ${{ inputs.platform }} + debug-suffix: '${{ matrix.suffix }}' + + - name: 'Upload bundles' + uses: ./.github/actions/upload-bundles + with: + platform: ${{ inputs.platform }} + debug-suffix: '${{ matrix.suffix }}' diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 1dc0b25c345..e4c05acb684 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -36,7 +36,7 @@ on: platforms: description: 'Platform(s) to execute on (comma separated, e.g. "linux-x64, macos, aarch64")' required: true - default: 'linux-x64, linux-x86-hs, linux-x64-variants, linux-cross-compile, macos-x64, macos-aarch64, windows-x64, windows-aarch64, docs' + default: 'linux-x64, linux-x86-hs, linux-x64-variants, linux-cross-compile, alpine-linux-x64, macos-x64, macos-aarch64, windows-x64, windows-aarch64, docs' configure-arguments: description: 'Additional configure arguments' required: false @@ -57,11 +57,15 @@ jobs: select: name: 'Select platforms' runs-on: ubuntu-22.04 + env: + # List of platforms to exclude by default + EXCLUDED_PLATFORMS: 'alpine-linux-x64' outputs: linux-x64: ${{ steps.include.outputs.linux-x64 }} linux-x86-hs: ${{ steps.include.outputs.linux-x86-hs }} linux-x64-variants: ${{ steps.include.outputs.linux-x64-variants }} linux-cross-compile: ${{ steps.include.outputs.linux-cross-compile }} + alpine-linux-x64: ${{ steps.include.outputs.alpine-linux-x64 }} macos-x64: ${{ steps.include.outputs.macos-x64 }} macos-aarch64: ${{ steps.include.outputs.macos-aarch64 }} windows-x64: ${{ steps.include.outputs.windows-x64 }} @@ -78,6 +82,10 @@ jobs: # Returns 'true' if the input platform list matches any of the platform monikers given as argument, # 'false' otherwise. 
# arg $1: platform name or names to look for + + # Convert EXCLUDED_PLATFORMS from a comma-separated string to an array + IFS=',' read -r -a excluded_array <<< "$EXCLUDED_PLATFORMS" + function check_platform() { if [[ $GITHUB_EVENT_NAME == workflow_dispatch ]]; then input='${{ github.event.inputs.platforms }}' @@ -94,7 +102,13 @@ jobs: normalized_input="$(echo ,$input, | tr -d ' ')" if [[ "$normalized_input" == ",," ]]; then - # For an empty input, assume all platforms should run + # For an empty input, assume all platforms should run, except those in the EXCLUDED_PLATFORMS list + for excluded in "${excluded_array[@]}"; do + if [[ "$1" == "$excluded" ]]; then + echo 'false' + return + fi + done echo 'true' return else @@ -105,6 +119,14 @@ jobs: return fi done + + # If not explicitly included, check against the EXCLUDED_PLATFORMS list + for excluded in "${excluded_array[@]}"; do + if [[ "$1" == "$excluded" ]]; then + echo 'false' + return + fi + done fi echo 'false' @@ -114,6 +136,7 @@ jobs: echo "linux-x86-hs=$(check_platform linux-x86-hs linux x86)" >> $GITHUB_OUTPUT echo "linux-x64-variants=$(check_platform linux-x64-variants variants)" >> $GITHUB_OUTPUT echo "linux-cross-compile=$(check_platform linux-cross-compile cross-compile)" >> $GITHUB_OUTPUT + echo "alpine-linux-x64=$(check_platform alpine-linux-x64 alpine-linux x64)" >> $GITHUB_OUTPUT echo "macos-x64=$(check_platform macos-x64 macos x64)" >> $GITHUB_OUTPUT echo "macos-aarch64=$(check_platform macos-aarch64 macos aarch64)" >> $GITHUB_OUTPUT echo "windows-x64=$(check_platform windows-x64 windows x64)" >> $GITHUB_OUTPUT @@ -221,6 +244,16 @@ jobs: make-arguments: ${{ github.event.inputs.make-arguments }} if: needs.select.outputs.linux-cross-compile == 'true' + build-alpine-linux-x64: + name: alpine-linux-x64 + needs: select + uses: ./.github/workflows/build-alpine-linux.yml + with: + platform: alpine-linux-x64 + configure-arguments: ${{ github.event.inputs.configure-arguments }} + make-arguments: ${{ github.event.inputs.make-arguments }} + if: needs.select.outputs.alpine-linux-x64 == 'true' + build-macos-x64: name: macos-x64 needs: select @@ -344,6 +377,7 @@ jobs: - build-linux-x64-hs-minimal - build-linux-x64-hs-optimized - build-linux-cross-compile + - build-alpine-linux-x64 - build-macos-x64 - build-macos-aarch64 - build-windows-x64 diff --git a/make/conf/github-actions.conf b/make/conf/github-actions.conf index 3cb56b47b50..eca6c05033d 100644 --- a/make/conf/github-actions.conf +++ b/make/conf/github-actions.conf @@ -32,6 +32,10 @@ LINUX_X64_BOOT_JDK_EXT=tar.gz LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk22.0.2/c9ecb94cd31b495da20a27d4581645e8/9/GPL/openjdk-22.0.2_linux-x64_bin.tar.gz LINUX_X64_BOOT_JDK_SHA256=41536f115668308ecf4eba92aaf6acaeb0936225828b741efd83b6173ba82963 +ALPINE_LINUX_X64_BOOT_JDK_EXT=tar.gz +ALPINE_LINUX_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin22-binaries/releases/download/jdk-22.0.2%2B9/OpenJDK22U-jdk_x64_alpine-linux_hotspot_22.0.2_9.tar.gz +ALPINE_LINUX_X64_BOOT_JDK_SHA256=49f73414824b1a7c268a611225fa4d7ce5e25600201e0f1cd59f94d1040b5264 + MACOS_AARCH64_BOOT_JDK_EXT=tar.gz MACOS_AARCH64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk22.0.2/c9ecb94cd31b495da20a27d4581645e8/9/GPL/openjdk-22.0.2_macos-aarch64_bin.tar.gz MACOS_AARCH64_BOOT_JDK_SHA256=3dab98730234e1a87aec14bcb8171d2cae101e96ff4eed1dab96abbb08e843fd From b442003048559fc35cafddb62885d3ba75b70838 Mon Sep 17 00:00:00 2001 From: Chen Liang Date: Tue, 20 Aug 2024 14:44:37 +0000 Subject: [PATCH 45/67] 8338623: 
StackCounter adding extraneous slots for receiver invoke instructions Reviewed-by: asotona --- .../internal/classfile/impl/StackCounter.java | 7 +++-- test/jdk/jdk/classfile/StackMapsTest.java | 31 +++++++++++++++++-- 2 files changed, 33 insertions(+), 5 deletions(-) diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/StackCounter.java b/src/java.base/share/classes/jdk/internal/classfile/impl/StackCounter.java index 876cb201b79..24a27ad5af6 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/StackCounter.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/StackCounter.java @@ -313,13 +313,14 @@ public StackCounter(LabelContext labelContext, var cpe = cp.entryByIndex(bcs.getIndexU2()); var nameAndType = opcode == INVOKEDYNAMIC ? ((DynamicConstantPoolEntry)cpe).nameAndType() : ((MemberRefEntry)cpe).nameAndType(); var mtd = Util.methodTypeSymbol(nameAndType); - addStackSlot(Util.slotSize(mtd.returnType()) - Util.parameterSlots(mtd)); + var delta = Util.slotSize(mtd.returnType()) - Util.parameterSlots(mtd); if (opcode != INVOKESTATIC && opcode != INVOKEDYNAMIC) { - addStackSlot(-1); + delta--; } + addStackSlot(delta); } case MULTIANEWARRAY -> - addStackSlot (1 - bcs.getU1(bcs.bci + 3)); + addStackSlot(1 - bcs.getU1(bcs.bci + 3)); case JSR -> { addStackSlot(+1); jump(bcs.dest()); //here we lost track of the exact stack size after return from subroutine diff --git a/test/jdk/jdk/classfile/StackMapsTest.java b/test/jdk/jdk/classfile/StackMapsTest.java index a5109dd2a18..3ee8eaf51ee 100644 --- a/test/jdk/jdk/classfile/StackMapsTest.java +++ b/test/jdk/jdk/classfile/StackMapsTest.java @@ -24,7 +24,7 @@ /* * @test * @summary Testing Classfile stack maps generator. - * @bug 8305990 8320222 8320618 8335475 + * @bug 8305990 8320222 8320618 8335475 8338623 * @build testdata.* * @run junit StackMapsTest */ @@ -40,10 +40,10 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; -import static java.lang.constant.ConstantDescs.MTD_void; import static org.junit.jupiter.api.Assertions.*; import static helpers.TestUtil.assertEmpty; import static java.lang.classfile.ClassFile.ACC_STATIC; +import static java.lang.constant.ConstantDescs.*; import java.lang.constant.ClassDesc; import java.lang.constant.ConstantDescs; @@ -341,4 +341,31 @@ void testEmptyCounters(ClassFile.StackMapsOption option) { } } } + + private static final MethodTypeDesc MTD_int = MethodTypeDesc.of(CD_int); + private static final MethodTypeDesc MTD_int_String = MethodTypeDesc.of(CD_int, CD_String); + + @ParameterizedTest + @EnumSource(ClassFile.StackMapsOption.class) + void testInvocationCounters(ClassFile.StackMapsOption option) { + var cf = ClassFile.of(option); + var cd = ClassDesc.of("Test"); + var bytes = cf.build(cd, clb -> clb + .withMethodBody("a", MTD_int_String, ACC_STATIC, cob -> cob + .aload(0) + .invokevirtual(CD_String, "hashCode", MTD_int) + .ireturn()) + .withMethodBody("b", MTD_int, 0, cob -> cob + .aload(0) + .invokevirtual(cd, "hashCode", MTD_int) + .ireturn()) + ); + + var cm = ClassFile.of().parse(bytes); + for (var method : cm.methods()) { + var code = method.findAttribute(Attributes.code()).orElseThrow(); + assertEquals(1, code.maxLocals()); + assertEquals(1, code.maxStack()); + } + } } From 55a97ec8793242c0cacbafd3a4fead25cdce2934 Mon Sep 17 00:00:00 2001 From: Christian Hagedorn Date: Tue, 20 Aug 2024 15:47:16 +0000 Subject: [PATCH 46/67] 8336729: C2: Div/Mod nodes without zero check could be split through iv phi of 
outer loop of long counted loop nest resulting in SIGFPE Co-authored-by: Emanuel Peter Reviewed-by: epeter, kvn, thartmann --- src/hotspot/share/opto/loopnode.hpp | 2 +- src/hotspot/share/opto/loopopts.cpp | 6 +-- .../splitif/TestSplitDivisionThroughPhi.java | 51 +++++++++++++++++-- 3 files changed, 52 insertions(+), 7 deletions(-) diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp index 94632be268d..1217caa41eb 100644 --- a/src/hotspot/share/opto/loopnode.hpp +++ b/src/hotspot/share/opto/loopnode.hpp @@ -1578,7 +1578,7 @@ class PhaseIdealLoop : public PhaseTransform { bool identical_backtoback_ifs(Node *n); bool can_split_if(Node *n_ctrl); bool cannot_split_division(const Node* n, const Node* region) const; - static bool is_divisor_counted_loop_phi(const Node* divisor, const Node* loop); + static bool is_divisor_loop_phi(const Node* divisor, const Node* loop); bool loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const; // Determine if a method is too big for a/another round of split-if, based on diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp index 361e03f06a2..fc6c1a34823 100644 --- a/src/hotspot/share/opto/loopopts.cpp +++ b/src/hotspot/share/opto/loopopts.cpp @@ -295,12 +295,12 @@ bool PhaseIdealLoop::cannot_split_division(const Node* n, const Node* region) co } Node* divisor = n->in(2); - return is_divisor_counted_loop_phi(divisor, region) && + return is_divisor_loop_phi(divisor, region) && loop_phi_backedge_type_contains_zero(divisor, zero); } -bool PhaseIdealLoop::is_divisor_counted_loop_phi(const Node* divisor, const Node* loop) { - return loop->is_BaseCountedLoop() && divisor->is_Phi() && divisor->in(0) == loop; +bool PhaseIdealLoop::is_divisor_loop_phi(const Node* divisor, const Node* loop) { + return loop->is_Loop() && divisor->is_Phi() && divisor->in(0) == loop; } bool PhaseIdealLoop::loop_phi_backedge_type_contains_zero(const Node* phi_divisor, const Type* zero) const { diff --git a/test/hotspot/jtreg/compiler/splitif/TestSplitDivisionThroughPhi.java b/test/hotspot/jtreg/compiler/splitif/TestSplitDivisionThroughPhi.java index f9dd8b2dcb6..e95c74616a1 100644 --- a/test/hotspot/jtreg/compiler/splitif/TestSplitDivisionThroughPhi.java +++ b/test/hotspot/jtreg/compiler/splitif/TestSplitDivisionThroughPhi.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,7 +24,7 @@ /** * @test * @key stress randomness -* @bug 8299259 +* @bug 8299259 8336729 * @requires vm.compiler2.enabled * @summary Test various cases of divisions/modulo which should not be split through iv phis. * @run main/othervm -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:LoopUnrollLimit=0 -XX:+StressGCM -XX:StressSeed=884154126 @@ -35,7 +35,7 @@ /** * @test * @key stress randomness -* @bug 8299259 +* @bug 8299259 8336729 * @requires vm.compiler2.enabled * @summary Test various cases of divisions/modulo which should not be split through iv phis. 
* @run main/othervm -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:LoopUnrollLimit=0 -XX:+StressGCM @@ -43,6 +43,28 @@ * compiler.splitif.TestSplitDivisionThroughPhi */ +/** + * @test + * @key stress randomness + * @bug 8336729 + * @requires vm.compiler2.enabled + * @summary Test various cases of divisions/modulo which should not be split through iv phis. + * @run main/othervm -Xcomp -XX:+UnlockDiagnosticVMOptions -XX:LoopUnrollLimit=0 -XX:+StressGCM -XX:StressSeed=3434 + * -XX:CompileCommand=compileonly,compiler.splitif.TestSplitDivisionThroughPhi::* + * compiler.splitif.TestSplitDivisionThroughPhi + */ + +/** + * @test + * @key stress randomness + * @bug 8336729 + * @requires vm.compiler2.enabled + * @summary Test various cases of divisions/modulo which should not be split through iv phis. + * @run main/othervm -Xcomp -XX:+UnlockDiagnosticVMOptions -XX:LoopUnrollLimit=0 -XX:+StressGCM + * -XX:CompileCommand=compileonly,compiler.splitif.TestSplitDivisionThroughPhi::* + * compiler.splitif.TestSplitDivisionThroughPhi + */ + package compiler.splitif; public class TestSplitDivisionThroughPhi { @@ -61,6 +83,8 @@ public static void main(String[] strArr) { testPushDivLThruPhiInChain(); testPushModLThruPhi(); testPushModLThruPhiInChain(); + testPushDivLThruPhiForOuterLongLoop(); + testPushModLThruPhiForOuterLongLoop(); } } @@ -78,6 +102,27 @@ static void testPushDivIThruPhi() { } } + // Fixed with JDK-8336729. + static void testPushDivLThruPhiForOuterLongLoop() { + // This loop is first transformed into a LongCountedLoop in the first loop opts phase. + // In the second loop opts phase, the LongCountedLoop is split into an inner and an outer loop. Both get the + // same iv phi type which is [2..10]. Only the inner loop is transformed into a CountedLoopNode while the outer + // loop is still a LoopNode. We run into the same problem as described in testPushDivIThruPhi() when splitting + // the DivL node through the long iv phi of the outer LoopNode. + // The fix for JDK-8299259 only prevents this splitting for CountedLoopNodes. We now extend it to LoopNodes + // in general. + for (long i = 10; i > 1; i -= 2) { + lFld = 10 / i; + } + } + + // Same as testPushDivLThruPhiForOuterLongLoop() but for ModL. + static void testPushModLThruPhiForOuterLongLoop() { + for (int i = 10; i > 1; i -= 2) { + iFld = 10 % i; + } + } + // Same as above but with an additional Mul node between the iv phi and the Div node. Both nodes are split through // the iv phi in one pass of Split If. static void testPushDivIThruPhiInChain() { From 285ceb9ee51e064687da6fc3fbed984e34cf02e6 Mon Sep 17 00:00:00 2001 From: Vladimir Petko Date: Tue, 20 Aug 2024 15:51:40 +0000 Subject: [PATCH 47/67] 8336529: (fs) UnixFileAttributeViews setTimes() failing on armhf, Ubuntu noble Reviewed-by: bpb, alanb --- .../native/libnio/fs/UnixNativeDispatcher.c | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/src/java.base/unix/native/libnio/fs/UnixNativeDispatcher.c b/src/java.base/unix/native/libnio/fs/UnixNativeDispatcher.c index b7afaf4cdce..13fe39c6551 100644 --- a/src/java.base/unix/native/libnio/fs/UnixNativeDispatcher.c +++ b/src/java.base/unix/native/libnio/fs/UnixNativeDispatcher.c @@ -254,6 +254,23 @@ static int statx_wrapper(int dirfd, const char *restrict pathname, int flags, } #endif +#if defined(__linux__) && defined(__arm__) +/** + * Lookup functions with time_t parameter. Try to use 64 bit symbol + * if sizeof(time_t) exceeds 32 bit. 
+ */ +static void* lookup_time_t_function(const char* symbol, const char* symbol64) { + void *func_ptr = NULL; + if (sizeof(time_t) > 4) { + func_ptr = dlsym(RTLD_DEFAULT, symbol64); + } + if (func_ptr == NULL) { + return dlsym(RTLD_DEFAULT, symbol); + } + return func_ptr; +} +#endif + /** * Call this to throw an internal UnixException when a system/library * call fails @@ -351,11 +368,20 @@ Java_sun_nio_fs_UnixNativeDispatcher_init(JNIEnv* env, jclass this) #endif my_unlinkat_func = (unlinkat_func*) dlsym(RTLD_DEFAULT, "unlinkat"); my_renameat_func = (renameat_func*) dlsym(RTLD_DEFAULT, "renameat"); +#if defined(__linux__) && defined(__arm__) + my_futimesat_func = (futimesat_func*) lookup_time_t_function("futimesat", + "__futimesat64"); + my_lutimes_func = (lutimes_func*) lookup_time_t_function("lutimes", + "__lutimes64"); + my_futimens_func = (futimens_func*) lookup_time_t_function("futimens", + "__futimens64"); +#else #ifndef _ALLBSD_SOURCE my_futimesat_func = (futimesat_func*) dlsym(RTLD_DEFAULT, "futimesat"); my_lutimes_func = (lutimes_func*) dlsym(RTLD_DEFAULT, "lutimes"); #endif my_futimens_func = (futimens_func*) dlsym(RTLD_DEFAULT, "futimens"); +#endif #if defined(_AIX) // Make sure we link to the 64-bit version of the function my_fdopendir_func = (fdopendir_func*) dlsym(RTLD_DEFAULT, "fdopendir64"); From c646efc366342564baebd2f17133e14780abcaa8 Mon Sep 17 00:00:00 2001 From: Leonid Mesnik Date: Tue, 20 Aug 2024 18:41:15 +0000 Subject: [PATCH 48/67] 8205957: setfldw001/TestDescription.java fails with bad field value Reviewed-by: sspitsyn, dlong --- src/hotspot/share/runtime/javaCalls.cpp | 46 +++++++++++-------- test/hotspot/jtreg/ProblemList-Xcomp.txt | 5 -- .../setfmodw001/TestDescription.java | 8 ---- 3 files changed, 26 insertions(+), 33 deletions(-) diff --git a/src/hotspot/share/runtime/javaCalls.cpp b/src/hotspot/share/runtime/javaCalls.cpp index 0ae0d4540e4..b3b7b6f6834 100644 --- a/src/hotspot/share/runtime/javaCalls.cpp +++ b/src/hotspot/share/runtime/javaCalls.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -357,14 +357,6 @@ void JavaCalls::call_helper(JavaValue* result, const methodHandle& method, JavaC CompilationPolicy::compile_if_required(method, CHECK); - // Since the call stub sets up like the interpreter we call the from_interpreted_entry - // so we can go compiled via a i2c. Otherwise initial entry method will always - // run interpreted. - address entry_point = method->from_interpreted_entry(); - if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) { - entry_point = method->interpreter_entry(); - } - // Figure out if the result value is an oop or not (Note: This is a different value // than result_type. result_type will be T_INT of oops. (it is about size) BasicType result_type = runtime_type_from(result); @@ -398,20 +390,34 @@ void JavaCalls::call_helper(JavaValue* result, const methodHandle& method, JavaC // the call to call_stub, the optimizer produces wrong code. 
intptr_t* result_val_address = (intptr_t*)(result->get_value_addr()); intptr_t* parameter_address = args->parameters(); + + address entry_point; + { + // The enter_interp_only_mode use handshake to set interp_only mode + // so no safepoint should be allowed between is_interp_only_mode() and call + NoSafepointVerifier nsv; + if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) { + entry_point = method->interpreter_entry(); + } else { + // Since the call stub sets up like the interpreter we call the from_interpreted_entry + // so we can go compiled via a i2c. + entry_point = method->from_interpreted_entry(); #if INCLUDE_JVMCI - // Gets the alternative target (if any) that should be called - Handle alternative_target = args->alternative_target(); - if (!alternative_target.is_null()) { - // Must extract verified entry point from HotSpotNmethod after VM to Java - // transition in JavaCallWrapper constructor so that it is safe with - // respect to nmethod sweeping. - address verified_entry_point = (address) HotSpotJVMCI::InstalledCode::entryPoint(nullptr, alternative_target()); - if (verified_entry_point != nullptr) { - thread->set_jvmci_alternate_call_target(verified_entry_point); - entry_point = method->adapter()->get_i2c_entry(); + // Gets the alternative target (if any) that should be called + Handle alternative_target = args->alternative_target(); + if (!alternative_target.is_null()) { + // Must extract verified entry point from HotSpotNmethod after VM to Java + // transition in JavaCallWrapper constructor so that it is safe with + // respect to nmethod sweeping. + address verified_entry_point = (address) HotSpotJVMCI::InstalledCode::entryPoint(nullptr, alternative_target()); + if (verified_entry_point != nullptr) { + thread->set_jvmci_alternate_call_target(verified_entry_point); + entry_point = method->adapter()->get_i2c_entry(); + } + } +#endif } } -#endif StubRoutines::call_stub()( (address)&link, // (intptr_t*)&(result->_value), // see NOTE above (compiler problem) diff --git a/test/hotspot/jtreg/ProblemList-Xcomp.txt b/test/hotspot/jtreg/ProblemList-Xcomp.txt index 5617ea0efa6..9d91cad1dde 100644 --- a/test/hotspot/jtreg/ProblemList-Xcomp.txt +++ b/test/hotspot/jtreg/ProblemList-Xcomp.txt @@ -29,11 +29,6 @@ vmTestbase/nsk/jvmti/AttachOnDemand/attach020/TestDescription.java 8287324 generic-all -vmTestbase/nsk/jvmti/SetFieldAccessWatch/setfldw001/TestDescription.java#id0 8205957 generic-all -vmTestbase/nsk/jvmti/SetFieldAccessWatch/setfldw001/TestDescription.java#logging 8205957 generic-all -vmTestbase/nsk/jvmti/SetFieldModificationWatch/setfmodw001/TestDescription.java#id0 8205957 generic-all -vmTestbase/nsk/jvmti/SetFieldModificationWatch/setfmodw001/TestDescription.java#logging 8205957 generic-all - vmTestbase/nsk/jvmti/scenarios/sampling/SP07/sp07t002/TestDescription.java 8245680 windows-x64 vmTestbase/vm/mlvm/mixed/stress/regression/b6969574/INDIFY_Test.java 8265295 linux-x64,windows-x64 diff --git a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetFieldModificationWatch/setfmodw001/TestDescription.java b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetFieldModificationWatch/setfmodw001/TestDescription.java index d0cbdc288b4..e52340e7722 100644 --- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetFieldModificationWatch/setfmodw001/TestDescription.java +++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetFieldModificationWatch/setfmodw001/TestDescription.java @@ -47,11 +47,3 @@ * /test/lib * @run main/othervm/native -agentlib:setfmodw001 
nsk.jvmti.SetFieldModificationWatch.setfmodw001 */ - -/* - * @test id=logging - * - * @library /vmTestbase - * /test/lib - * @run main/othervm/native -agentlib:setfmodw001 -XX:TraceJVMTI=ec+,+ioe,+s -Xlog:jvmti=trace:file=vm.%p.log nsk.jvmti.SetFieldModificationWatch.setfmodw001 - */ From 0267284c52a450afaec78a542910381f5bff58fb Mon Sep 17 00:00:00 2001 From: Mark Reinhold Date: Tue, 20 Aug 2024 18:50:17 +0000 Subject: [PATCH 49/67] 8338611: java.lang.module specification wording not aligned with JEP 261 Reviewed-by: alanb --- .../share/classes/java/lang/module/package-info.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/java.base/share/classes/java/lang/module/package-info.java b/src/java.base/share/classes/java/lang/module/package-info.java index 3deaf3ad0a1..7b9f60c17e1 100644 --- a/src/java.base/share/classes/java/lang/module/package-info.java +++ b/src/java.base/share/classes/java/lang/module/package-info.java @@ -154,9 +154,9 @@ * application module specified to the 'java' launcher. When compiling code in * the unnamed module, or at run-time when the main application class is loaded * from the class path, then the default set of root modules is implementation - * specific. In the JDK the default set of root modules contains every module - * that is observable on the upgrade module path or among the system modules, - * and that exports at least one package without qualification.

    + * specific. In the JDK the default set of root modules contains every module on + * the upgrade module path or among the system modules that exports at least one + * package, without qualification.
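As an illustrative aside, the wording above can be checked against a running JDK with the java.lang.module API. The sketch below is only an approximation of the rule (it scans the system modules and ignores the upgrade module path), and the class name is made up for the example:

import java.lang.module.ModuleFinder;

public class DefaultRootsSketch {
    public static void main(String[] args) {
        // List system modules that export at least one package without
        // qualification; these correspond to the system-modules part of the
        // default root set described above (the upgrade module path is not
        // considered in this sketch).
        ModuleFinder.ofSystem().findAll().stream()
                .map(ref -> ref.descriptor())
                .filter(d -> d.exports().stream().anyMatch(e -> !e.isQualified()))
                .map(d -> d.name())
                .sorted()
                .forEach(System.out::println);
    }
}

On a typical JDK build the output includes modules such as java.base and java.sql; the exact list depends on the modules present in the image.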

 *
 * <h2> Observable modules </h2>
    * From 1ebf2cf639300728ffc024784f5dc1704317b0b3 Mon Sep 17 00:00:00 2001 From: Chen Liang Date: Tue, 20 Aug 2024 19:02:38 +0000 Subject: [PATCH 50/67] 8336756: Improve ClassFile Annotation writing Reviewed-by: asotona --- .../java/lang/classfile/AnnotationValue.java | 5 +- .../classfile/impl/AnnotationImpl.java | 109 +++--------------- .../classfile/impl/AnnotationReader.java | 32 ++++- 3 files changed, 47 insertions(+), 99 deletions(-) diff --git a/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java b/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java index 4decff86ad7..33cfb7f7c00 100644 --- a/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java +++ b/src/java.base/share/classes/java/lang/classfile/AnnotationValue.java @@ -97,10 +97,7 @@ sealed interface OfArray extends AnnotationValue * @since 22 */ @PreviewFeature(feature = PreviewFeature.Feature.CLASSFILE_API) - sealed interface OfConstant - extends AnnotationValue - permits OfString, OfDouble, OfFloat, OfLong, OfInt, OfShort, OfChar, OfByte, - OfBoolean, AnnotationImpl.OfConstantImpl { + sealed interface OfConstant extends AnnotationValue { /** * {@return the constant pool entry backing this constant element} * diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/AnnotationImpl.java b/src/java.base/share/classes/jdk/internal/classfile/impl/AnnotationImpl.java index 794e7915960..b2c67795fc4 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/AnnotationImpl.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/AnnotationImpl.java @@ -32,37 +32,18 @@ import static java.lang.classfile.ClassFile.*; public record AnnotationImpl(Utf8Entry className, List elements) - implements Annotation, Util.Writable { + implements Annotation { public AnnotationImpl { elements = List.copyOf(elements); } - @Override - public void writeTo(BufWriterImpl buf) { - buf.writeIndex(className()); - buf.writeU2(elements().size()); - for (var e : elements) { - buf.writeIndex(e.name()); - AnnotationReader.writeAnnotationValue(buf, e.value()); - } - } - @Override public String toString() { StringBuilder sb = new StringBuilder("Annotation["); sb.append(className().stringValue()); List evps = elements(); - if (!evps.isEmpty()) - sb.append(" ["); - for (AnnotationElement evp : evps) { - sb.append(evp.name().stringValue()) - .append("=") - .append(evp.value().toString()) - .append(", "); - } if (!evps.isEmpty()) { - sb.delete(sb.length()-1, sb.length()); - sb.append("]"); + sb.append(' ').append(evps); } sb.append("]"); return sb.toString(); @@ -70,28 +51,15 @@ public String toString() { public record AnnotationElementImpl(Utf8Entry name, AnnotationValue value) - implements AnnotationElement, Util.Writable { - + implements AnnotationElement { @Override - public void writeTo(BufWriterImpl buf) { - buf.writeIndex(name()); - AnnotationReader.writeAnnotationValue(buf, value()); + public String toString() { + return name + "=" + value; } } - public sealed interface OfConstantImpl extends AnnotationValue.OfConstant, Util.Writable { - - @Override - default void writeTo(BufWriterImpl buf) { - buf.writeU1(tag()); - buf.writeIndex(constant()); - } - - } - public record OfStringImpl(Utf8Entry constant) - implements OfConstantImpl, AnnotationValue.OfString { - + implements AnnotationValue.OfString { @Override public char tag() { return AEV_STRING; @@ -104,8 +72,7 @@ public String stringValue() { } public record OfDoubleImpl(DoubleEntry constant) - implements OfConstantImpl, 
AnnotationValue.OfDouble { - + implements AnnotationValue.OfDouble { @Override public char tag() { return AEV_DOUBLE; @@ -118,8 +85,7 @@ public double doubleValue() { } public record OfFloatImpl(FloatEntry constant) - implements OfConstantImpl, AnnotationValue.OfFloat { - + implements AnnotationValue.OfFloat { @Override public char tag() { return AEV_FLOAT; @@ -132,8 +98,7 @@ public float floatValue() { } public record OfLongImpl(LongEntry constant) - implements OfConstantImpl, AnnotationValue.OfLong { - + implements AnnotationValue.OfLong { @Override public char tag() { return AEV_LONG; @@ -146,8 +111,7 @@ public long longValue() { } public record OfIntImpl(IntegerEntry constant) - implements OfConstantImpl, AnnotationValue.OfInt { - + implements AnnotationValue.OfInt { @Override public char tag() { return AEV_INT; @@ -160,8 +124,7 @@ public int intValue() { } public record OfShortImpl(IntegerEntry constant) - implements OfConstantImpl, AnnotationValue.OfShort { - + implements AnnotationValue.OfShort { @Override public char tag() { return AEV_SHORT; @@ -174,8 +137,7 @@ public short shortValue() { } public record OfCharImpl(IntegerEntry constant) - implements OfConstantImpl, AnnotationValue.OfChar { - + implements AnnotationValue.OfChar { @Override public char tag() { return AEV_CHAR; @@ -188,8 +150,7 @@ public char charValue() { } public record OfByteImpl(IntegerEntry constant) - implements OfConstantImpl, AnnotationValue.OfByte { - + implements AnnotationValue.OfByte { @Override public char tag() { return AEV_BYTE; @@ -202,8 +163,7 @@ public byte byteValue() { } public record OfBooleanImpl(IntegerEntry constant) - implements OfConstantImpl, AnnotationValue.OfBoolean { - + implements AnnotationValue.OfBoolean { @Override public char tag() { return AEV_BOOLEAN; @@ -216,8 +176,7 @@ public boolean booleanValue() { } public record OfArrayImpl(List values) - implements AnnotationValue.OfArray, Util.Writable { - + implements AnnotationValue.OfArray { public OfArrayImpl { values = List.copyOf(values); } @@ -226,61 +185,29 @@ public record OfArrayImpl(List values) public char tag() { return AEV_ARRAY; } - - @Override - public void writeTo(BufWriterImpl buf) { - buf.writeU1(tag()); - buf.writeU2(values.size()); - for (var e : values) { - AnnotationReader.writeAnnotationValue(buf, e); - } - } - } public record OfEnumImpl(Utf8Entry className, Utf8Entry constantName) - implements AnnotationValue.OfEnum, Util.Writable { + implements AnnotationValue.OfEnum { @Override public char tag() { return AEV_ENUM; } - - @Override - public void writeTo(BufWriterImpl buf) { - buf.writeU1(tag()); - buf.writeIndex(className); - buf.writeIndex(constantName); - } - } public record OfAnnotationImpl(Annotation annotation) - implements AnnotationValue.OfAnnotation, Util.Writable { + implements AnnotationValue.OfAnnotation { @Override public char tag() { return AEV_ANNOTATION; } - - @Override - public void writeTo(BufWriterImpl buf) { - buf.writeU1(tag()); - AnnotationReader.writeAnnotation(buf, annotation); - } - } public record OfClassImpl(Utf8Entry className) - implements AnnotationValue.OfClass, Util.Writable { + implements AnnotationValue.OfClass { @Override public char tag() { return AEV_CLASS; } - - @Override - public void writeTo(BufWriterImpl buf) { - buf.writeU1(tag()); - buf.writeIndex(className); - } - } } diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/AnnotationReader.java b/src/java.base/share/classes/jdk/internal/classfile/impl/AnnotationReader.java index 6802d6e75aa..69511131b37 
100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/AnnotationReader.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/AnnotationReader.java @@ -281,8 +281,13 @@ private static int skipTypeAnnotation(ClassReader classReader, int p) { } public static void writeAnnotation(BufWriterImpl buf, Annotation annotation) { - // TODO annotation cleanup later - ((Util.Writable) annotation).writeTo(buf); + buf.writeIndex(annotation.className()); + var elements = annotation.elements(); + buf.writeU2(elements.size()); + for (var e : elements) { + buf.writeIndex(e.name()); + AnnotationReader.writeAnnotationValue(buf, e.value()); + } } public static void writeAnnotations(BufWriter buf, List list) { @@ -354,7 +359,26 @@ public static void writeTypeAnnotations(BufWriter buf, List list } public static void writeAnnotationValue(BufWriterImpl buf, AnnotationValue value) { - // TODO annotation cleanup later - ((Util.Writable) value).writeTo(buf); + var tag = value.tag(); + buf.writeU1(tag); + switch (value.tag()) { + case AEV_BOOLEAN, AEV_BYTE, AEV_CHAR, AEV_DOUBLE, AEV_FLOAT, AEV_INT, AEV_LONG, AEV_SHORT, AEV_STRING -> + buf.writeIndex(((AnnotationValue.OfConstant) value).constant()); + case AEV_CLASS -> buf.writeIndex(((AnnotationValue.OfClass) value).className()); + case AEV_ENUM -> { + var enumValue = (AnnotationValue.OfEnum) value; + buf.writeIndex(enumValue.className()); + buf.writeIndex(enumValue.constantName()); + } + case AEV_ANNOTATION -> writeAnnotation(buf, ((AnnotationValue.OfAnnotation) value).annotation()); + case AEV_ARRAY -> { + var array = ((AnnotationValue.OfArray) value).values(); + buf.writeU2(array.size()); + for (var e : array) { + writeAnnotationValue(buf, e); + } + } + default -> throw new InternalError("Unknown value " + value); + } } } From d72810794bf70f82e46f7220698e4d27d5973d5b Mon Sep 17 00:00:00 2001 From: Chris Plummer Date: Tue, 20 Aug 2024 23:34:31 +0000 Subject: [PATCH 51/67] 8338482: com/sun/jdi/ThreadMemoryLeakTest.java requires that compressed oops are enabled Reviewed-by: amenkov, kevinw --- test/jdk/com/sun/jdi/ThreadMemoryLeakTest.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/test/jdk/com/sun/jdi/ThreadMemoryLeakTest.java b/test/jdk/com/sun/jdi/ThreadMemoryLeakTest.java index cb2264a66eb..ef44829f1f7 100644 --- a/test/jdk/com/sun/jdi/ThreadMemoryLeakTest.java +++ b/test/jdk/com/sun/jdi/ThreadMemoryLeakTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,8 +26,9 @@ * @bug 8297638 * @summary JDI memory leak when creating and destroying many threads * - * @comment Don't allow -Xcomp or -Xint as they impact memory useage and number of iterations - * @requires (vm.compMode == "Xmixed") + * @comment Don't allow -Xcomp or -Xint as they impact memory useage and number of iterations. + * Require compressed oops because not doing so increases memory usage. 
+ * @requires (vm.compMode == "Xmixed") & vm.opt.final.UseCompressedOops * @run build TestScaffold VMConnection TargetListener TargetAdapter * @run compile -g ThreadMemoryLeakTest.java * @comment run with -Xmx7m so any leak will quickly produce OOME From 88ccbb60919e4523064b0da17184eedcd9efa087 Mon Sep 17 00:00:00 2001 From: Chen Liang Date: Wed, 21 Aug 2024 01:05:41 +0000 Subject: [PATCH 52/67] 8336934: Clean up JavaLangReflectAccess Reviewed-by: rriggs, darcy --- .../java/lang/reflect/Constructor.java | 8 +++ .../classes/java/lang/reflect/Method.java | 15 ----- .../java/lang/reflect/ReflectAccess.java | 63 ++----------------- .../access/JavaLangReflectAccess.java | 52 +++------------ .../jdk/internal/access/SharedSecrets.java | 8 ++- .../internal/reflect/ReflectionFactory.java | 57 +++-------------- 6 files changed, 35 insertions(+), 168 deletions(-) diff --git a/src/java.base/share/classes/java/lang/reflect/Constructor.java b/src/java.base/share/classes/java/lang/reflect/Constructor.java index 99f14d01536..668fc575688 100644 --- a/src/java.base/share/classes/java/lang/reflect/Constructor.java +++ b/src/java.base/share/classes/java/lang/reflect/Constructor.java @@ -165,6 +165,14 @@ Constructor copy() { return res; } + // Creates a new root constructor with a custom accessor for serialization hooks. + Constructor newWithAccessor(ConstructorAccessor accessor) { + var res = new Constructor<>(clazz, parameterTypes, exceptionTypes, modifiers, slot, + signature, annotations, parameterAnnotations); + res.constructorAccessor = accessor; + return res; + } + /** * {@inheritDoc} * diff --git a/src/java.base/share/classes/java/lang/reflect/Method.java b/src/java.base/share/classes/java/lang/reflect/Method.java index 80e0209249c..9b929509882 100644 --- a/src/java.base/share/classes/java/lang/reflect/Method.java +++ b/src/java.base/share/classes/java/lang/reflect/Method.java @@ -173,21 +173,6 @@ Method copy() { return res; } - /** - * Make a copy of a leaf method. - */ - Method leafCopy() { - if (this.root == null) - throw new IllegalArgumentException("Can only leafCopy a non-root Method"); - - Method res = new Method(clazz, name, parameterTypes, returnType, - exceptionTypes, modifiers, slot, signature, - annotations, parameterAnnotations, annotationDefault); - res.root = root; - res.methodAccessor = methodAccessor; - return res; - } - /** * @throws InaccessibleObjectException {@inheritDoc} * @throws SecurityException {@inheritDoc} diff --git a/src/java.base/share/classes/java/lang/reflect/ReflectAccess.java b/src/java.base/share/classes/java/lang/reflect/ReflectAccess.java index f815862edf6..835ffef616e 100644 --- a/src/java.base/share/classes/java/lang/reflect/ReflectAccess.java +++ b/src/java.base/share/classes/java/lang/reflect/ReflectAccess.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,65 +25,15 @@ package java.lang.reflect; -import jdk.internal.reflect.MethodAccessor; +import jdk.internal.access.JavaLangReflectAccess; import jdk.internal.reflect.ConstructorAccessor; /** Package-private class implementing the jdk.internal.access.JavaLangReflectAccess interface, allowing the java.lang package to instantiate objects in this package. 
*/ - -class ReflectAccess implements jdk.internal.access.JavaLangReflectAccess { - public Constructor newConstructor(Class declaringClass, - Class[] parameterTypes, - Class[] checkedExceptions, - int modifiers, - int slot, - String signature, - byte[] annotations, - byte[] parameterAnnotations) - { - return new Constructor<>(declaringClass, - parameterTypes, - checkedExceptions, - modifiers, - slot, - signature, - annotations, - parameterAnnotations); - } - - public MethodAccessor getMethodAccessor(Method m) { - return m.getMethodAccessor(); - } - - public void setMethodAccessor(Method m, MethodAccessor accessor) { - m.setMethodAccessor(accessor); - } - - public ConstructorAccessor getConstructorAccessor(Constructor c) { - return c.getConstructorAccessor(); - } - - public void setConstructorAccessor(Constructor c, - ConstructorAccessor accessor) - { - c.setConstructorAccessor(accessor); - } - - public int getConstructorSlot(Constructor c) { - return c.getSlot(); - } - - public String getConstructorSignature(Constructor c) { - return c.getSignature(); - } - - public byte[] getConstructorAnnotations(Constructor c) { - return c.getRawAnnotations(); - } - - public byte[] getConstructorParameterAnnotations(Constructor c) { - return c.getRawParameterAnnotations(); +final class ReflectAccess implements JavaLangReflectAccess { + public Constructor newConstructorWithAccessor(Constructor original, ConstructorAccessor accessor) { + return original.newWithAccessor(accessor); } public byte[] getExecutableTypeAnnotationBytes(Executable ex) { @@ -105,9 +55,6 @@ public Class[] getExecutableSharedExceptionTypes(Executable ex) { public Method copyMethod(Method arg) { return arg.copy(); } - public Method leafCopyMethod(Method arg) { - return arg.leafCopy(); - } public Field copyField(Field arg) { return arg.copy(); diff --git a/src/java.base/share/classes/jdk/internal/access/JavaLangReflectAccess.java b/src/java.base/share/classes/jdk/internal/access/JavaLangReflectAccess.java index f49221e44ed..d0c415d2dc6 100644 --- a/src/java.base/share/classes/jdk/internal/access/JavaLangReflectAccess.java +++ b/src/java.base/share/classes/jdk/internal/access/JavaLangReflectAccess.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,67 +29,29 @@ import jdk.internal.reflect.*; /** An interface which gives privileged packages Java-level access to - internals of java.lang.reflect. */ - + internals of java.lang.reflect. Use as a last resort! */ public interface JavaLangReflectAccess { - /** Creates a new java.lang.reflect.Constructor. Access checks as - per java.lang.reflect.AccessibleObject are not overridden. 
*/ - public Constructor newConstructor(Class declaringClass, - Class[] parameterTypes, - Class[] checkedExceptions, - int modifiers, - int slot, - String signature, - byte[] annotations, - byte[] parameterAnnotations); - - /** Gets the MethodAccessor object for a java.lang.reflect.Method */ - public MethodAccessor getMethodAccessor(Method m); - - /** Sets the MethodAccessor object for a java.lang.reflect.Method */ - public void setMethodAccessor(Method m, MethodAccessor accessor); - - /** Gets the ConstructorAccessor object for a - java.lang.reflect.Constructor */ - public ConstructorAccessor getConstructorAccessor(Constructor c); - - /** Sets the ConstructorAccessor object for a - java.lang.reflect.Constructor */ - public void setConstructorAccessor(Constructor c, - ConstructorAccessor accessor); + /** + * Creates a new root constructor from the original one, with + * a custom accessor. Used by serialization hooks. + */ + Constructor newConstructorWithAccessor(Constructor original, ConstructorAccessor accessor); /** Gets the byte[] that encodes TypeAnnotations on an Executable. */ public byte[] getExecutableTypeAnnotationBytes(Executable ex); - /** Gets the "slot" field from a Constructor (used for serialization) */ - public int getConstructorSlot(Constructor c); - - /** Gets the "signature" field from a Constructor (used for serialization) */ - public String getConstructorSignature(Constructor c); - - /** Gets the "annotations" field from a Constructor (used for serialization) */ - public byte[] getConstructorAnnotations(Constructor c); - - /** Gets the "parameterAnnotations" field from a Constructor (used for serialization) */ - public byte[] getConstructorParameterAnnotations(Constructor c); - /** Gets the shared array of parameter types of an Executable. */ public Class[] getExecutableSharedParameterTypes(Executable ex); /** Gets the shared array of exception types of an Executable. */ public Class[] getExecutableSharedExceptionTypes(Executable ex); - // // Copying routines, needed to quickly fabricate new Field, // Method, and Constructor objects from templates - // /** Makes a "child" copy of a Method */ public Method copyMethod(Method arg); - /** Makes a copy of this non-root a Method */ - public Method leafCopyMethod(Method arg); - /** Makes a "child" copy of a Field */ public Field copyField(Field arg); diff --git a/src/java.base/share/classes/jdk/internal/access/SharedSecrets.java b/src/java.base/share/classes/jdk/internal/access/SharedSecrets.java index 0dacbef993a..5acafe01a89 100644 --- a/src/java.base/share/classes/jdk/internal/access/SharedSecrets.java +++ b/src/java.base/share/classes/jdk/internal/access/SharedSecrets.java @@ -53,7 +53,13 @@ interface and provides the ability to call package-private methods within that package; the object implementing that interface is provided through a third package to which access is restricted. This framework avoids the primary disadvantage of using reflection - for this purpose, namely the loss of compile-time checking. */ + for this purpose, namely the loss of compile-time checking. + *

    + * Usage of these APIs often means bad encapsulation designs, + * increased complexity and lack of sustainability. + * Use this only as a last resort! + * + */ public class SharedSecrets { private static JavaAWTAccess javaAWTAccess; diff --git a/src/java.base/share/classes/jdk/internal/reflect/ReflectionFactory.java b/src/java.base/share/classes/jdk/internal/reflect/ReflectionFactory.java index 2222c810524..9a31c5402d4 100644 --- a/src/java.base/share/classes/jdk/internal/reflect/ReflectionFactory.java +++ b/src/java.base/share/classes/jdk/internal/reflect/ReflectionFactory.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -179,41 +179,6 @@ public ConstructorAccessor newConstructorAccessor(Constructor c) { // // - /** Creates a new java.lang.reflect.Constructor. Access checks as - per java.lang.reflect.AccessibleObject are not overridden. */ - public Constructor newConstructor(Class declaringClass, - Class[] parameterTypes, - Class[] checkedExceptions, - int modifiers, - int slot, - String signature, - byte[] annotations, - byte[] parameterAnnotations) - { - return langReflectAccess.newConstructor(declaringClass, - parameterTypes, - checkedExceptions, - modifiers, - slot, - signature, - annotations, - parameterAnnotations); - } - - /** Gets the ConstructorAccessor object for a - java.lang.reflect.Constructor */ - public ConstructorAccessor getConstructorAccessor(Constructor c) { - return langReflectAccess.getConstructorAccessor(c); - } - - /** Sets the ConstructorAccessor object for a - java.lang.reflect.Constructor */ - public void setConstructorAccessor(Constructor c, - ConstructorAccessor accessor) - { - langReflectAccess.setConstructorAccessor(c, accessor); - } - /** Makes a copy of the passed method. The returned method is a "child" of the passed one; see the comments in Method.java for details. */ @@ -225,10 +190,10 @@ public Method copyMethod(Method arg) { * a "child" but a "sibling" of the Method in arg. Should only be * used on non-root methods. */ public Method leafCopyMethod(Method arg) { - return langReflectAccess.leafCopyMethod(arg); + Method root = langReflectAccess.getRoot(arg); + return langReflectAccess.copyMethod(root); } - /** Makes a copy of the passed field. The returned field is a "child" of the passed one; see the comments in Field.java for details. */ @@ -369,15 +334,6 @@ public final Constructor newConstructorForSerialization(Class cl) { private final Constructor generateConstructor(Class cl, Constructor constructorToCall) { - - Constructor ctor = newConstructor(constructorToCall.getDeclaringClass(), - constructorToCall.getParameterTypes(), - constructorToCall.getExceptionTypes(), - constructorToCall.getModifiers(), - langReflectAccess.getConstructorSlot(constructorToCall), - langReflectAccess.getConstructorSignature(constructorToCall), - langReflectAccess.getConstructorAnnotations(constructorToCall), - langReflectAccess.getConstructorParameterAnnotations(constructorToCall)); ConstructorAccessor acc; if (useOldSerializableConstructor()) { acc = new SerializationConstructorAccessorGenerator(). 
@@ -386,9 +342,12 @@ private final Constructor generateConstructor(Class cl, constructorToCall.getModifiers(), constructorToCall.getDeclaringClass()); } else { - acc = MethodHandleAccessorFactory.newSerializableConstructorAccessor(cl, ctor); + acc = MethodHandleAccessorFactory.newSerializableConstructorAccessor(cl, constructorToCall); } - setConstructorAccessor(ctor, acc); + // Unlike other root constructors, this constructor is not copied for mutation + // but directly mutated, as it is not cached. To cache this constructor, + // setAccessible call must be done on a copy and return that copy instead. + Constructor ctor = langReflectAccess.newConstructorWithAccessor(constructorToCall, acc); ctor.setAccessible(true); return ctor; } From cafb3dc49157daf12c1a0e5d78acca8188c56918 Mon Sep 17 00:00:00 2001 From: Prasanta Sadhukhan Date: Wed, 21 Aug 2024 07:04:05 +0000 Subject: [PATCH 53/67] 6318027: BasicScrollBarUI does not disable timer when enclosing frame is disabled. Reviewed-by: abhiscxk, tr --- .../com/apple/laf/AquaScrollBarUI.java | 66 +++++-- .../swing/plaf/basic/BasicScrollBarUI.java | 22 ++- .../JScrollBar/DisableFrameFromScrollBar.java | 185 ++++++++++++++++++ 3 files changed, 260 insertions(+), 13 deletions(-) create mode 100644 test/jdk/javax/swing/JScrollBar/DisableFrameFromScrollBar.java diff --git a/src/java.desktop/macosx/classes/com/apple/laf/AquaScrollBarUI.java b/src/java.desktop/macosx/classes/com/apple/laf/AquaScrollBarUI.java index ea239d83a23..f71a15a9787 100644 --- a/src/java.desktop/macosx/classes/com/apple/laf/AquaScrollBarUI.java +++ b/src/java.desktop/macosx/classes/com/apple/laf/AquaScrollBarUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,19 +25,46 @@ package com.apple.laf; -import java.awt.*; -import java.awt.event.*; -import java.beans.*; -import java.util.*; - -import javax.swing.*; +import java.awt.Adjustable; +import java.awt.Component; +import java.awt.Container; +import java.awt.Dimension; +import java.awt.Graphics; +import java.awt.Insets; +import java.awt.Point; +import java.awt.Rectangle; +import java.awt.event.ActionEvent; +import java.awt.event.ActionListener; +import java.awt.event.MouseAdapter; +import java.awt.event.MouseEvent; +import java.awt.event.MouseMotionListener; + +import java.beans.PropertyChangeEvent; +import java.beans.PropertyChangeListener; +import java.util.HashMap; +import java.util.Map; + +import javax.swing.BoundedRangeModel; +import javax.swing.JComponent; +import javax.swing.JFrame; +import javax.swing.JScrollBar; +import javax.swing.LookAndFeel; import javax.swing.Timer; -import javax.swing.event.*; -import javax.swing.plaf.*; - -import apple.laf.*; -import apple.laf.JRSUIConstants.*; +import javax.swing.event.ChangeEvent; +import javax.swing.event.ChangeListener; +import javax.swing.plaf.ComponentUI; +import javax.swing.plaf.ScrollBarUI; + +import apple.laf.JRSUIStateFactory; +import apple.laf.JRSUIConstants.Hit; +import apple.laf.JRSUIConstants.NothingToScroll; +import apple.laf.JRSUIConstants.Orientation; +import apple.laf.JRSUIConstants.ScrollBarHit; +import apple.laf.JRSUIConstants.ScrollBarPart; +import apple.laf.JRSUIConstants.ShowArrows; +import apple.laf.JRSUIConstants.State; import apple.laf.JRSUIState.ScrollBarState; +import apple.laf.JRSUIUtils; import com.apple.laf.AquaUtils.RecyclableSingleton; @@ -527,6 +554,21 @@ void setScrollByBlock(final boolean block) { } public void actionPerformed(final ActionEvent e) { + Component parent = fScrollBar.getParent(); + do { + if (parent instanceof JFrame par) { + if (!par.isEnabled()) { + ((Timer)e.getSource()).stop(); + fScrollBar.setValueIsAdjusting(false); + return; + } + break; + } else { + if (parent != null) { + parent = parent.getParent(); + } + } + } while (parent != null); if (fUseBlockIncrement) { Hit newPart = getPartHit(fTrackListener.fCurrentMouseX, fTrackListener.fCurrentMouseY); diff --git a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicScrollBarUI.java b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicScrollBarUI.java index 6d747f31c08..60274d766cd 100644 --- a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicScrollBarUI.java +++ b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicScrollBarUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
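Both scroll bar UIs now guard the repeat Timer the same way: actionPerformed() walks up the component hierarchy, and if the nearest enclosing JFrame is disabled it stops the Timer and resets valueIsAdjusting (BasicScrollBarUI additionally clears the button listener's handledEvent flag). As a rough illustration only, not part of the patch, the same ancestor walk can be expressed with SwingUtilities.getAncestorOfClass; the class and method names here (ScrollTimerGuard, stopIfFrameDisabled) are invented for the sketch:

import java.awt.Container;
import java.awt.event.ActionEvent;
import javax.swing.JComponent;
import javax.swing.JFrame;
import javax.swing.SwingUtilities;
import javax.swing.Timer;

final class ScrollTimerGuard {
    // Stops the repeat timer when the scroll bar's enclosing JFrame is disabled;
    // mirrors the explicit parent-walk loops added in this patch.
    static boolean stopIfFrameDisabled(JComponent scrollbar, ActionEvent e) {
        Container ancestor = SwingUtilities.getAncestorOfClass(JFrame.class, scrollbar);
        if (ancestor instanceof JFrame frame && !frame.isEnabled()) {
            ((Timer) e.getSource()).stop();
            return true;   // caller should bail out of actionPerformed()
        }
        return false;      // keep scrolling as usual
    }
}

The patch keeps the explicit loop and, before returning, also resets the adjusting state so the scroll bar is left consistent when the timer is cut short.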
* * This code is free software; you can redistribute it and/or modify it @@ -48,6 +48,7 @@ import javax.swing.InputMap; import javax.swing.JButton; import javax.swing.JComponent; +import javax.swing.JFrame; import javax.swing.JList; import javax.swing.JScrollBar; import javax.swing.JScrollPane; @@ -1608,6 +1609,25 @@ public ScrollListener(int dir, boolean block) { /** {@inheritDoc} */ public void actionPerformed(ActionEvent e) { + // If frame is disabled and timer is started in mousePressed + // and mouseReleased is not called, then timer will not be stopped + // Stop the timer if frame is disabled + Component parent = scrollbar.getParent(); + do { + if (parent instanceof JFrame par) { + if (!par.isEnabled()) { + ((Timer)e.getSource()).stop(); + buttonListener.handledEvent = false; + scrollbar.setValueIsAdjusting(false); + return; + } + break; + } else { + if (parent != null) { + parent = parent.getParent(); + } + } + } while (parent != null); if(useBlockIncrement) { scrollByBlock(direction); // Stop scrolling if the thumb catches up with the mouse diff --git a/test/jdk/javax/swing/JScrollBar/DisableFrameFromScrollBar.java b/test/jdk/javax/swing/JScrollBar/DisableFrameFromScrollBar.java new file mode 100644 index 00000000000..ce9fd7e7af8 --- /dev/null +++ b/test/jdk/javax/swing/JScrollBar/DisableFrameFromScrollBar.java @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 6318027 + * @key headful + * @summary Verifies BasicScrollBarUI disables timer when enclosing frame is disabled + * @run main DisableFrameFromScrollBar + */ + +import java.awt.FlowLayout; +import java.awt.Point; +import java.awt.Rectangle; +import java.awt.Robot; +import java.awt.event.AdjustmentEvent; +import java.awt.event.AdjustmentListener; +import java.awt.event.InputEvent; + +import javax.swing.JFrame; +import javax.swing.JScrollBar; +import javax.swing.SwingUtilities; +import javax.swing.UIManager; +import javax.swing.UnsupportedLookAndFeelException; +import javax.swing.event.ChangeEvent; +import javax.swing.event.ChangeListener; + +public class DisableFrameFromScrollBar { + + private static JFrame frame; + private static JScrollBar bar; + private static int oldValue; + private static volatile boolean doCheck; + private static volatile boolean isAdjusting; + + private static void setLookAndFeel(UIManager.LookAndFeelInfo laf) { + try { + UIManager.setLookAndFeel(laf.getClassName()); + } catch (UnsupportedLookAndFeelException ignored) { + System.out.println("Unsupported LAF: " + laf.getClassName()); + } catch (ClassNotFoundException | InstantiationException + | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + private static void createUI() { + frame = new JFrame(DisableFrameFromScrollBar.class.getName()); + bar = new JScrollBar(); + bar.getModel().addChangeListener(new DisableChangeListener(frame)); + frame.getContentPane().setLayout(new FlowLayout()); + frame.add(bar); + + frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + frame.setSize(150, 150); + frame.setLocationRelativeTo(null); + frame.setVisible(true); + } + + public static void main(String[] args) throws Exception { + for (UIManager.LookAndFeelInfo laf : UIManager.getInstalledLookAndFeels()) { + System.out.println("Testing LAF : " + laf.getClassName()); + Robot robot = new Robot(); + robot.setAutoDelay(100); + try { + SwingUtilities.invokeAndWait(() -> { + setLookAndFeel(laf); + createUI(); + }); + + robot.waitForIdle(); + robot.delay(1000); + Point point = getClickPoint(); + robot.mouseMove(point.x, point.y); + robot.waitForIdle(); + robot.mousePress(InputEvent.BUTTON1_DOWN_MASK); + robot.mouseRelease(InputEvent.BUTTON1_DOWN_MASK); + + SwingUtilities.invokeAndWait(() -> { + oldValue = bar.getValue(); + bar.addAdjustmentListener(new AdjustmentListener() { + public void adjustmentValueChanged(AdjustmentEvent e) { + int curValue = e.getValue(); + int extent = bar.getMaximum() - bar.getVisibleAmount(); + if (curValue < extent && curValue != oldValue) { + oldValue = curValue; + isAdjusting = true; + } else { + doCheck = true; + isAdjusting = false; + } + } + }); + }); + do { + Thread.sleep(200); + } while (isAdjusting && !doCheck); + if (bar.getValue() == (bar.getMaximum() - bar.getVisibleAmount())) { + throw new RuntimeException("ScrollBar didn't disable timer"); + } + } finally { + SwingUtilities.invokeAndWait(() -> { + if (frame != null) { + frame.dispose(); + } + }); + } + } + } + + private static Point getClickPoint() throws Exception { + final Point[] result = new Point[1]; + + SwingUtilities.invokeAndWait(new Runnable() { + + @Override + public void run() { + Point p = bar.getLocationOnScreen(); + Rectangle rect = bar.getBounds(); + result[0] = new Point((int) (p.x + rect.width / 2), + (int) (p.y + rect.height - 10)); + } + }); + + return result[0]; + + } + + public static class DisableChangeListener implements ChangeListener { + private final JFrame m_frame; + 
private boolean m_done; + + public DisableChangeListener(JFrame p_frame) { + m_frame = p_frame; + } + + public void stateChanged(ChangeEvent p_e) { + if (!m_done) { + m_frame.setEnabled(false); + Thread t = new Thread(new Enabler(m_frame)); + t.start(); + m_done = true; + } + } + } + + public static class Enabler implements Runnable { + private JFrame m_frame; + + Enabler(JFrame p_frame) { + m_frame = p_frame; + } + + public void run() { + try { + Thread.sleep(1000); + } + catch (InterruptedException e) { + e.printStackTrace(); + } + m_frame.setEnabled(true); + } + } +} + From 598169756c903bb1f77e35ea32717043bc166e3c Mon Sep 17 00:00:00 2001 From: Aleksey Shipilev Date: Wed, 21 Aug 2024 08:17:01 +0000 Subject: [PATCH 54/67] 8337828: CDS: Trim down minimum GC region alignment Reviewed-by: iklam, phh --- src/hotspot/share/cds/archiveHeapWriter.hpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/hotspot/share/cds/archiveHeapWriter.hpp b/src/hotspot/share/cds/archiveHeapWriter.hpp index 352aeb9a08f..b337b75402f 100644 --- a/src/hotspot/share/cds/archiveHeapWriter.hpp +++ b/src/hotspot/share/cds/archiveHeapWriter.hpp @@ -112,11 +112,10 @@ class ArchiveHeapWriter : AllStatic { public: static const intptr_t NOCOOPS_REQUESTED_BASE = 0x10000000; - // The minimum region size of all collectors that are supported by CDS in - // ArchiveHeapLoader::can_map() mode. Currently only G1 is supported. G1's region size - // depends on -Xmx, but can never be smaller than 1 * M. - // (TODO: Perhaps change to 256K to be compatible with Shenandoah) - static constexpr int MIN_GC_REGION_ALIGNMENT = 1 * M; + // The minimum region size of all collectors that are supported by CDS. + // G1 heap region size can never be smaller than 1M. + // Shenandoah heap region size can never be smaller than 256K. 
+ static constexpr int MIN_GC_REGION_ALIGNMENT = 256 * K; private: class EmbeddedOopRelocator; From e88a3b0574c0a6c6acb5faf7b67674d5b7f0797c Mon Sep 17 00:00:00 2001 From: Adam Sotona Date: Wed, 21 Aug 2024 08:19:35 +0000 Subject: [PATCH 55/67] 8338661: StackMapTable is invalid if frames appear in dead code Reviewed-by: liach --- .../classfile/impl/AttributeHolder.java | 8 ++++ .../internal/classfile/impl/StackCounter.java | 23 +++++++++-- test/jdk/jdk/classfile/StackMapsTest.java | 40 ++++++++++++++++--- .../jdk/classfile/CodeAttributeTools.java | 1 + 4 files changed, 63 insertions(+), 9 deletions(-) diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/AttributeHolder.java b/src/java.base/share/classes/jdk/internal/classfile/impl/AttributeHolder.java index ba51474c131..31e1a7f2533 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/AttributeHolder.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/AttributeHolder.java @@ -53,6 +53,14 @@ public void writeTo(BufWriterImpl buf) { Util.writeAttributes(buf, attributes); } + @SuppressWarnings("unchecked") + A get(AttributeMapper am) { + for (Attribute a : attributes) + if (a.attributeMapper() == am) + return (A)a; + return null; + } + boolean isPresent(AttributeMapper am) { for (Attribute a : attributes) if (a.attributeMapper() == am) diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/StackCounter.java b/src/java.base/share/classes/jdk/internal/classfile/impl/StackCounter.java index 24a27ad5af6..861bb509420 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/StackCounter.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/StackCounter.java @@ -25,20 +25,23 @@ */ package jdk.internal.classfile.impl; +import java.lang.classfile.Attributes; import java.lang.classfile.TypeKind; +import java.lang.classfile.attribute.StackMapFrameInfo; +import java.lang.classfile.attribute.StackMapTableAttribute; import java.lang.classfile.constantpool.ConstantDynamicEntry; import java.lang.classfile.constantpool.DynamicConstantPoolEntry; import java.lang.classfile.constantpool.MemberRefEntry; +import java.lang.constant.ClassDesc; import java.lang.constant.MethodTypeDesc; import java.nio.ByteBuffer; import java.util.ArrayDeque; import java.util.BitSet; import java.util.List; import java.util.Queue; +import java.util.stream.Collectors; import static java.lang.classfile.ClassFile.*; -import java.lang.constant.ClassDesc; -import java.util.stream.Collectors; public final class StackCounter { @@ -47,6 +50,7 @@ private record Target(int bci, int stack) {} static StackCounter of(DirectCodeBuilder dcb, BufWriterImpl buf) { return new StackCounter( dcb, + dcb.attributes.get(Attributes.stackMapTable()), buf.thisClass().asSymbol(), dcb.methodInfo.methodName().stringValue(), dcb.methodInfo.methodTypeSymbol(), @@ -97,6 +101,7 @@ private boolean next() { } public StackCounter(LabelContext labelContext, + StackMapTableAttribute smta, ClassDesc thisClass, String methodName, MethodTypeDesc methodDesc, @@ -111,8 +116,20 @@ public StackCounter(LabelContext labelContext, this.bytecode = bytecode; this.cp = cp; targets = new ArrayDeque<>(); - maxStack = stack = rets = 0; + stack = rets = 0; + maxStack = handlers.isEmpty() ? 
0 : 1; for (var h : handlers) targets.add(new Target(labelContext.labelToBci(h.handler), 1)); + if (smta != null) { + for (var smfi : smta.entries()) { + int frameStack = smfi.stack().size(); + for (var vti : smfi.stack()) { + if (vti == StackMapFrameInfo.SimpleVerificationTypeInfo.ITEM_LONG + || vti == StackMapFrameInfo.SimpleVerificationTypeInfo.ITEM_DOUBLE) frameStack++; + } + if (maxStack < frameStack) maxStack = frameStack; + targets.add(new Target(labelContext.labelToBci(smfi.target()), frameStack)); + } + } maxLocals = isStatic ? 0 : 1; maxLocals += Util.parameterSlots(methodDesc); bcs = new RawBytecodeHelper(bytecode); diff --git a/test/jdk/jdk/classfile/StackMapsTest.java b/test/jdk/jdk/classfile/StackMapsTest.java index 3ee8eaf51ee..09be56f0de2 100644 --- a/test/jdk/jdk/classfile/StackMapsTest.java +++ b/test/jdk/jdk/classfile/StackMapsTest.java @@ -24,18 +24,25 @@ /* * @test * @summary Testing Classfile stack maps generator. - * @bug 8305990 8320222 8320618 8335475 8338623 + * @bug 8305990 8320222 8320618 8335475 8338623 8338661 * @build testdata.* * @run junit StackMapsTest */ import java.lang.classfile.*; import java.lang.classfile.attribute.CodeAttribute; +import java.lang.classfile.attribute.StackMapFrameInfo; +import java.lang.classfile.attribute.StackMapTableAttribute; import java.lang.classfile.components.ClassPrinter; +import java.lang.constant.ClassDesc; +import java.lang.constant.ConstantDescs; +import java.lang.constant.MethodTypeDesc; +import java.lang.reflect.AccessFlag; import java.net.URI; import java.nio.file.FileSystem; import java.nio.file.FileSystems; import java.nio.file.Files; +import java.util.List; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; @@ -45,11 +52,6 @@ import static java.lang.classfile.ClassFile.ACC_STATIC; import static java.lang.constant.ConstantDescs.*; -import java.lang.constant.ClassDesc; -import java.lang.constant.ConstantDescs; -import java.lang.constant.MethodTypeDesc; -import java.lang.reflect.AccessFlag; - /** * StackMapsTest */ @@ -368,4 +370,30 @@ void testInvocationCounters(ClassFile.StackMapsOption option) { assertEquals(1, code.maxStack()); } } + + @Test + void testDeadCodeCountersWithCustomSMTA() { + ClassDesc bar = ClassDesc.of("Bar"); + byte[] bytes = ClassFile.of(ClassFile.StackMapsOption.DROP_STACK_MAPS).build(bar, clb -> clb + .withMethodBody( + "foo", MethodTypeDesc.of(ConstantDescs.CD_long), ACC_STATIC, cob -> { + cob.lconst_0().lreturn(); + Label f2 = cob.newBoundLabel(); + cob.lstore(0); + Label f3 = cob.newBoundLabel(); + cob.lload(0).lreturn().with( + StackMapTableAttribute.of(List.of( + StackMapFrameInfo.of(f2, + List.of(), + List.of(StackMapFrameInfo.SimpleVerificationTypeInfo.ITEM_LONG)), + StackMapFrameInfo.of(f3, + List.of(StackMapFrameInfo.SimpleVerificationTypeInfo.ITEM_LONG), + List.of())))); + } + )); + assertEmpty(ClassFile.of().verify(bytes)); + var code = (CodeAttribute) ClassFile.of().parse(bytes).methods().getFirst().code().orElseThrow(); + assertEquals(2, code.maxLocals()); + assertEquals(2, code.maxStack()); + } } diff --git a/test/micro/org/openjdk/bench/jdk/classfile/CodeAttributeTools.java b/test/micro/org/openjdk/bench/jdk/classfile/CodeAttributeTools.java index d2defd6c5ef..4cf578889d1 100644 --- a/test/micro/org/openjdk/bench/jdk/classfile/CodeAttributeTools.java +++ b/test/micro/org/openjdk/bench/jdk/classfile/CodeAttributeTools.java @@ -122,6 +122,7 @@ public void benchmarkStackMapsGenerator(Blackhole bh) 
{ public void benchmarkStackCounter(Blackhole bh) { for (var d : data) bh.consume(new StackCounter( d.labelContext(), + null, d.thisClass(), d.methodName(), d.methodDesc(), From 715fa8f9fe7242e86b985aece3d078b226f53fb9 Mon Sep 17 00:00:00 2001 From: Lutz Schmidt Date: Wed, 21 Aug 2024 08:23:31 +0000 Subject: [PATCH 56/67] 8336498: [macos] [build]: install-file macro may run into permission denied error Reviewed-by: clanger, erikj --- make/common/FileUtils.gmk | 1 + 1 file changed, 1 insertion(+) diff --git a/make/common/FileUtils.gmk b/make/common/FileUtils.gmk index 114f3adefbe..cda5932395f 100644 --- a/make/common/FileUtils.gmk +++ b/make/common/FileUtils.gmk @@ -136,6 +136,7 @@ ifeq ($(call isTargetOs, macosx), true) $(CP) -fRP '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'; \ fi if [ -n "`$(XATTR) -ls '$(call DecodeSpace, $@)'`" ]; then \ + $(CHMOD) u+w '$(call DecodeSpace, $@)'; \ $(XATTR) -cs '$(call DecodeSpace, $@)'; \ fi endef From c4cf1e93bb22bf7c65ce1943fff91f74839434df Mon Sep 17 00:00:00 2001 From: Gui Cao Date: Wed, 21 Aug 2024 08:58:40 +0000 Subject: [PATCH 57/67] 8338539: New Object to ObjectMonitor mapping: riscv64 implementation Reviewed-by: fyang, rehn, mli --- .../cpu/riscv/c1_MacroAssembler_riscv.cpp | 2 +- .../cpu/riscv/c2_MacroAssembler_riscv.cpp | 178 +++++++++++------- .../cpu/riscv/c2_MacroAssembler_riscv.hpp | 4 +- src/hotspot/cpu/riscv/interp_masm_riscv.cpp | 2 +- .../cpu/riscv/macroAssembler_riscv.cpp | 9 +- .../cpu/riscv/macroAssembler_riscv.hpp | 2 +- src/hotspot/cpu/riscv/riscv.ad | 16 +- src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp | 2 +- .../share/runtime/basicLock.inline.hpp | 2 +- 9 files changed, 136 insertions(+), 81 deletions(-) diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp index 547e80c7e47..dc1a3d443ac 100644 --- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp @@ -70,7 +70,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr } if (LockingMode == LM_LIGHTWEIGHT) { - lightweight_lock(obj, hdr, temp, t1, slow_case); + lightweight_lock(disp_hdr, obj, hdr, temp, t1, slow_case); } else if (LockingMode == LM_LEGACY) { Label done; // Load object header diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp index 8322b35e205..a75bfdfc9dc 100644 --- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp @@ -253,12 +253,13 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, // C2 uses the value of flag (0 vs !0) to determine the continuation. } -void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register tmp1, Register tmp2, Register tmp3) { +void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, + Register tmp1, Register tmp2, Register tmp3) { // Flag register, zero for success; non-zero for failure. Register flag = t1; assert(LockingMode == LM_LIGHTWEIGHT, "must be"); - assert_different_registers(obj, tmp1, tmp2, tmp3, flag, t0); + assert_different_registers(obj, box, tmp1, tmp2, tmp3, flag, t0); mv(flag, 1); @@ -269,6 +270,11 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register tmp1, Regis // Finish fast lock unsuccessfully. slow_path MUST branch to with flag != 0 Label slow_path; + if (UseObjectMonitorTable) { + // Clear cache in case fast locking succeeds. 
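Returning to the StackCounter change for JDK-8338661 above: when the method being built carries an explicit StackMapTable, each declared frame's operand stack now seeds max_stack, with ITEM_LONG/ITEM_DOUBLE counted as two slots, so code reachable only through a declared frame (for example dead code after an unconditional return, as in the new StackMapsTest case) still gets a large enough stack. A self-contained sketch of that seeding computation, using the java.lang.classfile types named in the patch; the class and method names (MaxStackSeed, seed) are invented for the example:

import java.lang.classfile.attribute.StackMapFrameInfo;
import java.lang.classfile.attribute.StackMapFrameInfo.SimpleVerificationTypeInfo;
import java.util.List;

final class MaxStackSeed {
    // Lower bound for max_stack derived from declared stack map frames.
    // Category-2 types (long, double) occupy two operand-stack slots.
    static int seed(List<StackMapFrameInfo> frames, boolean hasExceptionHandlers) {
        int maxStack = hasExceptionHandlers ? 1 : 0; // a handler starts with the thrown exception on the stack
        for (StackMapFrameInfo frame : frames) {
            int frameStack = frame.stack().size();
            for (var vti : frame.stack()) {
                if (vti == SimpleVerificationTypeInfo.ITEM_LONG
                        || vti == SimpleVerificationTypeInfo.ITEM_DOUBLE) {
                    frameStack++;
                }
            }
            maxStack = Math.max(maxStack, frameStack);
        }
        return maxStack;
    }
}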
+ sd(zr, Address(box, BasicLock::object_monitor_cache_offset_in_bytes())); + } + if (DiagnoseSyncOnValueBasedClasses != 0) { load_klass(tmp1, obj); lwu(tmp1, Address(tmp1, Klass::access_flags_offset())); @@ -277,6 +283,7 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register tmp1, Regis } const Register tmp1_mark = tmp1; + const Register tmp3_t = tmp3; { // Lightweight locking @@ -284,7 +291,6 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register tmp1, Regis Label push; const Register tmp2_top = tmp2; - const Register tmp3_t = tmp3; // Check if lock-stack is full. lwu(tmp2_top, Address(xthread, JavaThread::lock_stack_top_offset())); @@ -323,29 +329,67 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register tmp1, Regis { // Handle inflated monitor. bind(inflated); + const Register tmp1_monitor = tmp1; if (!UseObjectMonitorTable) { - // mark contains the tagged ObjectMonitor*. - const Register tmp1_tagged_monitor = tmp1_mark; - const uintptr_t monitor_tag = markWord::monitor_value; - const Register tmp2_owner_addr = tmp2; - const Register tmp3_owner = tmp3; + assert(tmp1_monitor == tmp1_mark, "should be the same here"); + } else { + Label monitor_found; - // Compute owner address. - la(tmp2_owner_addr, Address(tmp1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag))); + // Load cache address + la(tmp3_t, Address(xthread, JavaThread::om_cache_oops_offset())); - // CAS owner (null => current thread). - cmpxchg(/*addr*/ tmp2_owner_addr, /*expected*/ zr, /*new*/ xthread, Assembler::int64, - /*acquire*/ Assembler::aq, /*release*/ Assembler::relaxed, /*result*/ tmp3_owner); - beqz(tmp3_owner, locked); + const int num_unrolled = 2; + for (int i = 0; i < num_unrolled; i++) { + ld(tmp1, Address(tmp3_t)); + beq(obj, tmp1, monitor_found); + add(tmp3_t, tmp3_t, in_bytes(OMCache::oop_to_oop_difference())); + } - // Check if recursive. - bne(tmp3_owner, xthread, slow_path); + Label loop; - // Recursive. - increment(Address(tmp1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1, tmp2, tmp3); - } else { - // OMCache lookup not supported yet. Take the slowpath. + // Search for obj in cache. + bind(loop); + + // Check for match. + ld(tmp1, Address(tmp3_t)); + beq(obj, tmp1, monitor_found); + + // Search until null encountered, guaranteed _null_sentinel at end. + add(tmp3_t, tmp3_t, in_bytes(OMCache::oop_to_oop_difference())); + bnez(tmp1, loop); + // Cache Miss. Take the slowpath. j(slow_path); + + bind(monitor_found); + ld(tmp1_monitor, Address(tmp3_t, OMCache::oop_to_monitor_difference())); + } + + const Register tmp2_owner_addr = tmp2; + const Register tmp3_owner = tmp3; + + const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 0 : checked_cast(markWord::monitor_value)); + const Address owner_address(tmp1_monitor, ObjectMonitor::owner_offset() - monitor_tag); + const Address recursions_address(tmp1_monitor, ObjectMonitor::recursions_offset() - monitor_tag); + + Label monitor_locked; + + // Compute owner address. + la(tmp2_owner_addr, owner_address); + + // CAS owner (null => current thread). + cmpxchg(/*addr*/ tmp2_owner_addr, /*expected*/ zr, /*new*/ xthread, Assembler::int64, + /*acquire*/ Assembler::aq, /*release*/ Assembler::relaxed, /*result*/ tmp3_owner); + beqz(tmp3_owner, monitor_locked); + + // Check if recursive. + bne(tmp3_owner, xthread, slow_path); + + // Recursive. 
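With UseObjectMonitorTable enabled, the fast-lock path above no longer bails straight to the slow path: it probes the per-thread OMCache (two unrolled compares, then a loop that stops at the guaranteed null sentinel) and only falls back on a miss. The control flow is sketched below in Java purely for illustration; the Entry record and lookup method are inventions of this sketch (the real data lives in JavaThread and the real code is the RISC-V assembly above):

final class MonitorCacheSketch {
    // Hypothetical cache entry; the real per-thread cache is a small fixed-size
    // array whose populated entries are followed by a null-object sentinel.
    record Entry(Object object, Object monitor) {}

    // Returns the cached monitor for obj, or null meaning "cache miss, take the slow path".
    // The assembly version unrolls the first two probes before entering the loop.
    static Object lookup(Entry[] cache, Object obj) {
        for (Entry e : cache) {
            if (e.object() == obj) {
                return e.monitor();   // cache hit
            }
            if (e.object() == null) {
                break;                // hit the sentinel: obj is not cached
            }
        }
        return null;
    }
}

On a hit the monitor pointer is also stored into the BasicLock cache slot in the box, which is what the later fast-unlock path reads back.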
+ increment(recursions_address, 1, tmp2, tmp3); + + bind(monitor_locked); + if (UseObjectMonitorTable) { + sd(tmp1_monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes())); } } @@ -370,18 +414,18 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register tmp1, Regis // C2 uses the value of flag (0 vs !0) to determine the continuation. } -void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register tmp1, Register tmp2, - Register tmp3) { +void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, + Register tmp1, Register tmp2, Register tmp3) { // Flag register, zero for success; non-zero for failure. Register flag = t1; assert(LockingMode == LM_LIGHTWEIGHT, "must be"); - assert_different_registers(obj, tmp1, tmp2, tmp3, flag, t0); + assert_different_registers(obj, box, tmp1, tmp2, tmp3, flag, t0); mv(flag, 1); // Handle inflated monitor. - Label inflated, inflated_load_monitor; + Label inflated, inflated_load_mark; // Finish fast unlock successfully. unlocked MUST branch to with flag == 0 Label unlocked; // Finish fast unlock unsuccessfully. MUST branch to with flag != 0 @@ -392,6 +436,7 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register tmp1, Reg const Register tmp3_t = tmp3; { // Lightweight unlock + Label push_and_slow_path; // Check if obj is top of lock-stack. lwu(tmp2_top, Address(xthread, JavaThread::lock_stack_top_offset())); @@ -399,7 +444,7 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register tmp1, Reg add(tmp3_t, xthread, tmp2_top); ld(tmp3_t, Address(tmp3_t)); // Top of lock stack was not obj. Must be monitor. - bne(obj, tmp3_t, inflated_load_monitor); + bne(obj, tmp3_t, inflated_load_mark); // Pop lock-stack. DEBUG_ONLY(add(tmp3_t, xthread, tmp2_top);) @@ -416,8 +461,11 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register tmp1, Reg ld(tmp1_mark, Address(obj, oopDesc::mark_offset_in_bytes())); // Check header for monitor (0b10). + // Because we got here by popping (meaning we pushed in locked) + // there will be no monitor in the box. So we need to push back the obj + // so that the runtime can fix any potential anonymous owner. test_bit(tmp3_t, tmp1_mark, exact_log2(markWord::monitor_value)); - bnez(tmp3_t, inflated); + bnez(tmp3_t, UseObjectMonitorTable ? push_and_slow_path : inflated); // Try to unlock. Transition lock bits 0b00 => 0b01 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea"); @@ -426,6 +474,7 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register tmp1, Reg /*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, /*result*/ tmp3_t); beq(tmp1_mark, tmp3_t, unlocked); + bind(push_and_slow_path); // Compare and exchange failed. // Restore lock-stack and handle the unlock in runtime. DEBUG_ONLY(add(tmp3_t, xthread, tmp2_top);) @@ -436,7 +485,7 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register tmp1, Reg } { // Handle inflated monitor. - bind(inflated_load_monitor); + bind(inflated_load_mark); ld(tmp1_mark, Address(obj, oopDesc::mark_offset_in_bytes())); #ifdef ASSERT test_bit(tmp3_t, tmp1_mark, exact_log2(markWord::monitor_value)); @@ -458,54 +507,55 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register tmp1, Reg bind(check_done); #endif - if (!UseObjectMonitorTable) { - // mark contains the tagged ObjectMonitor*. 
- const Register tmp1_monitor = tmp1_mark; - const uintptr_t monitor_tag = markWord::monitor_value; + const Register tmp1_monitor = tmp1; + if (!UseObjectMonitorTable) { + assert(tmp1_monitor == tmp1_mark, "should be the same here"); // Untag the monitor. - sub(tmp1_monitor, tmp1_mark, monitor_tag); + add(tmp1_monitor, tmp1_mark, -(int)markWord::monitor_value); + } else { + ld(tmp1_monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes())); + // No valid pointer below alignof(ObjectMonitor*). Take the slow path. + mv(tmp3_t, alignof(ObjectMonitor*)); + bltu(tmp1_monitor, tmp3_t, slow_path); + } - const Register tmp2_recursions = tmp2; - Label not_recursive; + const Register tmp2_recursions = tmp2; + Label not_recursive; - // Check if recursive. - ld(tmp2_recursions, Address(tmp1_monitor, ObjectMonitor::recursions_offset())); - beqz(tmp2_recursions, not_recursive); + // Check if recursive. + ld(tmp2_recursions, Address(tmp1_monitor, ObjectMonitor::recursions_offset())); + beqz(tmp2_recursions, not_recursive); - // Recursive unlock. - addi(tmp2_recursions, tmp2_recursions, -1); - sd(tmp2_recursions, Address(tmp1_monitor, ObjectMonitor::recursions_offset())); - j(unlocked); + // Recursive unlock. + addi(tmp2_recursions, tmp2_recursions, -1); + sd(tmp2_recursions, Address(tmp1_monitor, ObjectMonitor::recursions_offset())); + j(unlocked); - bind(not_recursive); + bind(not_recursive); - Label release; - const Register tmp2_owner_addr = tmp2; + Label release; + const Register tmp2_owner_addr = tmp2; - // Compute owner address. - la(tmp2_owner_addr, Address(tmp1_monitor, ObjectMonitor::owner_offset())); + // Compute owner address. + la(tmp2_owner_addr, Address(tmp1_monitor, ObjectMonitor::owner_offset())); - // Check if the entry lists are empty. - ld(t0, Address(tmp1_monitor, ObjectMonitor::EntryList_offset())); - ld(tmp3_t, Address(tmp1_monitor, ObjectMonitor::cxq_offset())); - orr(t0, t0, tmp3_t); - beqz(t0, release); + // Check if the entry lists are empty. + ld(t0, Address(tmp1_monitor, ObjectMonitor::EntryList_offset())); + ld(tmp3_t, Address(tmp1_monitor, ObjectMonitor::cxq_offset())); + orr(t0, t0, tmp3_t); + beqz(t0, release); - // The owner may be anonymous and we removed the last obj entry in - // the lock-stack. This loses the information about the owner. - // Write the thread to the owner field so the runtime knows the owner. - sd(xthread, Address(tmp2_owner_addr)); - j(slow_path); + // The owner may be anonymous and we removed the last obj entry in + // the lock-stack. This loses the information about the owner. + // Write the thread to the owner field so the runtime knows the owner. + sd(xthread, Address(tmp2_owner_addr)); + j(slow_path); - bind(release); - // Set owner to null. - membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); - sd(zr, Address(tmp2_owner_addr)); - } else { - // OMCache lookup not supported yet. Take the slowpath. - j(slow_path); - } + bind(release); + // Set owner to null. 
+ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore); + sd(zr, Address(tmp2_owner_addr)); } bind(unlocked); diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp index 07041fe0850..4d7f756923c 100644 --- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp @@ -47,8 +47,8 @@ void fast_lock(Register object, Register box, Register tmp1, Register tmp2, Register tmp3); void fast_unlock(Register object, Register box, Register tmp1, Register tmp2); // Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file. - void fast_lock_lightweight(Register object, Register tmp1, Register tmp2, Register tmp3); - void fast_unlock_lightweight(Register object, Register tmp1, Register tmp2, Register tmp3); + void fast_lock_lightweight(Register object, Register box, Register tmp1, Register tmp2, Register tmp3); + void fast_unlock_lightweight(Register object, Register box, Register tmp1, Register tmp2, Register tmp3); void string_compare(Register str1, Register str2, Register cnt1, Register cnt2, Register result, diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp index 17b75b30264..06b7b780d13 100644 --- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp +++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp @@ -756,7 +756,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) } if (LockingMode == LM_LIGHTWEIGHT) { - lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case); + lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case); j(count); } else if (LockingMode == LM_LEGACY) { // Load (object->mark() | 1) into swap_reg diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp index 32ccba6b0ce..c5516336ebc 100644 --- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp @@ -5792,9 +5792,9 @@ void MacroAssembler::test_bit(Register Rd, Register Rs, uint32_t bit_pos) { // - obj: the object to be locked // - tmp1, tmp2, tmp3: temporary registers, will be destroyed // - slow: branched to if locking fails -void MacroAssembler::lightweight_lock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) { +void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) { assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking"); - assert_different_registers(obj, tmp1, tmp2, tmp3, t0); + assert_different_registers(basic_lock, obj, tmp1, tmp2, tmp3, t0); Label push; const Register top = tmp1; @@ -5805,6 +5805,11 @@ void MacroAssembler::lightweight_lock(Register obj, Register tmp1, Register tmp2 // instruction emitted as it is part of C1's null check semantics. ld(mark, Address(obj, oopDesc::mark_offset_in_bytes())); + if (UseObjectMonitorTable) { + // Clear cache in case fast locking succeeds. + sd(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes())))); + } + // Check if the lock-stack is full. 
lwu(top, Address(xthread, JavaThread::lock_stack_top_offset())); mv(t, (unsigned)LockStack::end_offset()); diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp index 3c1add90367..b0404929f46 100644 --- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp @@ -1602,7 +1602,7 @@ class MacroAssembler: public Assembler { void store_conditional(Register dst, Register new_val, Register addr, enum operand_size size, Assembler::Aqrl release); public: - void lightweight_lock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow); + void lightweight_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow); void lightweight_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow); public: diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad index d3e2b0549e9..db010c9c6c8 100644 --- a/src/hotspot/cpu/riscv/riscv.ad +++ b/src/hotspot/cpu/riscv/riscv.ad @@ -10553,33 +10553,33 @@ instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iR ins_pipe(pipe_serial); %} -instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP_R10 box, iRegPNoSp tmp1, iRegPNoSp tmp2) +instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3) %{ predicate(LockingMode == LM_LIGHTWEIGHT); match(Set cr (FastLock object box)); - effect(TEMP tmp1, TEMP tmp2, USE_KILL box); + effect(TEMP tmp1, TEMP tmp2, TEMP tmp3); ins_cost(10 * DEFAULT_COST); - format %{ "fastlock $object,$box\t! kills $box,$tmp1,$tmp2 #@cmpFastLockLightweight" %} + format %{ "fastlock $object,$box\t! kills $tmp1,$tmp2,$tmp3 #@cmpFastLockLightweight" %} ins_encode %{ - __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register); + __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); %} ins_pipe(pipe_serial); %} -instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP_R10 box, iRegPNoSp tmp1, iRegPNoSp tmp2) +instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp1, iRegPNoSp tmp2, iRegPNoSp tmp3) %{ predicate(LockingMode == LM_LIGHTWEIGHT); match(Set cr (FastUnlock object box)); - effect(TEMP tmp1, TEMP tmp2, USE_KILL box); + effect(TEMP tmp1, TEMP tmp2, TEMP tmp3); ins_cost(10 * DEFAULT_COST); - format %{ "fastunlock $object,$box\t! kills $box,$tmp1,$tmp2, #@cmpFastUnlockLightweight" %} + format %{ "fastunlock $object,$box\t! 
kills $tmp1,$tmp2,$tmp3 #@cmpFastUnlockLightweight" %} ins_encode %{ - __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register); + __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); %} ins_pipe(pipe_serial); diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp index bed24e442e8..ffd904aed47 100644 --- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp +++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp @@ -1702,7 +1702,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, __ bnez(swap_reg, slow_path_lock); } else { assert(LockingMode == LM_LIGHTWEIGHT, "must be"); - __ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock); + __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock); } __ bind(count); diff --git a/src/hotspot/share/runtime/basicLock.inline.hpp b/src/hotspot/share/runtime/basicLock.inline.hpp index fb1cee8de8f..c04c8e5b117 100644 --- a/src/hotspot/share/runtime/basicLock.inline.hpp +++ b/src/hotspot/share/runtime/basicLock.inline.hpp @@ -39,7 +39,7 @@ inline void BasicLock::set_displaced_header(markWord header) { inline ObjectMonitor* BasicLock::object_monitor_cache() const { assert(UseObjectMonitorTable, "must be"); -#if defined(X86) || defined(AARCH64) +#if defined(X86) || defined(AARCH64) || defined(RISCV64) return reinterpret_cast(get_metadata()); #else // Other platforms do not make use of the cache yet, From 7458952dedc0a34b5c7f3e9e228f9b18e08f19e3 Mon Sep 17 00:00:00 2001 From: Hamlin Li Date: Wed, 21 Aug 2024 10:17:51 +0000 Subject: [PATCH 58/67] 8338595: Add more linesize for MIME decoder in macro bench test Base64Decode Reviewed-by: rehn --- test/micro/org/openjdk/bench/java/util/Base64Decode.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/micro/org/openjdk/bench/java/util/Base64Decode.java b/test/micro/org/openjdk/bench/java/util/Base64Decode.java index 2e399efc973..2e68563beda 100644 --- a/test/micro/org/openjdk/bench/java/util/Base64Decode.java +++ b/test/micro/org/openjdk/bench/java/util/Base64Decode.java @@ -50,7 +50,7 @@ public class Base64Decode { "112", "512", "1000", "20000", "50000"}) private int maxNumBytes; - @Param({"4"}) + @Param({"4", "32", "76", "128"}) private int lineSize; private byte[] lineSeparator = {'\r', '\n'}; From 80adea8e0ab3753c3623267c6a2bd3eaed69ad29 Mon Sep 17 00:00:00 2001 From: Chen Liang Date: Wed, 21 Aug 2024 11:58:21 +0000 Subject: [PATCH 59/67] 8338545: Functional interface implementations for common pre-boot ClassFile operations Reviewed-by: asotona --- .../java/lang/classfile/ClassBuilder.java | 8 ++-- .../java/lang/invoke/ClassSpecializer.java | 27 +++++------- .../invoke/InnerClassLambdaMetafactory.java | 44 +++++++------------ .../lang/invoke/InvokerBytecodeGenerator.java | 27 +++--------- .../classfile/impl/BufferedCodeBuilder.java | 7 +-- .../classfile/impl/BufferedFieldBuilder.java | 7 +-- .../classfile/impl/BufferedMethodBuilder.java | 7 +-- .../jdk/internal/classfile/impl/CodeImpl.java | 7 +-- .../internal/classfile/impl/FieldImpl.java | 7 +-- .../internal/classfile/impl/MethodImpl.java | 8 +--- .../jdk/internal/classfile/impl/Util.java | 33 ++++++++++++++ 11 files changed, 76 insertions(+), 106 deletions(-) diff --git a/src/java.base/share/classes/java/lang/classfile/ClassBuilder.java b/src/java.base/share/classes/java/lang/classfile/ClassBuilder.java index 98371c9e15a..1d5bb271dbe 
100644 --- a/src/java.base/share/classes/java/lang/classfile/ClassBuilder.java +++ b/src/java.base/share/classes/java/lang/classfile/ClassBuilder.java @@ -165,7 +165,7 @@ ClassBuilder withField(Utf8Entry name, default ClassBuilder withField(Utf8Entry name, Utf8Entry descriptor, int flags) { - return withField(name, descriptor, fb -> fb.withFlags(flags)); + return withField(name, descriptor, Util.buildingFlags(flags)); } /** @@ -194,7 +194,7 @@ default ClassBuilder withField(String name, default ClassBuilder withField(String name, ClassDesc descriptor, int flags) { - return withField(name, descriptor, fb -> fb.withFlags(flags)); + return withField(name, descriptor, Util.buildingFlags(flags)); } /** @@ -241,7 +241,7 @@ default ClassBuilder withMethodBody(Utf8Entry name, Utf8Entry descriptor, int methodFlags, Consumer handler) { - return withMethod(name, descriptor, methodFlags, mb -> mb.withCode(handler)); + return withMethod(name, descriptor, methodFlags, Util.buildingCode(handler)); } /** @@ -276,7 +276,7 @@ default ClassBuilder withMethodBody(String name, MethodTypeDesc descriptor, int methodFlags, Consumer handler) { - return withMethod(name, descriptor, methodFlags, mb -> mb.withCode(handler)); + return withMethod(name, descriptor, methodFlags, Util.buildingCode(handler)); } /** diff --git a/src/java.base/share/classes/java/lang/invoke/ClassSpecializer.java b/src/java.base/share/classes/java/lang/invoke/ClassSpecializer.java index 1df3b3c6628..94a271780c3 100644 --- a/src/java.base/share/classes/java/lang/invoke/ClassSpecializer.java +++ b/src/java.base/share/classes/java/lang/invoke/ClassSpecializer.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
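The ClassBuilder changes above reroute the flags-only withField overloads and the withMethodBody convenience through the small named Consumer implementations added to Util later in this patch (buildingFlags, buildingCode) instead of capturing lambdas. For context, a minimal standalone use of withMethodBody, mirroring the API calls that appear elsewhere in this patch; the class name WithMethodBodyDemo and the generated class "Demo" are made up for the example:

import java.lang.classfile.ClassFile;
import java.lang.constant.ClassDesc;
import java.lang.constant.ConstantDescs;
import java.lang.constant.MethodTypeDesc;

import static java.lang.classfile.ClassFile.ACC_STATIC;

final class WithMethodBodyDemo {
    // Builds a class "Demo" containing:  static long zero() { return 0L; }
    // withMethodBody is the shorthand for withMethod(...) plus withCode(...).
    static byte[] generate() {
        return ClassFile.of().build(ClassDesc.of("Demo"), clb -> clb
                .withMethodBody("zero", MethodTypeDesc.of(ConstantDescs.CD_long), ACC_STATIC,
                        cob -> cob.lconst_0().lreturn()));
    }
}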
* * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,6 @@ import java.lang.constant.ClassDesc; import java.lang.constant.MethodTypeDesc; import java.lang.invoke.LambdaForm.BasicType; -import java.lang.invoke.InnerClassLambdaMetafactory.MethodBody; import java.lang.reflect.Constructor; import java.lang.reflect.Field; import java.lang.reflect.Modifier; @@ -68,8 +67,6 @@ abstract class ClassSpecializer.SpeciesDat private static final ClassDesc CD_LambdaForm = ReferenceClassDescImpl.ofValidated("Ljava/lang/invoke/LambdaForm;"); private static final ClassDesc CD_BoundMethodHandle = ReferenceClassDescImpl.ofValidated("Ljava/lang/invoke/BoundMethodHandle;"); - private static final Consumer STATIC_FIELD_FLAGS = new InnerClassLambdaMetafactory.FieldFlags(ACC_STATIC); - private static final Consumer FINAL_FIELD_FLAGS = new InnerClassLambdaMetafactory.FieldFlags(ACC_FINAL); private final Class topClass; private final Class keyType; @@ -625,7 +622,7 @@ public void accept(ClassBuilder clb) { .with(SourceFileAttribute.of(classDesc.displayName())) // emit static types and BMH_SPECIES fields - .withField(sdFieldName, CD_SPECIES_DATA, STATIC_FIELD_FLAGS); + .withField(sdFieldName, CD_SPECIES_DATA, ACC_STATIC); // handy holder for dealing with groups of typed values (ctor arguments and fields) class Var { @@ -709,26 +706,26 @@ void emitLoadInstruction(CodeBuilder cob) { // emit bound argument fields for (Var field : fields) { - clb.withField(field.name, field.desc, FINAL_FIELD_FLAGS); + clb.withField(field.name, field.desc, ACC_FINAL); } // emit implementation of speciesData() - clb.withMethod(SPECIES_DATA_NAME, MTD_SPECIES_DATA, (SPECIES_DATA_MODS & ACC_PPP) | ACC_FINAL, - new MethodBody(new Consumer() { + clb.withMethodBody(SPECIES_DATA_NAME, MTD_SPECIES_DATA, (SPECIES_DATA_MODS & ACC_PPP) | ACC_FINAL, + new Consumer<>() { @Override public void accept(CodeBuilder cob) { cob.getstatic(classDesc, sdFieldName, CD_SPECIES_DATA) .areturn(); } - })); + }); // figure out the constructor arguments MethodType superCtorType = ClassSpecializer.this.baseConstructorType(); MethodType thisCtorType = superCtorType.appendParameterTypes(fieldTypes); // emit constructor - clb.withMethod(INIT_NAME, methodDesc(thisCtorType), ACC_PRIVATE, - new MethodBody(new Consumer() { + clb.withMethodBody(INIT_NAME, methodDesc(thisCtorType), ACC_PRIVATE, + new Consumer<>() { @Override public void accept(CodeBuilder cob) { cob.aload(0); // this @@ -753,12 +750,12 @@ public void accept(CodeBuilder cob) { cob.return_(); } - })); + }); // emit make() ...factory method wrapping constructor MethodType ftryType = thisCtorType.changeReturnType(topClass()); - clb.withMethod("make", methodDesc(ftryType), ACC_STATIC, - new MethodBody(new Consumer() { + clb.withMethodBody("make", methodDesc(ftryType), ACC_STATIC, + new Consumer<>() { @Override public void accept(CodeBuilder cob) { // make instance @@ -773,7 +770,7 @@ public void accept(CodeBuilder cob) { cob.invokespecial(classDesc, INIT_NAME, methodDesc(thisCtorType)) .areturn(); } - })); + }); // For each transform, emit the customized override of the transform method. 
// This method mixes together some incoming arguments (from the transform's diff --git a/src/java.base/share/classes/java/lang/invoke/InnerClassLambdaMetafactory.java b/src/java.base/share/classes/java/lang/invoke/InnerClassLambdaMetafactory.java index ce2547710a5..24c97d7f7dc 100644 --- a/src/java.base/share/classes/java/lang/invoke/InnerClassLambdaMetafactory.java +++ b/src/java.base/share/classes/java/lang/invoke/InnerClassLambdaMetafactory.java @@ -72,20 +72,6 @@ private static final String[] EMPTY_STRING_ARRAY = new String[0]; private static final ClassDesc[] EMPTY_CLASSDESC_ARRAY = ConstantUtils.EMPTY_CLASSDESC; - // Static builders to avoid lambdas - record FieldFlags(int flags) implements Consumer { - @Override - public void accept(FieldBuilder fb) { - fb.withFlags(flags); - } - }; - record MethodBody(Consumer code) implements Consumer { - @Override - public void accept(MethodBuilder mb) { - mb.withCode(code); - } - }; - // For dumping generated classes to disk, for debugging purposes private static final ClassFileDumper lambdaProxyClassFileDumper; @@ -324,7 +310,7 @@ public void accept(ClassBuilder clb) { .withInterfaceSymbols(interfaces); // Generate final fields to be filled in by constructor for (int i = 0; i < argDescs.length; i++) { - clb.withField(argNames[i], argDescs[i], new FieldFlags(ACC_PRIVATE | ACC_FINAL)); + clb.withField(argNames[i], argDescs[i], ACC_PRIVATE | ACC_FINAL); } generateConstructor(clb); @@ -334,7 +320,7 @@ public void accept(ClassBuilder clb) { } // Forward the SAM method - clb.withMethod(interfaceMethodName, + clb.withMethodBody(interfaceMethodName, methodDesc(interfaceMethodType), ACC_PUBLIC, forwardingMethod(interfaceMethodType)); @@ -342,7 +328,7 @@ public void accept(ClassBuilder clb) { // Forward the bridges if (altMethods != null) { for (MethodType mt : altMethods) { - clb.withMethod(interfaceMethodName, + clb.withMethodBody(interfaceMethodName, methodDesc(mt), ACC_PUBLIC | ACC_BRIDGE, forwardingMethod(mt)); @@ -376,10 +362,10 @@ private void generateClassInitializer(ClassBuilder clb) { ClassDesc lambdaTypeDescriptor = classDesc(factoryType.returnType()); // Generate the static final field that holds the lambda singleton - clb.withField(LAMBDA_INSTANCE_FIELD, lambdaTypeDescriptor, new FieldFlags(ACC_PRIVATE | ACC_STATIC | ACC_FINAL)); + clb.withField(LAMBDA_INSTANCE_FIELD, lambdaTypeDescriptor, ACC_PRIVATE | ACC_STATIC | ACC_FINAL); // Instantiate the lambda and store it to the static final field - clb.withMethod(CLASS_INIT_NAME, MTD_void, ACC_STATIC, new MethodBody(new Consumer() { + clb.withMethodBody(CLASS_INIT_NAME, MTD_void, ACC_STATIC, new Consumer<>() { @Override public void accept(CodeBuilder cob) { assert factoryType.parameterCount() == 0; @@ -389,7 +375,7 @@ public void accept(CodeBuilder cob) { .putstatic(lambdaClassDesc, LAMBDA_INSTANCE_FIELD, lambdaTypeDescriptor) .return_(); } - })); + }); } /** @@ -397,8 +383,8 @@ public void accept(CodeBuilder cob) { */ private void generateConstructor(ClassBuilder clb) { // Generate constructor - clb.withMethod(INIT_NAME, constructorTypeDesc, ACC_PRIVATE, - new MethodBody(new Consumer() { + clb.withMethodBody(INIT_NAME, constructorTypeDesc, ACC_PRIVATE, + new Consumer<>() { @Override public void accept(CodeBuilder cob) { cob.aload(0) @@ -412,7 +398,7 @@ public void accept(CodeBuilder cob) { } cob.return_(); } - })); + }); } private static class SerializationSupport { @@ -439,8 +425,8 @@ private static class SerializationSupport { * Generate a writeReplace method that supports serialization */ 
private void generateSerializationFriendlyMethods(ClassBuilder clb) { - clb.withMethod(SerializationSupport.NAME_METHOD_WRITE_REPLACE, SerializationSupport.MTD_Object, ACC_PRIVATE | ACC_FINAL, - new MethodBody(new Consumer() { + clb.withMethodBody(SerializationSupport.NAME_METHOD_WRITE_REPLACE, SerializationSupport.MTD_Object, ACC_PRIVATE | ACC_FINAL, + new Consumer<>() { @Override public void accept(CodeBuilder cob) { cob.new_(SerializationSupport.CD_SerializedLambda) @@ -468,7 +454,7 @@ public void accept(CodeBuilder cob) { SerializationSupport.MTD_CTOR_SERIALIZED_LAMBDA) .areturn(); } - })); + }); } /** @@ -504,8 +490,8 @@ public void accept(CodeBuilder cob) { * This method generates a method body which calls the lambda implementation * method, converting arguments, as needed. */ - Consumer forwardingMethod(MethodType methodType) { - return new MethodBody(new Consumer() { + Consumer forwardingMethod(MethodType methodType) { + return new Consumer<>() { @Override public void accept(CodeBuilder cob) { if (implKind == MethodHandleInfo.REF_newInvokeSpecial) { @@ -542,7 +528,7 @@ public void accept(CodeBuilder cob) { TypeConvertingMethodAdapter.convertType(cob, implReturnClass, samReturnClass, samReturnClass); cob.return_(TypeKind.from(samReturnClass)); } - }); + }; } private void convertArgumentTypes(CodeBuilder cob, MethodType samType) { diff --git a/src/java.base/share/classes/java/lang/invoke/InvokerBytecodeGenerator.java b/src/java.base/share/classes/java/lang/invoke/InvokerBytecodeGenerator.java index 6d71296c134..17cf699c536 100644 --- a/src/java.base/share/classes/java/lang/invoke/InvokerBytecodeGenerator.java +++ b/src/java.base/share/classes/java/lang/invoke/InvokerBytecodeGenerator.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -83,21 +83,6 @@ class InvokerBytecodeGenerator { private static final String CLASS_PREFIX = "java/lang/invoke/LambdaForm$"; private static final String SOURCE_PREFIX = "LambdaForm$"; - // Static builders to avoid lambdas - private static final Consumer STATIC_FINAL_FIELD = new Consumer() { - @Override - public void accept(FieldBuilder fb) { - fb.withFlags(ACC_STATIC | ACC_FINAL); - } - }; - - record MethodBody(Consumer code) implements Consumer { - @Override - public void accept(MethodBuilder mb) { - mb.withCode(code); - } - }; - /** Name of its super class*/ static final ClassDesc INVOKER_SUPER_DESC = CD_Object; @@ -328,10 +313,10 @@ static void clinit(ClassBuilder clb, ClassDesc classDesc, List classD for (ClassData p : classData) { // add the static field - clb.withField(p.name, p.desc, STATIC_FINAL_FIELD); + clb.withField(p.name, p.desc, ACC_STATIC | ACC_FINAL); } - clb.withMethod(CLASS_INIT_NAME, MTD_void, ACC_STATIC, new MethodBody(new Consumer() { + clb.withMethodBody(CLASS_INIT_NAME, MTD_void, ACC_STATIC, new Consumer<>() { @Override public void accept(CodeBuilder cob) { cob.loadConstant(classDesc) @@ -356,7 +341,7 @@ public void accept(CodeBuilder cob) { } cob.return_(); } - })); + }); } private void emitLoadInsn(CodeBuilder cob, TypeKind type, int index) { @@ -1671,14 +1656,14 @@ public void accept(CodeBuilder cob) { */ private void bogusMethod(ClassBuilder clb, Object os) { if (dumper().isEnabled()) { - clb.withMethod("dummy", MTD_void, ACC_STATIC, new MethodBody(new Consumer() { + clb.withMethodBody("dummy", MTD_void, ACC_STATIC, new Consumer<>() { @Override public void accept(CodeBuilder cob) { cob.loadConstant(os.toString()); cob.pop(); cob.return_(); } - })); + }); } } diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedCodeBuilder.java b/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedCodeBuilder.java index b690f8dbfe7..c506d265f68 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedCodeBuilder.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedCodeBuilder.java @@ -173,12 +173,7 @@ public Optional parent() { @Override public void writeTo(DirectMethodBuilder builder) { - builder.withCode(new Consumer<>() { - @Override - public void accept(CodeBuilder cb) { - forEach(cb); - } - }); + builder.withCode(Util.writingAll(this)); } @Override diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedFieldBuilder.java b/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedFieldBuilder.java index d165ddd3928..8cf274d746c 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedFieldBuilder.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedFieldBuilder.java @@ -104,12 +104,7 @@ public Utf8Entry fieldType() { @Override public void writeTo(DirectClassBuilder builder) { - builder.withField(name, desc, new Consumer<>() { - @Override - public void accept(FieldBuilder fieldBuilder) { - elements.forEach(fieldBuilder); - } - }); + builder.withField(name, desc, Util.writingAll(this)); } @Override diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedMethodBuilder.java b/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedMethodBuilder.java index 4a9e9b111ce..84ddd09b990 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedMethodBuilder.java +++ 
b/src/java.base/share/classes/jdk/internal/classfile/impl/BufferedMethodBuilder.java @@ -205,12 +205,7 @@ public Optional code() { @Override public void writeTo(DirectClassBuilder builder) { - builder.withMethod(methodName(), methodType(), methodFlags(), new Consumer<>() { - @Override - public void accept(MethodBuilder mb) { - forEach(mb); - } - }); + builder.withMethod(methodName(), methodType(), methodFlags(), Util.writingAll(this)); } @Override diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/CodeImpl.java b/src/java.base/share/classes/jdk/internal/classfile/impl/CodeImpl.java index d4719fad0b1..8be7e92f5b6 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/CodeImpl.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/CodeImpl.java @@ -147,12 +147,7 @@ public void writeTo(BufWriterImpl buf) { } else { DirectCodeBuilder.build((MethodInfo) enclosingMethod, - new Consumer() { - @Override - public void accept(CodeBuilder cb) { - forEach(cb); - } - }, + Util.writingAll(this), (SplitConstantPool)buf.constantPool(), buf.context(), null).writeTo(buf); diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/FieldImpl.java b/src/java.base/share/classes/jdk/internal/classfile/impl/FieldImpl.java index a71593a5712..30bb8136e45 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/FieldImpl.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/FieldImpl.java @@ -99,12 +99,7 @@ public void writeTo(DirectClassBuilder builder) { builder.withField(this); } else { - builder.withField(fieldName(), fieldType(), new Consumer<>() { - @Override - public void accept(FieldBuilder fb) { - FieldImpl.this.forEach(fb); - } - }); + builder.withField(fieldName(), fieldType(), Util.writingAll(this)); } } diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/MethodImpl.java b/src/java.base/share/classes/jdk/internal/classfile/impl/MethodImpl.java index 032b18600a8..05de881ba29 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/MethodImpl.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/MethodImpl.java @@ -136,13 +136,7 @@ public void writeTo(DirectClassBuilder builder) { builder.withMethod(this); } else { - builder.withMethod(methodName(), methodType(), methodFlags(), - new Consumer<>() { - @Override - public void accept(MethodBuilder mb) { - MethodImpl.this.forEach(mb); - } - }); + builder.withMethod(methodName(), methodType(), methodFlags(), Util.writingAll(this)); } } diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/Util.java b/src/java.base/share/classes/jdk/internal/classfile/impl/Util.java index 079ac9551ab..1ff80d76676 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/Util.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/Util.java @@ -24,7 +24,10 @@ */ package jdk.internal.classfile.impl; +import java.lang.classfile.CodeBuilder; import java.lang.classfile.CustomAttribute; +import java.lang.classfile.FieldBuilder; +import java.lang.classfile.MethodBuilder; import java.lang.classfile.PseudoInstruction; import java.lang.classfile.constantpool.PoolEntry; import java.lang.constant.ClassDesc; @@ -63,6 +66,36 @@ public class Util { private Util() { } + public static Consumer> writingAll(Iterable container) { + record ForEachConsumer(Iterable container) implements Consumer> { + @Override + public void accept(Consumer consumer) { + container.forEach(consumer); + } + } + return new ForEachConsumer<>(container); + } + + 
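writingAll above, like buildingCode and buildingFlags that follow, packages what would otherwise be a lambda into a small named record implementing Consumer, so these paths stay usable before the invokedynamic/lambda machinery is ready (the "pre-boot" in the change title). A trivial standalone illustration of the pattern, with all names invented for the example:

import java.util.function.Consumer;

final class NoLambdaExample {
    // A named record implementing the functional interface: behaves like the
    // lambda  s -> System.out.println(prefix + s)  but needs no invokedynamic
    // bootstrap and no runtime-spun class.
    record Printer(String prefix) implements Consumer<String> {
        @Override
        public void accept(String s) {
            System.out.println(prefix + s);
        }
    }

    public static void main(String[] args) {
        Consumer<String> c = new Printer("got: ");
        c.accept("hello");
    }
}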
public static Consumer buildingCode(Consumer codeHandler) { + record WithCodeMethodHandler(Consumer codeHandler) implements Consumer { + @Override + public void accept(MethodBuilder builder) { + builder.withCode(codeHandler); + } + } + return new WithCodeMethodHandler(codeHandler); + } + + public static Consumer buildingFlags(int flags) { + record WithFlagFieldHandler(int flags) implements Consumer { + @Override + public void accept(FieldBuilder builder) { + builder.withFlags(flags); + } + } + return new WithFlagFieldHandler(flags); + } + private static final int ATTRIBUTE_STABILITY_COUNT = AttributeMapper.AttributeStability.values().length; public static boolean isAttributeAllowed(final Attribute attr, From 918cf114548d0098cf6a8a50032b78ee04d453db Mon Sep 17 00:00:00 2001 From: Albert Mingkun Yang Date: Wed, 21 Aug 2024 12:01:57 +0000 Subject: [PATCH 60/67] 8338490: Serial: Move Generation::print_on to subclasses Reviewed-by: gli --- src/hotspot/share/gc/serial/defNewGeneration.cpp | 15 +++++++++------ src/hotspot/share/gc/serial/defNewGeneration.hpp | 3 +-- src/hotspot/share/gc/serial/generation.cpp | 12 ------------ src/hotspot/share/gc/serial/generation.hpp | 7 ------- src/hotspot/share/gc/serial/serialHeap.cpp | 16 ++++++++-------- .../share/gc/serial/tenuredGeneration.cpp | 10 +++++++++- .../share/gc/serial/tenuredGeneration.hpp | 3 +-- 7 files changed, 28 insertions(+), 38 deletions(-) diff --git a/src/hotspot/share/gc/serial/defNewGeneration.cpp b/src/hotspot/share/gc/serial/defNewGeneration.cpp index 047171a5eb3..f3b3c8952b9 100644 --- a/src/hotspot/share/gc/serial/defNewGeneration.cpp +++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp @@ -846,7 +846,15 @@ void DefNewGeneration::verify() { } void DefNewGeneration::print_on(outputStream* st) const { - Generation::print_on(st); + st->print(" %-10s", name()); + + st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", + capacity()/K, used()/K); + st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")", + p2i(_virtual_space.low_boundary()), + p2i(_virtual_space.high()), + p2i(_virtual_space.high_boundary())); + st->print(" eden"); eden()->print_on(st); st->print(" from"); @@ -855,11 +863,6 @@ void DefNewGeneration::print_on(outputStream* st) const { to()->print_on(st); } - -const char* DefNewGeneration::name() const { - return "def new generation"; -} - HeapWord* DefNewGeneration::allocate(size_t word_size) { // This is the slow-path allocation for the DefNewGeneration. // Most allocations are fast-path in compiled code. 
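With the printing moved into DefNewGeneration::print_on, each generation now emits its own summary line: a 10-character-wide generation name, total/used sizes in KB, and the virtual space boundaries, followed by the eden/from/to spaces. A small Java mock-up of just that line format (the real code is the C++ above; the sizes and addresses here are invented purely for illustration):

    class GenerationPrintSketch {
        public static void main(String[] args) {
            // Mimics the " %-10s total ...K, used ...K [low, high, max)" shape printed
            // by DefNewGeneration::print_on; all values below are made up.
            System.out.printf(" %-10s total %dK, used %dK [0x%016x, 0x%016x, 0x%016x)%n",
                    "DefNew", 78656, 2343, 0xd5580000L, 0xdab00000L, 0xdab00000L);
        }
    }
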
diff --git a/src/hotspot/share/gc/serial/defNewGeneration.hpp b/src/hotspot/share/gc/serial/defNewGeneration.hpp index c5b7c095ac4..e86ea6b9747 100644 --- a/src/hotspot/share/gc/serial/defNewGeneration.hpp +++ b/src/hotspot/share/gc/serial/defNewGeneration.hpp @@ -234,8 +234,7 @@ class DefNewGeneration: public Generation { void update_counters(); // Printing - virtual const char* name() const; - virtual const char* short_name() const { return "DefNew"; } + const char* name() const { return "DefNew"; } void print_on(outputStream* st) const; diff --git a/src/hotspot/share/gc/serial/generation.cpp b/src/hotspot/share/gc/serial/generation.cpp index 5d3b7fe2fe3..b15b071d710 100644 --- a/src/hotspot/share/gc/serial/generation.cpp +++ b/src/hotspot/share/gc/serial/generation.cpp @@ -58,15 +58,3 @@ Generation::Generation(ReservedSpace rs, size_t initial_size) : size_t Generation::max_capacity() const { return reserved().byte_size(); } - -void Generation::print() const { print_on(tty); } - -void Generation::print_on(outputStream* st) const { - st->print(" %-20s", name()); - st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", - capacity()/K, used()/K); - st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")", - p2i(_virtual_space.low_boundary()), - p2i(_virtual_space.high()), - p2i(_virtual_space.high_boundary())); -} diff --git a/src/hotspot/share/gc/serial/generation.hpp b/src/hotspot/share/gc/serial/generation.hpp index c6a9f94a870..e13b42956e1 100644 --- a/src/hotspot/share/gc/serial/generation.hpp +++ b/src/hotspot/share/gc/serial/generation.hpp @@ -103,13 +103,6 @@ class Generation: public CHeapObj { return _reserved.contains(p); } - // Printing - virtual const char* name() const = 0; - virtual const char* short_name() const = 0; - - virtual void print() const; - virtual void print_on(outputStream* st) const; - virtual void verify() = 0; public: diff --git a/src/hotspot/share/gc/serial/serialHeap.cpp b/src/hotspot/share/gc/serial/serialHeap.cpp index 3c481775541..9dcfb5b6092 100644 --- a/src/hotspot/share/gc/serial/serialHeap.cpp +++ b/src/hotspot/share/gc/serial/serialHeap.cpp @@ -882,12 +882,12 @@ void SerialHeap::verify(VerifyOption option /* ignored */) { } void SerialHeap::print_on(outputStream* st) const { - if (_young_gen != nullptr) { - _young_gen->print_on(st); - } - if (_old_gen != nullptr) { - _old_gen->print_on(st); - } + assert(_young_gen != nullptr, "precondition"); + assert(_old_gen != nullptr, "precondition"); + + _young_gen->print_on(st); + _old_gen->print_on(st); + MetaspaceUtils::print_on(st); } @@ -908,7 +908,7 @@ void SerialHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const { log_info(gc, heap)(HEAP_CHANGE_FORMAT" " HEAP_CHANGE_FORMAT" " HEAP_CHANGE_FORMAT, - HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(), + HEAP_CHANGE_FORMAT_ARGS(def_new_gen->name(), pre_gc_values.young_gen_used(), pre_gc_values.young_gen_capacity(), def_new_gen->used(), @@ -924,7 +924,7 @@ void SerialHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const { def_new_gen->from()->used(), def_new_gen->from()->capacity())); log_info(gc, heap)(HEAP_CHANGE_FORMAT, - HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(), + HEAP_CHANGE_FORMAT_ARGS(old_gen()->name(), pre_gc_values.old_gen_used(), pre_gc_values.old_gen_capacity(), old_gen()->used(), diff --git a/src/hotspot/share/gc/serial/tenuredGeneration.cpp b/src/hotspot/share/gc/serial/tenuredGeneration.cpp index febc4713d03..99031c379d8 100644 --- a/src/hotspot/share/gc/serial/tenuredGeneration.cpp +++ 
b/src/hotspot/share/gc/serial/tenuredGeneration.cpp @@ -440,7 +440,15 @@ void TenuredGeneration::verify() { } void TenuredGeneration::print_on(outputStream* st) const { - Generation::print_on(st); + st->print(" %-10s", name()); + + st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", + capacity()/K, used()/K); + st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")", + p2i(_virtual_space.low_boundary()), + p2i(_virtual_space.high()), + p2i(_virtual_space.high_boundary())); + st->print(" the"); _the_space->print_on(st); } diff --git a/src/hotspot/share/gc/serial/tenuredGeneration.hpp b/src/hotspot/share/gc/serial/tenuredGeneration.hpp index fc0578d4e4f..88bfe6ecf46 100644 --- a/src/hotspot/share/gc/serial/tenuredGeneration.hpp +++ b/src/hotspot/share/gc/serial/tenuredGeneration.hpp @@ -121,8 +121,7 @@ class TenuredGeneration: public Generation { CardTableRS* remset); // Printing - const char* name() const { return "tenured generation"; } - const char* short_name() const { return "Tenured"; } + const char* name() const { return "Tenured"; } // Iteration void object_iterate(ObjectClosure* blk); From 3aeb6733f958bc2b0132494b8ac51a4cfa6b98de Mon Sep 17 00:00:00 2001 From: Shaojin Wen Date: Wed, 21 Aug 2024 14:56:30 +0000 Subject: [PATCH 61/67] 8338532: Speed up the ClassFile API MethodTypeDesc#ofDescriptor Reviewed-by: redestad, liach --- .../classes/java/lang/constant/ClassDesc.java | 3 +- .../java/lang/constant/ConstantDescs.java | 18 +-- .../java/lang/invoke/ConstantBootstraps.java | 3 +- .../jdk/internal/constant/ConstantUtils.java | 106 ++++++++--------- .../internal/constant/MethodTypeDescImpl.java | 107 ++++++++++++++++-- .../constant/PrimitiveClassDescImpl.java | 43 ++++++- .../constant/ReferenceClassDescImpl.java | 4 +- .../classes/sun/invoke/util/Wrapper.java | 15 --- .../internal/constant/ConstantUtilsTest.java | 2 +- .../constant/MethodTypeDescFactories.java | 5 + .../bench/java/lang/invoke/Wrappers.java | 7 -- 11 files changed, 202 insertions(+), 111 deletions(-) diff --git a/src/java.base/share/classes/java/lang/constant/ClassDesc.java b/src/java.base/share/classes/java/lang/constant/ClassDesc.java index d2da4ec5c85..0c714850a8f 100644 --- a/src/java.base/share/classes/java/lang/constant/ClassDesc.java +++ b/src/java.base/share/classes/java/lang/constant/ClassDesc.java @@ -36,6 +36,7 @@ import static jdk.internal.constant.ConstantUtils.MAX_ARRAY_TYPE_DESC_DIMENSIONS; import static jdk.internal.constant.ConstantUtils.arrayDepth; import static jdk.internal.constant.ConstantUtils.binaryToInternal; +import static jdk.internal.constant.ConstantUtils.forPrimitiveType; import static jdk.internal.constant.ConstantUtils.internalToBinary; import static jdk.internal.constant.ConstantUtils.validateBinaryClassName; import static jdk.internal.constant.ConstantUtils.validateInternalClassName; @@ -164,7 +165,7 @@ static ClassDesc of(String packageName, String className) { static ClassDesc ofDescriptor(String descriptor) { // implicit null-check return (descriptor.length() == 1) - ? Wrapper.forPrimitiveType(descriptor.charAt(0)).basicClassDescriptor() + ? 
forPrimitiveType(descriptor, 0) // will throw IAE on descriptor.length == 0 or if array dimensions too long : ReferenceClassDescImpl.of(descriptor); } diff --git a/src/java.base/share/classes/java/lang/constant/ConstantDescs.java b/src/java.base/share/classes/java/lang/constant/ConstantDescs.java index 9947b77ff83..708dfd8fb15 100644 --- a/src/java.base/share/classes/java/lang/constant/ConstantDescs.java +++ b/src/java.base/share/classes/java/lang/constant/ConstantDescs.java @@ -240,31 +240,31 @@ private ConstantDescs() { } CD_Object, CD_Object); /** {@link ClassDesc} representing the primitive type {@code int} */ - public static final ClassDesc CD_int = new PrimitiveClassDescImpl("I"); + public static final ClassDesc CD_int = PrimitiveClassDescImpl.CD_int; /** {@link ClassDesc} representing the primitive type {@code long} */ - public static final ClassDesc CD_long = new PrimitiveClassDescImpl("J"); + public static final ClassDesc CD_long = PrimitiveClassDescImpl.CD_long; /** {@link ClassDesc} representing the primitive type {@code float} */ - public static final ClassDesc CD_float = new PrimitiveClassDescImpl("F"); + public static final ClassDesc CD_float = PrimitiveClassDescImpl.CD_float; /** {@link ClassDesc} representing the primitive type {@code double} */ - public static final ClassDesc CD_double = new PrimitiveClassDescImpl("D"); + public static final ClassDesc CD_double = PrimitiveClassDescImpl.CD_double; /** {@link ClassDesc} representing the primitive type {@code short} */ - public static final ClassDesc CD_short = new PrimitiveClassDescImpl("S"); + public static final ClassDesc CD_short = PrimitiveClassDescImpl.CD_short; /** {@link ClassDesc} representing the primitive type {@code byte} */ - public static final ClassDesc CD_byte = new PrimitiveClassDescImpl("B"); + public static final ClassDesc CD_byte = PrimitiveClassDescImpl.CD_byte; /** {@link ClassDesc} representing the primitive type {@code char} */ - public static final ClassDesc CD_char = new PrimitiveClassDescImpl("C"); + public static final ClassDesc CD_char = PrimitiveClassDescImpl.CD_char; /** {@link ClassDesc} representing the primitive type {@code boolean} */ - public static final ClassDesc CD_boolean = new PrimitiveClassDescImpl("Z"); + public static final ClassDesc CD_boolean = PrimitiveClassDescImpl.CD_boolean; /** {@link ClassDesc} representing the primitive type {@code void} */ - public static final ClassDesc CD_void = new PrimitiveClassDescImpl("V"); + public static final ClassDesc CD_void = PrimitiveClassDescImpl.CD_void; /** * {@link MethodHandleDesc} representing {@link MethodHandles#classData(Lookup, String, Class) MethodHandles.classData} diff --git a/src/java.base/share/classes/java/lang/invoke/ConstantBootstraps.java b/src/java.base/share/classes/java/lang/invoke/ConstantBootstraps.java index 179004f2687..3f35b5144fb 100644 --- a/src/java.base/share/classes/java/lang/invoke/ConstantBootstraps.java +++ b/src/java.base/share/classes/java/lang/invoke/ConstantBootstraps.java @@ -25,6 +25,7 @@ package java.lang.invoke; import sun.invoke.util.Wrapper; +import jdk.internal.constant.ConstantUtils; import static java.lang.invoke.MethodHandleNatives.mapLookupExceptionToError; import static java.util.Objects.requireNonNull; @@ -112,7 +113,7 @@ public static Class primitiveClass(MethodHandles.Lookup lookup, String name, throw new IllegalArgumentException(String.format("not primitive: %s", name)); } - return Wrapper.forPrimitiveType(name.charAt(0)).primitiveType(); + return ConstantUtils.forPrimitiveType(name, 
0).resolveConstantDesc(lookup); } /** diff --git a/src/java.base/share/classes/jdk/internal/constant/ConstantUtils.java b/src/java.base/share/classes/jdk/internal/constant/ConstantUtils.java index da90f373eb5..f58bead542f 100644 --- a/src/java.base/share/classes/jdk/internal/constant/ConstantUtils.java +++ b/src/java.base/share/classes/jdk/internal/constant/ConstantUtils.java @@ -35,6 +35,8 @@ import java.util.List; import java.util.Set; +import static jdk.internal.constant.PrimitiveClassDescImpl.*; + /** * Helper methods for the implementation of {@code java.lang.constant}. */ @@ -269,59 +271,40 @@ public static String dropFirstAndLastChar(String s) { return s.substring(1, s.length() - 1); } - /** - * Parses a method descriptor string, and return a list of field descriptor - * strings, return type first, then parameter types - * - * @param descriptor the descriptor string - * @return the list of types - * @throws IllegalArgumentException if the descriptor string is not valid - */ - public static List parseMethodDescriptor(String descriptor) { - int cur = 0, end = descriptor.length(); - ArrayList ptypes = new ArrayList<>(); - ptypes.add(null); // placeholder for return type - - if (cur >= end || descriptor.charAt(cur) != '(') - throw new IllegalArgumentException("Bad method descriptor: " + descriptor); - - ++cur; // skip '(' - while (cur < end && descriptor.charAt(cur) != ')') { - int len = skipOverFieldSignature(descriptor, cur, end, false); - if (len == 0) - throw new IllegalArgumentException("Bad method descriptor: " + descriptor); - ptypes.add(resolveClassDesc(descriptor, cur, len)); - cur += len; - } - if (cur >= end) - throw new IllegalArgumentException("Bad method descriptor: " + descriptor); - ++cur; // skip ')' - - int rLen = skipOverFieldSignature(descriptor, cur, end, true); - if (rLen == 0 || cur + rLen != end) - throw new IllegalArgumentException("Bad method descriptor: " + descriptor); - ptypes.set(0, resolveClassDesc(descriptor, cur, rLen)); - return ptypes; + public static PrimitiveClassDescImpl forPrimitiveType(String descriptor, int offset) { + return switch (descriptor.charAt(offset)) { + case JVM_SIGNATURE_BYTE -> CD_byte; + case JVM_SIGNATURE_CHAR -> CD_char; + case JVM_SIGNATURE_FLOAT -> CD_float; + case JVM_SIGNATURE_DOUBLE -> CD_double; + case JVM_SIGNATURE_INT -> CD_int; + case JVM_SIGNATURE_LONG -> CD_long; + case JVM_SIGNATURE_SHORT -> CD_short; + case JVM_SIGNATURE_VOID -> CD_void; + case JVM_SIGNATURE_BOOLEAN -> CD_boolean; + default -> throw badMethodDescriptor(descriptor); + }; } - private static ClassDesc resolveClassDesc(String descriptor, int start, int len) { + static ClassDesc resolveClassDesc(String descriptor, int start, int len) { if (len == 1) { - return Wrapper.forPrimitiveType(descriptor.charAt(start)).basicClassDescriptor(); + return forPrimitiveType(descriptor, start); } - // Pre-verified in parseMethodDescriptor; avoid redundant verification + + // Pre-verified in MethodTypeDescImpl#ofDescriptor; avoid redundant verification return ReferenceClassDescImpl.ofValidated(descriptor.substring(start, start + len)); } + static IllegalArgumentException badMethodDescriptor(String descriptor) { + return new IllegalArgumentException("Bad method descriptor: " + descriptor); + } + private static final char JVM_SIGNATURE_ARRAY = '['; private static final char JVM_SIGNATURE_BYTE = 'B'; private static final char JVM_SIGNATURE_CHAR = 'C'; private static final char JVM_SIGNATURE_CLASS = 'L'; - private static final char JVM_SIGNATURE_ENDCLASS = ';'; - private 
static final char JVM_SIGNATURE_ENUM = 'E'; private static final char JVM_SIGNATURE_FLOAT = 'F'; private static final char JVM_SIGNATURE_DOUBLE = 'D'; - private static final char JVM_SIGNATURE_FUNC = '('; - private static final char JVM_SIGNATURE_ENDFUNC = ')'; private static final char JVM_SIGNATURE_INT = 'I'; private static final char JVM_SIGNATURE_LONG = 'J'; private static final char JVM_SIGNATURE_SHORT = 'S'; @@ -334,17 +317,22 @@ private static ClassDesc resolveClassDesc(String descriptor, int start, int len) * @param descriptor the descriptor string * @param start the starting index into the string * @param end the ending index within the string - * @param voidOK is void acceptable? * @return the length of the descriptor, or 0 if it is not a descriptor * @throws IllegalArgumentException if the descriptor string is not valid */ - @SuppressWarnings("fallthrough") - static int skipOverFieldSignature(String descriptor, int start, int end, boolean voidOK) { + static int skipOverFieldSignature(String descriptor, int start, int end) { int arrayDim = 0; int index = start; - while (index < end) { - switch (descriptor.charAt(index)) { - case JVM_SIGNATURE_VOID: if (!voidOK) { return 0; } + if (index < end) { + char ch; + while ((ch = descriptor.charAt(index++)) == JVM_SIGNATURE_ARRAY) { + arrayDim++; + } + if (arrayDim > MAX_ARRAY_TYPE_DESC_DIMENSIONS) { + throw maxArrayTypeDescDimensions(); + } + + switch (ch) { case JVM_SIGNATURE_BOOLEAN: case JVM_SIGNATURE_BYTE: case JVM_SIGNATURE_CHAR: @@ -353,16 +341,16 @@ static int skipOverFieldSignature(String descriptor, int start, int end, boolean case JVM_SIGNATURE_FLOAT: case JVM_SIGNATURE_LONG: case JVM_SIGNATURE_DOUBLE: - return index - start + 1; + return index - start; case JVM_SIGNATURE_CLASS: // state variable for detection of illegal states, such as: // empty unqualified name, '//', leading '/', or trailing '/' boolean legal = false; - while (++index < end) { - switch (descriptor.charAt(index)) { + while (index < end) { + switch (descriptor.charAt(index++)) { case ';' -> { // illegal state on parser exit indicates empty unqualified name or trailing '/' - return legal ? index - start + 1 : 0; + return legal ? index - start : 0; } case '.', '[' -> { // do not permit '.' or '[' @@ -377,21 +365,17 @@ static int skipOverFieldSignature(String descriptor, int start, int end, boolean legal = true; } } - return 0; - case JVM_SIGNATURE_ARRAY: - arrayDim++; - if (arrayDim > MAX_ARRAY_TYPE_DESC_DIMENSIONS) { - throw new IllegalArgumentException(String.format("Cannot create an array type descriptor with more than %d dimensions", - ConstantUtils.MAX_ARRAY_TYPE_DESC_DIMENSIONS)); - } - // The rest of what's there better be a legal descriptor - index++; - voidOK = false; break; default: - return 0; + break; } } return 0; } + + private static IllegalArgumentException maxArrayTypeDescDimensions() { + return new IllegalArgumentException(String.format( + "Cannot create an array type descriptor with more than %d dimensions", + ConstantUtils.MAX_ARRAY_TYPE_DESC_DIMENSIONS)); + } } diff --git a/src/java.base/share/classes/jdk/internal/constant/MethodTypeDescImpl.java b/src/java.base/share/classes/jdk/internal/constant/MethodTypeDescImpl.java index 5061c95cb42..7d4442c255e 100644 --- a/src/java.base/share/classes/jdk/internal/constant/MethodTypeDescImpl.java +++ b/src/java.base/share/classes/jdk/internal/constant/MethodTypeDescImpl.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2024, Alibaba Group Holding Limited. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,17 +28,25 @@ import jdk.internal.vm.annotation.Stable; import java.lang.constant.ClassDesc; +import java.lang.constant.ConstantDescs; import java.lang.constant.MethodTypeDesc; import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; import java.security.AccessController; import java.security.PrivilegedAction; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Objects; import static java.util.Objects.requireNonNull; +import static jdk.internal.constant.ConstantUtils.badMethodDescriptor; +import static jdk.internal.constant.ConstantUtils.resolveClassDesc; +import static jdk.internal.constant.ConstantUtils.skipOverFieldSignature; +import static jdk.internal.constant.ConstantUtils.EMPTY_CLASSDESC; +import static jdk.internal.constant.PrimitiveClassDescImpl.CD_void; + /** * A nominal descriptor for a * {@link MethodType}. A {@linkplain MethodTypeDescImpl} corresponds to a @@ -91,7 +100,7 @@ private static ClassDesc validateArgument(ClassDesc arg) { */ public static MethodTypeDescImpl ofValidated(ClassDesc returnType, ClassDesc... trustedArgTypes) { if (trustedArgTypes.length == 0) - return new MethodTypeDescImpl(returnType, ConstantUtils.EMPTY_CLASSDESC); + return new MethodTypeDescImpl(returnType, EMPTY_CLASSDESC); return new MethodTypeDescImpl(returnType, trustedArgTypes); } @@ -105,18 +114,98 @@ public static MethodTypeDescImpl ofValidated(ClassDesc returnType, ClassDesc... * @jvms 4.3.3 Method Descriptors */ public static MethodTypeDescImpl ofDescriptor(String descriptor) { - // Implicit null-check of descriptor - List ptypes = ConstantUtils.parseMethodDescriptor(descriptor); - int args = ptypes.size() - 1; - ClassDesc[] paramTypes = args > 0 - ? ptypes.subList(1, args + 1).toArray(ConstantUtils.EMPTY_CLASSDESC) - : ConstantUtils.EMPTY_CLASSDESC; - - MethodTypeDescImpl result = ofValidated(ptypes.get(0), paramTypes); + int length = descriptor.length(); + int rightBracket, retTypeLength; + if (descriptor.charAt(0) != '(' + || (rightBracket = (descriptor.charAt(1) == ')' ? 1 : descriptor.lastIndexOf(')'))) <= 0 + || (retTypeLength = length - rightBracket - 1) == 0 + || (retTypeLength != 1 // if retTypeLength == 1, check correctness in resolveClassDesc + && retTypeLength != skipOverFieldSignature(descriptor, rightBracket + 1, length)) + ) { + throw badMethodDescriptor(descriptor); + } + + var returnType = resolveClassDesc(descriptor, rightBracket + 1, retTypeLength); + if (length == 3 && returnType == CD_void) { + return (MethodTypeDescImpl) ConstantDescs.MTD_void; + } + var paramTypes = paramTypes(descriptor, 1, rightBracket); + var result = new MethodTypeDescImpl(returnType, paramTypes); result.cachedDescriptorString = descriptor; return result; } + private static ClassDesc[] paramTypes(String descriptor, int start, int end) { + if (start == end) { + return EMPTY_CLASSDESC; + } + + /* + * If the length of the first 8 parameters is < 256, save them in lengths to avoid ArrayList allocation + * Stop storing for the last parameter (we can compute length), or if too many parameters or too long. 
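As a worked illustration of the byte-packing described in the comment above (and only of the packing arithmetic, not the full parsing logic, which additionally stops before the last parameter and falls back to an ArrayList when a length exceeds 255 or more than 8 leading parameters are seen): the lengths 1, 18 and 2, e.g. for "I", "Ljava/lang/String;" and "[J", are stored little-endian in one long and read back with the same shifts.

    class PackedLengthsSketch {
        public static void main(String[] args) {
            int[] lengths = {1, 18, 2};   // e.g. "I", "Ljava/lang/String;", "[J"
            long packed = 0;
            for (int i = 0; i < lengths.length; i++) {
                // Same shift arithmetic as in paramTypes: byte i holds the i-th length.
                packed |= ((long) lengths[i]) << (Byte.SIZE * i);
            }
            System.out.printf("packed = 0x%x%n", packed);   // prints 0x21201
            for (int i = 0; i < lengths.length; i++) {
                int len = Byte.toUnsignedInt((byte) (packed >> (Byte.SIZE * i)));
                System.out.println("length[" + i + "] = " + len);
            }
        }
    }
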
+ */ + // little endian storage - lowest byte is encoded length 0 + long packedLengths = 0; + int packedCount = 0; + int cur = start; + while (cur < end) { + int len = skipOverFieldSignature(descriptor, cur, end); + if (len == 0) { + throw badMethodDescriptor(descriptor); + } + cur += len; + if (len > 0xFF || packedCount >= Long.SIZE / Byte.SIZE || cur == end) { + // Cannot or do not have to pack this item, but is already scanned and valid + break; + } + packedLengths = packedLengths | (((long) len) << (Byte.SIZE * packedCount++)); + } + + // Invariant: packedCount parameters encoded in packedLengths, + // And another valid parameter pointed by cur + + // Recover encoded elements + ClassDesc[] paramTypes = null; + List paramTypeList = null; + if (cur == end) { + paramTypes = new ClassDesc[packedCount + 1]; + } else { + paramTypeList = new ArrayList<>(32); + } + + int last = start; + for (int i = 0; i < packedCount; i++) { + int len = Byte.toUnsignedInt((byte) (packedLengths >> (Byte.SIZE * i))); + var cd = resolveClassDesc(descriptor, last, len); + if (paramTypes != null) { + paramTypes[i] = cd; + } else { + paramTypeList.add(cd); + } + last += len; + } + var lastCd = resolveClassDesc(descriptor, last, cur - last); + + if (paramTypes != null) { + paramTypes[packedCount] = lastCd; + return paramTypes; + } + paramTypeList.add(lastCd); + return buildParamTypes(descriptor, cur, end, paramTypeList); + } + + // slow path + private static ClassDesc[] buildParamTypes(String descriptor, int cur, int end, List list) { + while (cur < end) { + int len = skipOverFieldSignature(descriptor, cur, end); + if (len == 0) + throw badMethodDescriptor(descriptor); + list.add(resolveClassDesc(descriptor, cur, len)); + cur += len; + } + + return list.toArray(EMPTY_CLASSDESC); + } @Override public ClassDesc returnType() { diff --git a/src/java.base/share/classes/jdk/internal/constant/PrimitiveClassDescImpl.java b/src/java.base/share/classes/jdk/internal/constant/PrimitiveClassDescImpl.java index 123c76a1e9f..35aa4007816 100644 --- a/src/java.base/share/classes/jdk/internal/constant/PrimitiveClassDescImpl.java +++ b/src/java.base/share/classes/jdk/internal/constant/PrimitiveClassDescImpl.java @@ -29,6 +29,7 @@ import java.lang.constant.DynamicConstantDesc; import java.lang.invoke.MethodHandles; +import jdk.internal.vm.annotation.Stable; import sun.invoke.util.Wrapper; import static java.util.Objects.requireNonNull; @@ -40,7 +41,35 @@ public final class PrimitiveClassDescImpl extends DynamicConstantDesc> implements ClassDesc { + /** {@link ClassDesc} representing the primitive type {@code int} */ + public static final PrimitiveClassDescImpl CD_int = new PrimitiveClassDescImpl("I"); + + /** {@link ClassDesc} representing the primitive type {@code long} */ + public static final PrimitiveClassDescImpl CD_long = new PrimitiveClassDescImpl("J"); + + /** {@link ClassDesc} representing the primitive type {@code float} */ + public static final PrimitiveClassDescImpl CD_float = new PrimitiveClassDescImpl("F"); + + /** {@link ClassDesc} representing the primitive type {@code double} */ + public static final PrimitiveClassDescImpl CD_double = new PrimitiveClassDescImpl("D"); + + /** {@link ClassDesc} representing the primitive type {@code short} */ + public static final PrimitiveClassDescImpl CD_short = new PrimitiveClassDescImpl("S"); + + /** {@link ClassDesc} representing the primitive type {@code byte} */ + public static final PrimitiveClassDescImpl CD_byte = new PrimitiveClassDescImpl("B"); + + /** {@link ClassDesc} 
representing the primitive type {@code char} */ + public static final PrimitiveClassDescImpl CD_char = new PrimitiveClassDescImpl("C"); + + /** {@link ClassDesc} representing the primitive type {@code boolean} */ + public static final PrimitiveClassDescImpl CD_boolean = new PrimitiveClassDescImpl("Z"); + + /** {@link ClassDesc} representing the primitive type {@code void} */ + public static final PrimitiveClassDescImpl CD_void = new PrimitiveClassDescImpl("V"); + private final String descriptor; + private @Stable Wrapper lazyWrapper; // initialized only after this /** * Creates a {@linkplain ClassDesc} given a descriptor string for a primitive @@ -52,14 +81,18 @@ public final class PrimitiveClassDescImpl * describe a valid primitive type * @jvms 4.3 Descriptors */ - public PrimitiveClassDescImpl(String descriptor) { + private PrimitiveClassDescImpl(String descriptor) { super(ConstantDescs.BSM_PRIMITIVE_CLASS, requireNonNull(descriptor), ConstantDescs.CD_Class); - if (descriptor.length() != 1 - || "VIJCSBFDZ".indexOf(descriptor.charAt(0)) < 0) - throw new IllegalArgumentException(String.format("not a valid primitive type descriptor: %s", descriptor)); this.descriptor = descriptor; } + public Wrapper wrapper() { + var wrapper = this.lazyWrapper; + if (wrapper != null) + return wrapper; + return this.lazyWrapper = Wrapper.forBasicType(descriptorString().charAt(0)); + } + @Override public String descriptorString() { return descriptor; @@ -67,7 +100,7 @@ public String descriptorString() { @Override public Class resolveConstantDesc(MethodHandles.Lookup lookup) { - return Wrapper.forBasicType(descriptorString().charAt(0)).primitiveType(); + return wrapper().primitiveType(); } @Override diff --git a/src/java.base/share/classes/jdk/internal/constant/ReferenceClassDescImpl.java b/src/java.base/share/classes/jdk/internal/constant/ReferenceClassDescImpl.java index 71473b87e18..b8cc9aacfe0 100644 --- a/src/java.base/share/classes/jdk/internal/constant/ReferenceClassDescImpl.java +++ b/src/java.base/share/classes/jdk/internal/constant/ReferenceClassDescImpl.java @@ -52,7 +52,7 @@ private ReferenceClassDescImpl(String descriptor) { */ public static ReferenceClassDescImpl of(String descriptor) { int dLen = descriptor.length(); - int len = ConstantUtils.skipOverFieldSignature(descriptor, 0, dLen, false); + int len = ConstantUtils.skipOverFieldSignature(descriptor, 0, dLen); if (len <= 1 || len != dLen) throw new IllegalArgumentException(String.format("not a valid reference type descriptor: %s", descriptor)); return new ReferenceClassDescImpl(descriptor); @@ -66,7 +66,7 @@ public static ReferenceClassDescImpl of(String descriptor) { * @jvms 4.3.2 Field Descriptors */ public static ReferenceClassDescImpl ofValidated(String descriptor) { - assert ConstantUtils.skipOverFieldSignature(descriptor, 0, descriptor.length(), false) + assert ConstantUtils.skipOverFieldSignature(descriptor, 0, descriptor.length()) == descriptor.length() : descriptor; return new ReferenceClassDescImpl(descriptor); } diff --git a/src/java.base/share/classes/sun/invoke/util/Wrapper.java b/src/java.base/share/classes/sun/invoke/util/Wrapper.java index 5d0849cd93d..4e8beaabb34 100644 --- a/src/java.base/share/classes/sun/invoke/util/Wrapper.java +++ b/src/java.base/share/classes/sun/invoke/util/Wrapper.java @@ -304,21 +304,6 @@ public static Wrapper forPrimitiveType(Class type) { throw newIllegalArgumentException("not primitive: " + type); } - /** Return the wrapper that corresponds to the provided basic type char. 
- * The basic type char must be for one of the eight primitive types, or void. - * @throws IllegalArgumentException for unexpected types - */ - public static Wrapper forPrimitiveType(char basicTypeChar) { - Wrapper w = FROM_CHAR[(basicTypeChar + (basicTypeChar >> 1)) & 0xf]; - if (w == null || w.basicTypeChar != basicTypeChar) { - throw basicTypeError(basicTypeChar); - } - if (w == OBJECT) { - throw newIllegalArgumentException("not primitive: " + basicTypeChar); - } - return w; - } - /** Return the wrapper that wraps values into the given wrapper type. * If it is {@code Object}, return {@code OBJECT}. * Otherwise, it must be a wrapper type. diff --git a/test/jdk/java/lang/constant/boottest/java.base/jdk/internal/constant/ConstantUtilsTest.java b/test/jdk/java/lang/constant/boottest/java.base/jdk/internal/constant/ConstantUtilsTest.java index f3e9619a0e1..3afcc64bc4a 100644 --- a/test/jdk/java/lang/constant/boottest/java.base/jdk/internal/constant/ConstantUtilsTest.java +++ b/test/jdk/java/lang/constant/boottest/java.base/jdk/internal/constant/ConstantUtilsTest.java @@ -69,7 +69,7 @@ public void testValidateMemberName() { } public void testSkipOverFieldSignatureVoid() { - int ret = ConstantUtils.skipOverFieldSignature("(V)V", 1, 4, false); + int ret = ConstantUtils.skipOverFieldSignature("(V)V", 1, 4); assertEquals(ret, 0, "Descriptor of (V)V starting at index 1, void disallowed"); } } diff --git a/test/micro/org/openjdk/bench/java/lang/constant/MethodTypeDescFactories.java b/test/micro/org/openjdk/bench/java/lang/constant/MethodTypeDescFactories.java index 64019c37c14..a2a13f7bd4d 100644 --- a/test/micro/org/openjdk/bench/java/lang/constant/MethodTypeDescFactories.java +++ b/test/micro/org/openjdk/bench/java/lang/constant/MethodTypeDescFactories.java @@ -58,9 +58,14 @@ public class MethodTypeDescFactories { @Param({ "(Ljava/lang/Object;Ljava/lang/String;)I", "()V", + "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;", + "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;", + "(Ljava/lang/Integer;Ljava/lang/Integer;)Ljava/lang/Integer;", + "()Ljava/lang/Object;", "([IJLjava/lang/String;Z)Ljava/util/List;", "()[Ljava/lang/String;", "(..IIJ)V", + "([III.Z[B..[.[B).", "(.....................)." 
}) public String descString; diff --git a/test/micro/org/openjdk/bench/java/lang/invoke/Wrappers.java b/test/micro/org/openjdk/bench/java/lang/invoke/Wrappers.java index 931afeb1603..61aea6b6dcf 100644 --- a/test/micro/org/openjdk/bench/java/lang/invoke/Wrappers.java +++ b/test/micro/org/openjdk/bench/java/lang/invoke/Wrappers.java @@ -112,11 +112,4 @@ public void forBasicType(Blackhole bh) throws Throwable { bh.consume(Wrapper.forBasicType(c)); } } - - @Benchmark - public void forPrimitiveType(Blackhole bh) throws Throwable { - for (char c : PRIM_TYPES) { - bh.consume(Wrapper.forPrimitiveType(c)); - } - } } From 0e8fe3550b628c6617ac7593d7e17ef7d9bc0869 Mon Sep 17 00:00:00 2001 From: Maurizio Cimadamore Date: Wed, 21 Aug 2024 15:11:32 +0000 Subject: [PATCH 62/67] 8338677: Improve startup of memory access var handles by simplifying combinator chains Reviewed-by: redestad --- .../X-VarHandleSegmentView.java.template | 238 +++++++++--------- .../foreign/AbstractMemorySegmentImpl.java | 10 + .../jdk/internal/foreign/LayoutPath.java | 42 ++-- .../classes/jdk/internal/foreign/Utils.java | 28 ++- 4 files changed, 168 insertions(+), 150 deletions(-) diff --git a/src/java.base/share/classes/java/lang/invoke/X-VarHandleSegmentView.java.template b/src/java.base/share/classes/java/lang/invoke/X-VarHandleSegmentView.java.template index 0c088cd5c4b..c5942e93c46 100644 --- a/src/java.base/share/classes/java/lang/invoke/X-VarHandleSegmentView.java.template +++ b/src/java.base/share/classes/java/lang/invoke/X-VarHandleSegmentView.java.template @@ -25,9 +25,11 @@ package java.lang.invoke; import jdk.internal.foreign.AbstractMemorySegmentImpl; +import jdk.internal.foreign.Utils; import jdk.internal.misc.ScopedMemoryAccess; import jdk.internal.vm.annotation.ForceInline; +import java.lang.foreign.MemoryLayout; import java.lang.foreign.MemorySegment; import java.lang.ref.Reference; @@ -45,7 +47,7 @@ final class VarHandleSegmentAs$Type$s extends VarHandleSegmentViewBase { static final int NON_PLAIN_ACCESS_MIN_ALIGN_MASK = $BoxType$.BYTES - 1; - static final VarForm FORM = new VarForm(VarHandleSegmentAs$Type$s.class, MemorySegment.class, $type$.class, long.class); + static final VarForm FORM = new VarForm(VarHandleSegmentAs$Type$s.class, MemorySegment.class, $type$.class, MemoryLayout.class, long.class, long.class); VarHandleSegmentAs$Type$s(boolean be, long alignmentMask, boolean exact) { super(FORM, be, alignmentMask, exact); @@ -53,7 +55,7 @@ final class VarHandleSegmentAs$Type$s extends VarHandleSegmentViewBase { @Override final MethodType accessModeTypeUncached(VarHandle.AccessType accessType) { - return accessType.accessModeType(MemorySegment.class, $type$.class, long.class); + return accessType.accessModeType(MemorySegment.class, $type$.class, MemoryLayout.class, long.class, long.class); } @Override @@ -97,70 +99,70 @@ final class VarHandleSegmentAs$Type$s extends VarHandleSegmentViewBase { #end[floatingPoint] @ForceInline - static AbstractMemorySegmentImpl checkReadOnly(Object obb, boolean ro) { + static AbstractMemorySegmentImpl checkSegment(Object obb, Object encl, long base, boolean ro) { AbstractMemorySegmentImpl oo = (AbstractMemorySegmentImpl)Objects.requireNonNull(obb); - oo.checkReadOnly(ro); + oo.checkEnclosingLayout(base, (MemoryLayout)encl, ro); return oo; } @ForceInline - static long offsetNonPlain(AbstractMemorySegmentImpl bb, long offset, long alignmentMask) { + static long offsetNonPlain(AbstractMemorySegmentImpl bb, long base, long offset, long alignmentMask) { if ((alignmentMask & 
NON_PLAIN_ACCESS_MIN_ALIGN_MASK) != NON_PLAIN_ACCESS_MIN_ALIGN_MASK) { throw VarHandleSegmentViewBase.newUnsupportedAccessModeForAlignment(alignmentMask + 1); } - return offsetPlain(bb, offset); + return offsetPlain(bb, base, offset); } @ForceInline - static long offsetPlain(AbstractMemorySegmentImpl bb, long offset) { - long base = bb.unsafeGetOffset(); - return base + offset; + static long offsetPlain(AbstractMemorySegmentImpl bb, long base, long offset) { + long segment_base = bb.unsafeGetOffset(); + return segment_base + base + offset; } @ForceInline - static $type$ get(VarHandle ob, Object obb, long base) { + static $type$ get(VarHandle ob, Object obb, Object encl, long base, long offset) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, true); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, true); #if[floatingPoint] $rawType$ rawValue = SCOPED_MEMORY_ACCESS.get$RawType$Unaligned(bb.sessionImpl(), bb.unsafeGetBase(), - offsetPlain(bb, base), + offsetPlain(bb, base, offset), handle.be); return $Type$.$rawType$BitsTo$Type$(rawValue); #else[floatingPoint] #if[byte] return SCOPED_MEMORY_ACCESS.get$Type$(bb.sessionImpl(), bb.unsafeGetBase(), - offsetPlain(bb, base)); + offsetPlain(bb, base, offset)); #else[byte] return SCOPED_MEMORY_ACCESS.get$Type$Unaligned(bb.sessionImpl(), bb.unsafeGetBase(), - offsetPlain(bb, base), + offsetPlain(bb, base, offset), handle.be); #end[byte] #end[floatingPoint] } @ForceInline - static void set(VarHandle ob, Object obb, long base, $type$ value) { + static void set(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); #if[floatingPoint] SCOPED_MEMORY_ACCESS.put$RawType$Unaligned(bb.sessionImpl(), bb.unsafeGetBase(), - offsetPlain(bb, base), + offsetPlain(bb, base, offset), $Type$.$type$ToRaw$RawType$Bits(value), handle.be); #else[floatingPoint] #if[byte] SCOPED_MEMORY_ACCESS.put$Type$(bb.sessionImpl(), bb.unsafeGetBase(), - offsetPlain(bb, base), + offsetPlain(bb, base, offset), value); #else[byte] SCOPED_MEMORY_ACCESS.put$Type$Unaligned(bb.sessionImpl(), bb.unsafeGetBase(), - offsetPlain(bb, base), + offsetPlain(bb, base, offset), value, handle.be); #end[byte] @@ -168,223 +170,223 @@ final class VarHandleSegmentAs$Type$s extends VarHandleSegmentViewBase { } @ForceInline - static $type$ getVolatile(VarHandle ob, Object obb, long base) { + static $type$ getVolatile(VarHandle ob, Object obb, Object encl, long base, long offset) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, true); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, true); return convEndian(handle.be, SCOPED_MEMORY_ACCESS.get$RawType$Volatile(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask))); + offsetNonPlain(bb, base, offset, handle.alignmentMask))); } @ForceInline - static void setVolatile(VarHandle ob, Object obb, long base, $type$ value) { + static void setVolatile(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); 
SCOPED_MEMORY_ACCESS.put$RawType$Volatile(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), convEndian(handle.be, value)); } @ForceInline - static $type$ getAcquire(VarHandle ob, Object obb, long base) { + static $type$ getAcquire(VarHandle ob, Object obb, Object encl, long base, long offset) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, true); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, true); return convEndian(handle.be, SCOPED_MEMORY_ACCESS.get$RawType$Acquire(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask))); + offsetNonPlain(bb, base, offset, handle.alignmentMask))); } @ForceInline - static void setRelease(VarHandle ob, Object obb, long base, $type$ value) { + static void setRelease(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); SCOPED_MEMORY_ACCESS.put$RawType$Release(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), convEndian(handle.be, value)); } @ForceInline - static $type$ getOpaque(VarHandle ob, Object obb, long base) { + static $type$ getOpaque(VarHandle ob, Object obb, Object encl, long base, long offset) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, true); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, true); return convEndian(handle.be, SCOPED_MEMORY_ACCESS.get$RawType$Opaque(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask))); + offsetNonPlain(bb, base, offset, handle.alignmentMask))); } @ForceInline - static void setOpaque(VarHandle ob, Object obb, long base, $type$ value) { + static void setOpaque(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); SCOPED_MEMORY_ACCESS.put$RawType$Opaque(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), convEndian(handle.be, value)); } #if[CAS] @ForceInline - static boolean compareAndSet(VarHandle ob, Object obb, long base, $type$ expected, $type$ value) { + static boolean compareAndSet(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ expected, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); return SCOPED_MEMORY_ACCESS.compareAndSet$RawType$(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), convEndian(handle.be, expected), convEndian(handle.be, value)); } @ForceInline - static $type$ compareAndExchange(VarHandle ob, Object obb, long base, $type$ expected, $type$ value) { + static $type$ compareAndExchange(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ expected, $type$ 
value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); return convEndian(handle.be, SCOPED_MEMORY_ACCESS.compareAndExchange$RawType$(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), convEndian(handle.be, expected), convEndian(handle.be, value))); } @ForceInline - static $type$ compareAndExchangeAcquire(VarHandle ob, Object obb, long base, $type$ expected, $type$ value) { + static $type$ compareAndExchangeAcquire(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ expected, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); return convEndian(handle.be, SCOPED_MEMORY_ACCESS.compareAndExchange$RawType$Acquire(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), convEndian(handle.be, expected), convEndian(handle.be, value))); } @ForceInline - static $type$ compareAndExchangeRelease(VarHandle ob, Object obb, long base, $type$ expected, $type$ value) { + static $type$ compareAndExchangeRelease(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ expected, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); return convEndian(handle.be, SCOPED_MEMORY_ACCESS.compareAndExchange$RawType$Release(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), convEndian(handle.be, expected), convEndian(handle.be, value))); } @ForceInline - static boolean weakCompareAndSetPlain(VarHandle ob, Object obb, long base, $type$ expected, $type$ value) { + static boolean weakCompareAndSetPlain(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ expected, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); return SCOPED_MEMORY_ACCESS.weakCompareAndSet$RawType$Plain(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), convEndian(handle.be, expected), convEndian(handle.be, value)); } @ForceInline - static boolean weakCompareAndSet(VarHandle ob, Object obb, long base, $type$ expected, $type$ value) { + static boolean weakCompareAndSet(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ expected, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); return SCOPED_MEMORY_ACCESS.weakCompareAndSet$RawType$(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), convEndian(handle.be, expected), convEndian(handle.be, value)); } @ForceInline - static boolean weakCompareAndSetAcquire(VarHandle ob, Object obb, long base, $type$ expected, 
$type$ value) { + static boolean weakCompareAndSetAcquire(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ expected, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); return SCOPED_MEMORY_ACCESS.weakCompareAndSet$RawType$Acquire(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), convEndian(handle.be, expected), convEndian(handle.be, value)); } @ForceInline - static boolean weakCompareAndSetRelease(VarHandle ob, Object obb, long base, $type$ expected, $type$ value) { + static boolean weakCompareAndSetRelease(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ expected, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); return SCOPED_MEMORY_ACCESS.weakCompareAndSet$RawType$Release(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), convEndian(handle.be, expected), convEndian(handle.be, value)); } @ForceInline - static $type$ getAndSet(VarHandle ob, Object obb, long base, $type$ value) { + static $type$ getAndSet(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); return convEndian(handle.be, SCOPED_MEMORY_ACCESS.getAndSet$RawType$(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), convEndian(handle.be, value))); } @ForceInline - static $type$ getAndSetAcquire(VarHandle ob, Object obb, long base, $type$ value) { + static $type$ getAndSetAcquire(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); return convEndian(handle.be, SCOPED_MEMORY_ACCESS.getAndSet$RawType$Acquire(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), convEndian(handle.be, value))); } @ForceInline - static $type$ getAndSetRelease(VarHandle ob, Object obb, long base, $type$ value) { + static $type$ getAndSetRelease(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); return convEndian(handle.be, SCOPED_MEMORY_ACCESS.getAndSet$RawType$Release(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), convEndian(handle.be, value))); } #end[CAS] #if[AtomicAdd] @ForceInline - static $type$ getAndAdd(VarHandle ob, Object obb, long base, $type$ delta) { + static $type$ getAndAdd(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ delta) { 
VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); if (handle.be == BE) { return SCOPED_MEMORY_ACCESS.getAndAdd$RawType$(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), delta); } else { - return getAndAddConvEndianWithCAS(bb, offsetNonPlain(bb, base, handle.alignmentMask), delta); + return getAndAddConvEndianWithCAS(bb, offsetNonPlain(bb, base, offset, handle.alignmentMask), delta); } } @ForceInline - static $type$ getAndAddAcquire(VarHandle ob, Object obb, long base, $type$ delta) { + static $type$ getAndAddAcquire(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ delta) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); if (handle.be == BE) { return SCOPED_MEMORY_ACCESS.getAndAdd$RawType$Acquire(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), delta); } else { - return getAndAddConvEndianWithCAS(bb, offsetNonPlain(bb, base, handle.alignmentMask), delta); + return getAndAddConvEndianWithCAS(bb, offsetNonPlain(bb, base, offset, handle.alignmentMask), delta); } } @ForceInline - static $type$ getAndAddRelease(VarHandle ob, Object obb, long base, $type$ delta) { + static $type$ getAndAddRelease(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ delta) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); if (handle.be == BE) { return SCOPED_MEMORY_ACCESS.getAndAdd$RawType$Release(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), delta); } else { - return getAndAddConvEndianWithCAS(bb, offsetNonPlain(bb, base, handle.alignmentMask), delta); + return getAndAddConvEndianWithCAS(bb, offsetNonPlain(bb, base, offset, handle.alignmentMask), delta); } } @@ -403,44 +405,44 @@ final class VarHandleSegmentAs$Type$s extends VarHandleSegmentViewBase { #if[Bitwise] @ForceInline - static $type$ getAndBitwiseOr(VarHandle ob, Object obb, long base, $type$ value) { + static $type$ getAndBitwiseOr(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); if (handle.be == BE) { return SCOPED_MEMORY_ACCESS.getAndBitwiseOr$RawType$(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } else { - return getAndBitwiseOrConvEndianWithCAS(bb, offsetNonPlain(bb, base, handle.alignmentMask), value); + return getAndBitwiseOrConvEndianWithCAS(bb, offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } } @ForceInline - static $type$ getAndBitwiseOrRelease(VarHandle ob, Object obb, long base, $type$ value) { + static $type$ getAndBitwiseOrRelease(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle 
= (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); if (handle.be == BE) { return SCOPED_MEMORY_ACCESS.getAndBitwiseOr$RawType$Release(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } else { - return getAndBitwiseOrConvEndianWithCAS(bb, offsetNonPlain(bb, base, handle.alignmentMask), value); + return getAndBitwiseOrConvEndianWithCAS(bb, offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } } @ForceInline - static $type$ getAndBitwiseOrAcquire(VarHandle ob, Object obb, long base, $type$ value) { + static $type$ getAndBitwiseOrAcquire(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); if (handle.be == BE) { return SCOPED_MEMORY_ACCESS.getAndBitwiseOr$RawType$Acquire(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } else { - return getAndBitwiseOrConvEndianWithCAS(bb, offsetNonPlain(bb, base, handle.alignmentMask), value); + return getAndBitwiseOrConvEndianWithCAS(bb, offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } } @@ -457,44 +459,44 @@ final class VarHandleSegmentAs$Type$s extends VarHandleSegmentViewBase { } @ForceInline - static $type$ getAndBitwiseAnd(VarHandle ob, Object obb, long base, $type$ value) { + static $type$ getAndBitwiseAnd(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); if (handle.be == BE) { return SCOPED_MEMORY_ACCESS.getAndBitwiseAnd$RawType$(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } else { - return getAndBitwiseAndConvEndianWithCAS(bb, offsetNonPlain(bb, base, handle.alignmentMask), value); + return getAndBitwiseAndConvEndianWithCAS(bb, offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } } @ForceInline - static $type$ getAndBitwiseAndRelease(VarHandle ob, Object obb, long base, $type$ value) { + static $type$ getAndBitwiseAndRelease(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); if (handle.be == BE) { return SCOPED_MEMORY_ACCESS.getAndBitwiseAnd$RawType$Release(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } else { - return getAndBitwiseAndConvEndianWithCAS(bb, offsetNonPlain(bb, base, handle.alignmentMask), value); + return getAndBitwiseAndConvEndianWithCAS(bb, offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } } @ForceInline - static $type$ getAndBitwiseAndAcquire(VarHandle ob, Object obb, long base, $type$ value) { + static $type$ getAndBitwiseAndAcquire(VarHandle ob, Object obb, Object encl, long base, long 
offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); if (handle.be == BE) { return SCOPED_MEMORY_ACCESS.getAndBitwiseAnd$RawType$Acquire(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } else { - return getAndBitwiseAndConvEndianWithCAS(bb, offsetNonPlain(bb, base, handle.alignmentMask), value); + return getAndBitwiseAndConvEndianWithCAS(bb, offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } } @@ -512,44 +514,44 @@ final class VarHandleSegmentAs$Type$s extends VarHandleSegmentViewBase { @ForceInline - static $type$ getAndBitwiseXor(VarHandle ob, Object obb, long base, $type$ value) { + static $type$ getAndBitwiseXor(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); if (handle.be == BE) { return SCOPED_MEMORY_ACCESS.getAndBitwiseXor$RawType$(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } else { - return getAndBitwiseXorConvEndianWithCAS(bb, offsetNonPlain(bb, base, handle.alignmentMask), value); + return getAndBitwiseXorConvEndianWithCAS(bb, offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } } @ForceInline - static $type$ getAndBitwiseXorRelease(VarHandle ob, Object obb, long base, $type$ value) { + static $type$ getAndBitwiseXorRelease(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); if (handle.be == BE) { return SCOPED_MEMORY_ACCESS.getAndBitwiseXor$RawType$Release(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } else { - return getAndBitwiseXorConvEndianWithCAS(bb, offsetNonPlain(bb, base, handle.alignmentMask), value); + return getAndBitwiseXorConvEndianWithCAS(bb, offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } } @ForceInline - static $type$ getAndBitwiseXorAcquire(VarHandle ob, Object obb, long base, $type$ value) { + static $type$ getAndBitwiseXorAcquire(VarHandle ob, Object obb, Object encl, long base, long offset, $type$ value) { VarHandleSegmentViewBase handle = (VarHandleSegmentViewBase)ob; - AbstractMemorySegmentImpl bb = checkReadOnly(obb, false); + AbstractMemorySegmentImpl bb = checkSegment(obb, encl, base, false); if (handle.be == BE) { return SCOPED_MEMORY_ACCESS.getAndBitwiseXor$RawType$Acquire(bb.sessionImpl(), bb.unsafeGetBase(), - offsetNonPlain(bb, base, handle.alignmentMask), + offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } else { - return getAndBitwiseXorConvEndianWithCAS(bb, offsetNonPlain(bb, base, handle.alignmentMask), value); + return getAndBitwiseXorConvEndianWithCAS(bb, offsetNonPlain(bb, base, offset, handle.alignmentMask), value); } } diff --git a/src/java.base/share/classes/jdk/internal/foreign/AbstractMemorySegmentImpl.java 
b/src/java.base/share/classes/jdk/internal/foreign/AbstractMemorySegmentImpl.java index 325dbe1093f..42857decf63 100644 --- a/src/java.base/share/classes/jdk/internal/foreign/AbstractMemorySegmentImpl.java +++ b/src/java.base/share/classes/jdk/internal/foreign/AbstractMemorySegmentImpl.java @@ -373,6 +373,16 @@ public void checkValidState() { sessionImpl().checkValidState(); } + @ForceInline + public final void checkEnclosingLayout(long offset, MemoryLayout enclosing, boolean readOnly) { + checkAccess(offset, enclosing.byteSize(), readOnly); + if (!isAlignedForElement(offset, enclosing)) { + throw new IllegalArgumentException(String.format( + "Target offset %d is incompatible with alignment constraint %d (of %s) for segment %s" + , offset, enclosing.byteAlignment(), enclosing, this)); + } + } + public abstract long unsafeGetOffset(); public abstract Object unsafeGetBase(); diff --git a/src/java.base/share/classes/jdk/internal/foreign/LayoutPath.java b/src/java.base/share/classes/jdk/internal/foreign/LayoutPath.java index ebd83d1c5da..956a5c75875 100644 --- a/src/java.base/share/classes/jdk/internal/foreign/LayoutPath.java +++ b/src/java.base/share/classes/jdk/internal/foreign/LayoutPath.java @@ -80,7 +80,7 @@ public class LayoutPath { MH_CHECK_ENCL_LAYOUT = lookup.findStatic(LayoutPath.class, "checkEnclosingLayout", MethodType.methodType(void.class, MemorySegment.class, long.class, MemoryLayout.class)); MH_SEGMENT_RESIZE = lookup.findStatic(LayoutPath.class, "resizeSegment", - MethodType.methodType(MemorySegment.class, MemorySegment.class, MemoryLayout.class)); + MethodType.methodType(MemorySegment.class, MemorySegment.class)); MH_ADD = lookup.findStatic(Long.class, "sum", MethodType.methodType(long.class, long.class, long.class)); } catch (Throwable ex) { @@ -180,13 +180,14 @@ public LayoutPath derefElement() { } MemoryLayout derefLayout = addressLayout.targetLayout().get(); MethodHandle handle = dereferenceHandle(false).toMethodHandle(VarHandle.AccessMode.GET); - handle = MethodHandles.filterReturnValue(handle, - MethodHandles.insertArguments(MH_SEGMENT_RESIZE, 1, derefLayout)); + handle = MethodHandles.filterReturnValue(handle, MH_SEGMENT_RESIZE); return derefPath(derefLayout, handle, this); } - private static MemorySegment resizeSegment(MemorySegment segment, MemoryLayout layout) { - return Utils.longToAddress(segment.address(), layout.byteSize(), layout.byteAlignment()); + private static MemorySegment resizeSegment(MemorySegment segment) { + // Avoid adapting for specific target layout. The check for the root layout + // size and alignment will be inserted by LayoutPath::dereferenceHandle anyway. 
+ return Utils.longToAddress(segment.address(), Long.MAX_VALUE, 1); } // Layout path projections @@ -205,19 +206,15 @@ public VarHandle dereferenceHandle(boolean adapt) { String.format("Path does not select a value layout: %s", breadcrumbs())); } - VarHandle handle = Utils.makeRawSegmentViewVarHandle(valueLayout); - handle = MethodHandles.collectCoordinates(handle, 1, offsetHandle()); - - // we only have to check the alignment of the root layout for the first dereference we do, - // as each dereference checks the alignment of the target address when constructing its segment - // (see Utils::longToAddress) - if (derefAdapters.length == 0) { - // insert align check for the root layout on the initial MS + offset - List> coordinateTypes = handle.coordinateTypes(); - MethodHandle alignCheck = MethodHandles.insertArguments(MH_CHECK_ENCL_LAYOUT, 2, rootLayout()); - handle = MethodHandles.collectCoordinates(handle, 0, alignCheck); - int[] reorder = IntStream.concat(IntStream.of(0, 1), IntStream.range(0, coordinateTypes.size())).toArray(); - handle = MethodHandles.permuteCoordinates(handle, coordinateTypes, reorder); + VarHandle handle = Utils.makeRawSegmentViewVarHandle(valueLayout); // (MS, ML, long, long) + handle = MethodHandles.insertCoordinates(handle, 1, rootLayout()); // (MS, long, long) + if (strides.length > 0) { + MethodHandle offsetAdapter = offsetHandle(); + offsetAdapter = MethodHandles.insertArguments(offsetAdapter, 0, 0L); + handle = MethodHandles.collectCoordinates(handle, 2, offsetAdapter); // (MS, long) + } else { + // simpler adaptation + handle = MethodHandles.insertCoordinates(handle, 2, offset); // (MS, long) } if (adapt) { @@ -241,6 +238,8 @@ public VarHandle dereferenceHandle(boolean adapt) { @ForceInline private static long addScaledOffset(long base, long index, long stride, long bound) { Objects.checkIndex(index, bound); + // note: the below can overflow, depending on 'base'. When constructing var handles + // through the layout API, this is never the case, as the injected 'base' is always 0. 
return base + (stride * index); } @@ -285,12 +284,7 @@ public MethodHandle sliceHandle() { } private static void checkEnclosingLayout(MemorySegment segment, long offset, MemoryLayout enclosing) { - ((AbstractMemorySegmentImpl)segment).checkAccess(offset, enclosing.byteSize(), true); - if (!((AbstractMemorySegmentImpl) segment).isAlignedForElement(offset, enclosing)) { - throw new IllegalArgumentException(String.format( - "Target offset %d is incompatible with alignment constraint %d (of %s) for segment %s" - , offset, enclosing.byteAlignment(), enclosing, segment)); - } + ((AbstractMemorySegmentImpl)segment).checkEnclosingLayout(offset, enclosing, true); } public MemoryLayout layout() { diff --git a/src/java.base/share/classes/jdk/internal/foreign/Utils.java b/src/java.base/share/classes/jdk/internal/foreign/Utils.java index 2ed65e3dd04..f214d3fb806 100644 --- a/src/java.base/share/classes/jdk/internal/foreign/Utils.java +++ b/src/java.base/share/classes/jdk/internal/foreign/Utils.java @@ -63,7 +63,8 @@ private Utils() {} private static final MethodHandle BYTE_TO_BOOL; private static final MethodHandle BOOL_TO_BYTE; private static final MethodHandle ADDRESS_TO_LONG; - private static final MethodHandle LONG_TO_ADDRESS; + private static final MethodHandle LONG_TO_ADDRESS_TARGET; + private static final MethodHandle LONG_TO_ADDRESS_NO_TARGET; static { try { @@ -74,8 +75,10 @@ private Utils() {} MethodType.methodType(byte.class, boolean.class)); ADDRESS_TO_LONG = lookup.findStatic(SharedUtils.class, "unboxSegment", MethodType.methodType(long.class, MemorySegment.class)); - LONG_TO_ADDRESS = lookup.findStatic(Utils.class, "longToAddress", - MethodType.methodType(MemorySegment.class, long.class, long.class, long.class)); + LONG_TO_ADDRESS_TARGET = lookup.findStatic(Utils.class, "longToAddress", + MethodType.methodType(MemorySegment.class, long.class, AddressLayout.class)); + LONG_TO_ADDRESS_NO_TARGET = lookup.findStatic(Utils.class, "longToAddress", + MethodType.methodType(MemorySegment.class, long.class)); } catch (Throwable ex) { throw new ExceptionInInitializerError(ex); } @@ -129,11 +132,10 @@ private static VarHandle makeRawSegmentViewVarHandleInternal(ValueLayout layout) if (layout.carrier() == boolean.class) { handle = MethodHandles.filterValue(handle, BOOL_TO_BYTE, BYTE_TO_BOOL); } else if (layout instanceof AddressLayout addressLayout) { - handle = MethodHandles.filterValue(handle, - MethodHandles.explicitCastArguments(ADDRESS_TO_LONG, MethodType.methodType(baseCarrier, MemorySegment.class)), - MethodHandles.explicitCastArguments(MethodHandles.insertArguments(LONG_TO_ADDRESS, 1, - pointeeByteSize(addressLayout), pointeeByteAlign(addressLayout)), - MethodType.methodType(MemorySegment.class, baseCarrier))); + MethodHandle longToAddressAdapter = addressLayout.targetLayout().isPresent() ? + MethodHandles.insertArguments(LONG_TO_ADDRESS_TARGET, 1, addressLayout) : + LONG_TO_ADDRESS_NO_TARGET; + handle = MethodHandles.filterValue(handle, ADDRESS_TO_LONG, longToAddressAdapter); } return handle; } @@ -146,6 +148,16 @@ private static byte booleanToByte(boolean b) { return b ? 
(byte)1 : (byte)0; } + @ForceInline + public static MemorySegment longToAddress(long addr) { + return longToAddress(addr, 0, 1); + } + + @ForceInline + public static MemorySegment longToAddress(long addr, AddressLayout layout) { + return longToAddress(addr, pointeeByteSize(layout), pointeeByteAlign(layout)); + } + @ForceInline public static MemorySegment longToAddress(long addr, long size, long align) { if (!isAligned(addr, align)) { From e297e8817f486e4af850c97fcff859c3e9a9e21c Mon Sep 17 00:00:00 2001 From: Aleksey Shipilev Date: Wed, 21 Aug 2024 16:10:41 +0000 Subject: [PATCH 63/67] 8338688: Shenandoah: Avoid calling java_lang_Class accessors in asserts/verifier Reviewed-by: rkennke, wkemper --- src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp | 2 +- src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp index 1539c7c2c5d..0bbced2e8d3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp @@ -266,7 +266,7 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char* // Do additional checks for special objects: their fields can hold metadata as well. // We want to check class loading/unloading did not corrupt them. - if (Universe::is_fully_initialized() && java_lang_Class::is_instance(obj)) { + if (Universe::is_fully_initialized() && (obj_klass == vmClasses::Class_klass())) { Metadata* klass = obj->metadata_field(java_lang_Class::klass_offset()); if (klass != nullptr && !Metaspace::contains(klass)) { print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_correct failed", diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 4834ecba543..e5db7a9b392 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -222,7 +222,7 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { // Do additional checks for special objects: their fields can hold metadata as well. // We want to check class loading/unloading did not corrupt them. - if (java_lang_Class::is_instance(obj)) { + if (obj_klass == vmClasses::Class_klass()) { Metadata* klass = obj->metadata_field(java_lang_Class::klass_offset()); check(ShenandoahAsserts::_safe_oop, obj, klass == nullptr || Metaspace::contains(klass), From ab8071d28027ecbf5e8984c30b35fa1c2d934de7 Mon Sep 17 00:00:00 2001 From: Doug Lea Date: Wed, 21 Aug 2024 18:22:24 +0000 Subject: [PATCH 64/67] 8338146: Improve Exchanger performance with VirtualThreads Reviewed-by: alanb --- .../java/util/concurrent/Exchanger.java | 550 +++++++----------- .../util/concurrent/ForkJoinWorkerThread.java | 26 + .../util/concurrent/LinkedTransferQueue.java | 4 +- 3 files changed, 251 insertions(+), 329 deletions(-) diff --git a/src/java.base/share/classes/java/util/concurrent/Exchanger.java b/src/java.base/share/classes/java/util/concurrent/Exchanger.java index 0096bca8c6f..8674ea9af39 100644 --- a/src/java.base/share/classes/java/util/concurrent/Exchanger.java +++ b/src/java.base/share/classes/java/util/concurrent/Exchanger.java @@ -139,125 +139,109 @@ public class Exchanger { * able to exchange items. 
That is, we cannot completely partition * across threads, but instead give threads arena indices that * will on average grow under contention and shrink under lack of - * contention. We approach this by defining the Nodes that we need - * anyway as ThreadLocals, and include in them per-thread index - * and related bookkeeping state. (We can safely reuse per-thread - * nodes rather than creating them fresh each time because slots - * alternate between pointing to a node vs null, so cannot - * encounter ABA problems. However, we do need some care in - * resetting them between uses.) + * contention. * - * Implementing an effective arena requires allocating a bunch of - * space, so we only do so upon detecting contention (except on - * uniprocessors, where they wouldn't help, so aren't used). - * Otherwise, exchanges use the single-slot slotExchange method. - * On contention, not only must the slots be in different - * locations, but the locations must not encounter memory - * contention due to being on the same cache line (or more - * generally, the same coherence unit). Because, as of this - * writing, there is no way to determine cacheline size, we define - * a value that is enough for common platforms. Additionally, - * extra care elsewhere is taken to avoid other false/unintended - * sharing and to enhance locality, including adding padding (via - * @Contended) to Nodes, embedding "bound" as an Exchanger field. + * We approach this by defining the Nodes holding references to + * transfered items as ThreadLocals, and include in them + * per-thread index and related bookkeeping state. We can safely + * reuse per-thread nodes rather than creating them fresh each + * time because slots alternate between pointing to a node vs + * null, so cannot encounter ABA problems. However, we must ensure + * that object transfer fields are reset between uses. Given this, + * Participant nodes can be defined as static ThreadLocals. As + * seen for example in class Striped64, using indices established + * in one instance across others usually improves overall + * performance. Nodes also include a participant-local random + * number generator. + * + * Spreading out contention requires that the memory locations + * used by the arena slots don't share a cache line -- otherwise, + * the arena would have almost no benefit. We arrange this by + * adding another level of indirection: The arena elements point + * to "Slots", each of which is padded using @Contended. We only + * create a single Slot on intialization, adding more when + * needed. The per-thread Participant Nodes may also be subject to + * false-sharing contention, but tend to be more scattered in + * memory, so are unpadded, with some occasional performance impact. * * The arena starts out with only one used slot. We expand the * effective arena size by tracking collisions; i.e., failed CASes - * while trying to exchange. By nature of the above algorithm, the - * only kinds of collision that reliably indicate contention are - * when two attempted releases collide -- one of two attempted - * offers can legitimately fail to CAS without indicating - * contention by more than one other thread. (Note: it is possible - * but not worthwhile to more precisely detect contention by - * reading slot values after CAS failures.) When a thread has - * collided at each slot within the current arena bound, it tries - * to expand the arena size by one. 
We track collisions within - * bounds by using a version (sequence) number on the "bound" - * field, and conservatively reset collision counts when a - * participant notices that bound has been updated (in either - * direction). + * while trying to exchange. And shrink it via "spinouts" in which + * threads give up waiting at a slot. By nature of the above + * algorithm, the only kinds of collision that reliably indicate + * contention are when two attempted releases collide -- one of + * two attempted offers can legitimately fail to CAS without + * indicating contention by more than one other thread. * - * The effective arena size is reduced (when there is more than - * one slot) by giving up on waiting after a while and trying to - * decrement the arena size on expiration. The value of "a while" - * is an empirical matter. We implement by piggybacking on the - * use of spin->yield->block that is essential for reasonable - * waiting performance anyway -- in a busy exchanger, offers are - * usually almost immediately released, in which case context - * switching on multiprocessors is extremely slow/wasteful. Arena - * waits just omit the blocking part, and instead cancel. The spin - * count is empirically chosen to be a value that avoids blocking - * 99% of the time under maximum sustained exchange rates on a - * range of test machines. Spins and yields entail some limited - * randomness (using a cheap xorshift) to avoid regular patterns - * that can induce unproductive grow/shrink cycles. (Using a - * pseudorandom also helps regularize spin cycle duration by - * making branches unpredictable.) Also, during an offer, a - * waiter can "know" that it will be released when its slot has - * changed, but cannot yet proceed until match is set. In the - * mean time it cannot cancel the offer, so instead spins/yields. - * Note: It is possible to avoid this secondary check by changing - * the linearization point to be a CAS of the match field (as done - * in one case in the Scott & Scherer DISC paper), which also - * increases asynchrony a bit, at the expense of poorer collision - * detection and inability to always reuse per-thread nodes. So - * the current scheme is typically a better tradeoff. + * Arena size (the value of field "bound") is controlled by random + * sampling. On each miss (collision or spinout), a thread chooses + * a new random index within the arena. Upon the third collision + * with the same current bound, it tries to grow the arena. And + * upon the second spinout, it tries to shrink. The asymmetry in + * part reflects relative costs, and reduces flailing. Because + * they cannot be changed without also changing the sampling + * strategy, these rules are directly incorporated into uses of + * the xchg "misses" variable. The bound field is tagged with + * sequence numbers to reduce stale decisions. Uniform random + * indices are generated using XorShift with enough bits so that + * bias (See Knuth TAoCP vol 2) is negligible for moduli used here + * (at most 256) without requiring rejection tests. Using + * nonuniform randoms with greater weight to higher indices is + * also possible but does not seem worthwhile in practice. * - * On collisions, indices traverse the arena cyclically in reverse - * order, restarting at the maximum index (which will tend to be - * sparsest) when bounds change. (On expirations, indices instead - * are halved until reaching 0.) 
It is possible (and has been - * tried) to use randomized, prime-value-stepped, or double-hash - * style traversal instead of simple cyclic traversal to reduce - * bunching. But empirically, whatever benefits these may have - * don't overcome their added overhead: We are managing operations - * that occur very quickly unless there is sustained contention, - * so simpler/faster control policies work better than more - * accurate but slower ones. + * These mechanics rely on a reasonable choice of constant SPINS. + * The time cost of SPINS * Thread.onSpinWait() should be at least + * the expected cost of a park/unpark context switch, and larger + * than that of two failed CASes, but still small enough to avoid + * excessive delays during arena shrinkage. We also deal with the + * possibility that when an offering thread waits for a release, + * spin-waiting would be useless because the releasing thread is + * descheduled. On multiprocessors, we cannot know this in + * general. But when Virtual Threads are used, method + * ForkJoinWorkerThread.hasKnownQueuedWork serves as a guide to + * whether to spin or immediately block, allowing a context switch + * that may enable a releaser. Note also that when many threads + * are being run on few cores, enountering enough collisions to + * trigger arena growth is rare, and soon followed by shrinkage, + * so this doesn't require special handling. * - * Because we use expiration for arena size control, we cannot - * throw TimeoutExceptions in the timed version of the public - * exchange method until the arena size has shrunken to zero (or - * the arena isn't enabled). This may delay response to timeout - * but is still within spec. + * The basic exchange mechanics rely on checks that Node item + * fields are not null, which doesn't work when offered items are + * null. We trap this case by translating nulls to the + * (un-Exchangeable) value of the static Participant + * reference. * - * Essentially all of the implementation is in methods - * slotExchange and arenaExchange. These have similar overall - * structure, but differ in too many details to combine. The - * slotExchange method uses the single Exchanger field "slot" - * rather than arena array elements. However, it still needs - * minimal collision detection to trigger arena construction. - * (The messiest part is making sure interrupt status and - * InterruptedExceptions come out right during transitions when - * both methods may be called. This is done by using null return - * as a sentinel to recheck interrupt status.) + * Essentially all of the implementation is in method xchg. As is + * too common in this sort of code, most of the logic relies on + * reads of fields that are maintained as local variables so can't + * be nicely factored. It is structured as a main loop with a + * leading volatile read (of field bound), that causes others to + * be freshly read even though declared in plain mode. We don't + * use compareAndExchange that would otherwise save some re-reads + * because of the need to recheck indices and bounds on failures. * - * As is too common in this sort of code, methods are monolithic - * because most of the logic relies on reads of fields that are - * maintained as local variables so can't be nicely factored -- - * mainly, here, bulky spin->yield->block/cancel code. 
Note that - * field Node.item is not declared as volatile even though it is - * read by releasing threads, because they only do so after CAS - * operations that must precede access, and all uses by the owning - * thread are otherwise acceptably ordered by other operations. - * (Because the actual points of atomicity are slot CASes, it - * would also be legal for the write to Node.match in a release to - * be weaker than a full volatile write. However, this is not done - * because it could allow further postponement of the write, - * delaying progress.) - */ - - /** - * The index distance (as a shift value) between any two used slots - * in the arena, spacing them out to avoid false sharing. + * Support for optional timeouts in a single method adds further + * complexity. Note that for the sake of arena bounds control, + * time bounds must be ignored during spinouts, which may delay + * TimeoutExceptions (but no more so than would excessive context + * switching that could occur otherwise). Responses to + * interruption are handled similarly, postponing commitment to + * throw InterruptedException until successfully cancelled. + * + * Design differences from previous releases include: + * * Accommodation of VirtualThreads. + * * Use of Slots vs spaced indices for the arena and static + * ThreadLocals, avoiding separate arena vs non-arena modes. + * * Use of random sampling for grow/shrink decisions, with typically + * faster and more stable adaptation (as was mentioned as a + * possible improvement in previous version). */ - private static final int ASHIFT = 5; /** * The maximum supported arena index. The maximum allocatable - * arena size is MMASK + 1. Must be a power of two minus one, less - * than (1<<(31-ASHIFT)). The cap of 255 (0xff) more than suffices - * for the expected scaling limits of the main algorithms. + * arena size is MMASK + 1. Must be a power of two minus one. The + * cap of 255 (0xff) more than suffices for the expected scaling + * limits of the main algorithms. */ private static final int MMASK = 0xff; @@ -267,49 +251,34 @@ public class Exchanger { */ private static final int SEQ = MMASK + 1; - /** The number of CPUs, for sizing and spin control */ - private static final int NCPU = Runtime.getRuntime().availableProcessors(); - /** - * The maximum slot index of the arena: The number of slots that - * can in principle hold all threads without contention, or at - * most the maximum indexable value. - */ - static final int FULL = (NCPU >= (MMASK << 1)) ? MMASK : NCPU >>> 1; - - /** - * The bound for spins while waiting for a match. The actual - * number of iterations will on average be about twice this value - * due to randomization. Note: Spinning is disabled when NCPU==1. + * The bound for spins while waiting for a match before either + * blocking or possibly shrinking arena. */ private static final int SPINS = 1 << 10; /** - * Value representing null arguments/returns from public - * methods. Needed because the API originally didn't disallow null - * arguments, which it should have. - */ - private static final Object NULL_ITEM = new Object(); - - /** - * Sentinel value returned by internal exchange methods upon - * timeout, to avoid need for separate timed versions of these - * methods. 
+ * Padded arena cells to avoid false-sharing memory contention */ - private static final Object TIMED_OUT = new Object(); + @jdk.internal.vm.annotation.Contended + static final class Slot { + Node entry; + } /** * Nodes hold partially exchanged data, plus other per-thread - * bookkeeping. Padded via @Contended to reduce memory contention. + * bookkeeping. */ - @jdk.internal.vm.annotation.Contended static final class Node { + static final class Node { + long seed; // Random seed int index; // Arena index - int bound; // Last recorded value of Exchanger.bound - int collides; // Number of CAS failures at current bound - int hash; // Pseudo-random for spins Object item; // This thread's current item volatile Object match; // Item provided by releasing thread volatile Thread parked; // Set to this thread when parked, else null + Node() { + index = -1; // initialize on first use + seed = Thread.currentThread().threadId(); + } } /** The corresponding thread local class */ @@ -318,210 +287,152 @@ static final class Participant extends ThreadLocal { } /** - * Per-thread state. + * The participant thread-locals. Because it is impossible to + * exchange, we also use this reference for dealing with null user + * arguments that are translated in and out of this value + * surrounding use. */ - private final Participant participant; + private static final Participant participant = new Participant(); /** - * Elimination array; null until enabled (within slotExchange). - * Element accesses use emulation of volatile gets and CAS. + * Elimination array; element accesses use emulation of volatile + * gets and CAS. */ - private volatile Node[] arena; + private final Slot[] arena; /** - * Slot used until contention detected. + * Number of cores, for sizing and spin control. Computed only + * upon construction. */ - private volatile Node slot; + private final int ncpu; /** - * The index of the largest valid arena position, OR'ed with SEQ - * number in high bits, incremented on each update. The initial - * update from 0 to SEQ is used to ensure that the arena array is - * constructed only once. + * The index of the largest valid arena position. */ private volatile int bound; /** - * Exchange function when arenas enabled. See above for explanation. + * Exchange function. See above for explanation. * - * @param item the (non-null) item to exchange - * @param timed true if the wait is timed - * @param ns if timed, the maximum wait time, else 0L - * @return the other thread's item; or null if interrupted; or - * TIMED_OUT if timed and timed out + * @param x the item to exchange + * @param deadline if zero, untimed, else timeout deadline + * @return the other thread's item + * @throws InterruptedException if interrupted while waiting + * @throws TimeoutException if deadline nonzero and timed out */ - private final Object arenaExchange(Object item, boolean timed, long ns) { - Node[] a = arena; + private final V xchg(V x, long deadline) + throws InterruptedException, TimeoutException { + Slot[] a = arena; int alen = a.length; - Node p = participant.get(); - for (int i = p.index;;) { // access slot at i - int b, m, c; - int j = (i << ASHIFT) + ((1 << ASHIFT) - 1); - if (j < 0 || j >= alen) - j = alen - 1; - Node q = (Node)AA.getAcquire(a, j); - if (q != null && AA.compareAndSet(a, j, q, null)) { - Object v = q.item; // release - q.match = item; - Thread w = q.parked; - if (w != null) - LockSupport.unpark(w); - return v; + Participant ps = participant; + Object item = (x == null) ? 
ps : x; // translate nulls + Node p = ps.get(); + int i = p.index; // if < 0, move + int misses = 0; // ++ on collide, -- on spinout + Object offered = null; // for cleanup + Object v = null; + outer: for (;;) { + int b, m; Slot s; Node q; + if ((m = (b = bound) & MMASK) == 0) // volatile read + i = 0; + if (i < 0 || i > m || i >= alen || (s = a[i]) == null) { + long r = p.seed; // randomly move + r ^= r << 13; r ^= r >>> 7; r ^= r << 17; // xorShift + i = p.index = (int)((p.seed = r) % (m + 1)); + } + else if ((q = s.entry) != null) { // try release + if (ENTRY.compareAndSet(s, q, null)) { + Thread w; + v = q.item; + q.match = item; + if (i == 0 && (w = q.parked) != null) + LockSupport.unpark(w); + break; + } + else { // collision + int nb; + i = -1; // move index + if (b != bound) // stale + misses = 0; + else if (misses <= 2) // continue sampling + ++misses; + else if ((nb = (b + 1) & MMASK) < alen) { + misses = 0; // try to grow + if (BOUND.compareAndSet(this, b, b + 1 + SEQ) && + a[i = p.index = nb] == null) + AA.compareAndSet(a, nb, null, new Slot()); + } + } } - else if (i <= (m = (b = bound) & MMASK) && q == null) { - p.item = item; // offer - if (AA.compareAndSet(a, j, null, p)) { - long end = (timed && m == 0) ? System.nanoTime() + ns : 0L; - Thread t = Thread.currentThread(); // wait - for (int h = p.hash, spins = SPINS;;) { - Object v = p.match; - if (v != null) { - MATCH.setRelease(p, null); - p.item = null; // clear for next use - p.hash = h; - return v; + else { // try offer + if (offered == null) + offered = p.item = item; + if (ENTRY.compareAndSet(s, null, p)) { + boolean tryCancel; // true if interrupted + Thread t = Thread.currentThread(); + if (!(tryCancel = t.isInterrupted()) && ncpu > 1 && + (i != 0 || // check for busy VTs + (!ForkJoinWorkerThread.hasKnownQueuedWork()))) { + for (int j = SPINS; j > 0; --j) { + if ((v = p.match) != null) { + MATCH.set(p, null); + break outer; // spin wait + } + Thread.onSpinWait(); } - else if (spins > 0) { - h ^= h << 1; h ^= h >>> 3; h ^= h << 10; // xorshift - if (h == 0) // initialize hash - h = SPINS | (int)t.threadId(); - else if (h < 0 && // approx 50% true - (--spins & ((SPINS >>> 1) - 1)) == 0) - Thread.yield(); // two yields per wait + } + for (long ns = 1L;;) { // block or cancel offer + if ((v = p.match) != null) { + MATCH.set(p, null); + break outer; } - else if (AA.getAcquire(a, j) != p) - spins = SPINS; // releaser hasn't set match yet - else if (!t.isInterrupted() && m == 0 && - (!timed || - (ns = end - System.nanoTime()) > 0L)) { - p.parked = t; // minimize window - if (AA.getAcquire(a, j) == p) { - if (ns == 0L) + if (i == 0 && !tryCancel && + (deadline == 0L || + ((ns = deadline - System.nanoTime()) > 0L))) { + p.parked = t; // emable unpark and recheck + if (p.match == null) { + if (deadline == 0L) LockSupport.park(this); else LockSupport.parkNanos(this, ns); + tryCancel = t.isInterrupted(); } p.parked = null; } - else if (AA.getAcquire(a, j) == p && - AA.compareAndSet(a, j, p, null)) { - if (m != 0) // try to shrink - BOUND.compareAndSet(this, b, b + SEQ - 1); - p.item = null; - p.hash = h; - i = p.index >>>= 1; // descend + else if (ENTRY.compareAndSet(s, p, null)) { // cancel + offered = p.item = null; if (Thread.interrupted()) - return null; - if (timed && m == 0 && ns <= 0L) - return TIMED_OUT; - break; // expired; restart + throw new InterruptedException(); + if (deadline != 0L && ns <= 0L) + throw new TimeoutException(); + i = -1; // move and restart + if (bound != b) + misses = 0; // stale + else if (misses 
>= 0) + --misses; // continue sampling + else if ((b & MMASK) != 0) { + misses = 0; // try to shrink + BOUND.compareAndSet(this, b, b - 1 + SEQ); + } + continue outer; } } } - else - p.item = null; // clear offer - } - else { - if (p.bound != b) { // stale; reset - p.bound = b; - p.collides = 0; - i = (i != m || m == 0) ? m : m - 1; - } - else if ((c = p.collides) < m || m == FULL || - !BOUND.compareAndSet(this, b, b + SEQ + 1)) { - p.collides = c + 1; - i = (i == 0) ? m : i - 1; // cyclically traverse - } - else - i = m + 1; // grow - p.index = i; - } - } - } - - /** - * Exchange function used until arenas enabled. See above for explanation. - * - * @param item the item to exchange - * @param timed true if the wait is timed - * @param ns if timed, the maximum wait time, else 0L - * @return the other thread's item; or null if either the arena - * was enabled or the thread was interrupted before completion; or - * TIMED_OUT if timed and timed out - */ - private final Object slotExchange(Object item, boolean timed, long ns) { - Node p = participant.get(); - Thread t = Thread.currentThread(); - if (t.isInterrupted()) // preserve interrupt status so caller can recheck - return null; - - for (Node q;;) { - if ((q = slot) != null) { - if (SLOT.compareAndSet(this, q, null)) { - Object v = q.item; - q.match = item; - Thread w = q.parked; - if (w != null) - LockSupport.unpark(w); - return v; - } - // create arena on contention, but continue until slot null - if (NCPU > 1 && bound == 0 && - BOUND.compareAndSet(this, 0, SEQ)) - arena = new Node[(FULL + 2) << ASHIFT]; - } - else if (arena != null) - return null; // caller must reroute to arenaExchange - else { - p.item = item; - if (SLOT.compareAndSet(this, null, p)) - break; - p.item = null; - } - } - - // await release - int h = p.hash; - long end = timed ? System.nanoTime() + ns : 0L; - int spins = (NCPU > 1) ? SPINS : 1; - Object v; - while ((v = p.match) == null) { - if (spins > 0) { - h ^= h << 1; h ^= h >>> 3; h ^= h << 10; - if (h == 0) - h = SPINS | (int)t.threadId(); - else if (h < 0 && (--spins & ((SPINS >>> 1) - 1)) == 0) - Thread.yield(); - } - else if (slot != p) - spins = SPINS; - else if (!t.isInterrupted() && arena == null && - (!timed || (ns = end - System.nanoTime()) > 0L)) { - p.parked = t; - if (slot == p) { - if (ns == 0L) - LockSupport.park(this); - else - LockSupport.parkNanos(this, ns); - } - p.parked = null; - } - else if (SLOT.compareAndSet(this, p, null)) { - v = timed && ns <= 0L && !t.isInterrupted() ? TIMED_OUT : null; - break; } } - MATCH.setRelease(p, null); - p.item = null; - p.hash = h; - return v; + if (offered != null) // cleanup + p.item = null; + @SuppressWarnings("unchecked") V ret = (v == participant) ? null : (V)v; + return ret; } /** * Creates a new Exchanger. */ public Exchanger() { - participant = new Participant(); + int h = (ncpu = Runtime.getRuntime().availableProcessors()) >>> 1; + int size = (h == 0) ? 1 : (h > MMASK) ? MMASK + 1 : h; + (arena = new Slot[size])[0] = new Slot(); } /** @@ -557,17 +468,12 @@ public Exchanger() { * @throws InterruptedException if the current thread was * interrupted while waiting */ - @SuppressWarnings("unchecked") public V exchange(V x) throws InterruptedException { - Object v; - Node[] a; - Object item = (x == null) ? 
NULL_ITEM : x; // translate null args - if (((a = arena) != null || - (v = slotExchange(item, false, 0L)) == null) && - (Thread.interrupted() || // disambiguates null return - (v = arenaExchange(item, false, 0L)) == null)) - throw new InterruptedException(); - return (v == NULL_ITEM) ? null : (V)v; + try { + return xchg(x, 0L); + } catch (TimeoutException cannotHappen) { + return null; // not reached + } } /** @@ -612,34 +518,24 @@ public V exchange(V x) throws InterruptedException { * @throws TimeoutException if the specified waiting time elapses * before another thread enters the exchange */ - @SuppressWarnings("unchecked") public V exchange(V x, long timeout, TimeUnit unit) throws InterruptedException, TimeoutException { - Object v; - Object item = (x == null) ? NULL_ITEM : x; - long ns = unit.toNanos(timeout); - if ((arena != null || - (v = slotExchange(item, true, ns)) == null) && - (Thread.interrupted() || - (v = arenaExchange(item, true, ns)) == null)) - throw new InterruptedException(); - if (v == TIMED_OUT) - throw new TimeoutException(); - return (v == NULL_ITEM) ? null : (V)v; + long d = unit.toNanos(timeout) + System.nanoTime(); + return xchg(x, (d == 0L) ? 1L : d); // avoid zero deadline } // VarHandle mechanics private static final VarHandle BOUND; - private static final VarHandle SLOT; private static final VarHandle MATCH; + private static final VarHandle ENTRY; private static final VarHandle AA; static { try { MethodHandles.Lookup l = MethodHandles.lookup(); BOUND = l.findVarHandle(Exchanger.class, "bound", int.class); - SLOT = l.findVarHandle(Exchanger.class, "slot", Node.class); MATCH = l.findVarHandle(Node.class, "match", Object.class); - AA = MethodHandles.arrayElementVarHandle(Node[].class); + ENTRY = l.findVarHandle(Slot.class, "entry", Node.class); + AA = MethodHandles.arrayElementVarHandle(Slot[].class); } catch (ReflectiveOperationException e) { throw new ExceptionInInitializerError(e); } diff --git a/src/java.base/share/classes/java/util/concurrent/ForkJoinWorkerThread.java b/src/java.base/share/classes/java/util/concurrent/ForkJoinWorkerThread.java index 1b2777c6e4a..2995fe3c63d 100644 --- a/src/java.base/share/classes/java/util/concurrent/ForkJoinWorkerThread.java +++ b/src/java.base/share/classes/java/util/concurrent/ForkJoinWorkerThread.java @@ -39,6 +39,8 @@ import java.security.AccessControlContext; import java.security.PrivilegedAction; import java.security.ProtectionDomain; +import jdk.internal.access.JavaLangAccess; +import jdk.internal.access.SharedSecrets; /** * A thread managed by a {@link ForkJoinPool}, which executes @@ -202,6 +204,30 @@ public void run() { } } + /** + * Returns true if the current task is being executed by a + * ForkJoinWorkerThread that is momentarily known to have one or + * more queued tasks that it could execute immediately. This + * method is approximate and useful only as a heuristic indicator + * within a running task. 
+ * + * @return true if the current task is being executed by a worker + * that has queued work + */ + static boolean hasKnownQueuedWork() { + ForkJoinWorkerThread wt; ForkJoinPool.WorkQueue q, sq; + ForkJoinPool p; ForkJoinPool.WorkQueue[] qs; int i; + Thread c = JLA.currentCarrierThread(); + return ((c instanceof ForkJoinWorkerThread) && + (p = (wt = (ForkJoinWorkerThread)c).pool) != null && + (q = wt.workQueue) != null && + (i = q.source) >= 0 && // check local and current source queues + (((qs = p.queues) != null && qs.length > i && + (sq = qs[i]) != null && sq.top - sq.base > 0) || + q.top - q.base > 0)); + } + private static final JavaLangAccess JLA = SharedSecrets.getJavaLangAccess(); + /** * A worker thread that has no permissions, is not a member of any * user-defined ThreadGroup, uses the system class loader as diff --git a/src/java.base/share/classes/java/util/concurrent/LinkedTransferQueue.java b/src/java.base/share/classes/java/util/concurrent/LinkedTransferQueue.java index a0d3176c762..118d648c7a2 100644 --- a/src/java.base/share/classes/java/util/concurrent/LinkedTransferQueue.java +++ b/src/java.base/share/classes/java/util/concurrent/LinkedTransferQueue.java @@ -426,8 +426,8 @@ final Object await(Object e, long ns, Object blocker, boolean spin) { long deadline = (timed) ? System.nanoTime() + ns : 0L; boolean upc = isUniprocessor; // don't spin but later recheck Thread w = Thread.currentThread(); - if (w.isVirtual()) // don't spin - spin = false; + if (spin && ForkJoinWorkerThread.hasKnownQueuedWork()) + spin = false; // don't spin int spins = (spin & !upc) ? SPINS : 0; // negative when may park while ((m = item) == e) { if (spins >= 0) { From 47c8a6a8db979fe862be876008feb76cdc9dccfd Mon Sep 17 00:00:00 2001 From: Claes Redestad Date: Wed, 21 Aug 2024 22:10:06 +0000 Subject: [PATCH 65/67] 8333265: De-duplicate method references in java.util.stream.FindOps Reviewed-by: liach --- .../classes/java/util/stream/FindOps.java | 65 ++++++++++++------- .../java/util/stream/ops/ref/FindAny.java | 6 ++ 2 files changed, 46 insertions(+), 25 deletions(-) diff --git a/src/java.base/share/classes/java/util/stream/FindOps.java b/src/java.base/share/classes/java/util/stream/FindOps.java index 131eec06236..afe31a90bdc 100644 --- a/src/java.base/share/classes/java/util/stream/FindOps.java +++ b/src/java.base/share/classes/java/util/stream/FindOps.java @@ -194,13 +194,16 @@ public Optional get() { return hasValue ? Optional.of(value) : null; } - static final TerminalOp OP_FIND_FIRST = new FindOp<>(true, - StreamShape.REFERENCE, Optional.empty(), - Optional::isPresent, FindSink.OfRef::new); - - static final TerminalOp OP_FIND_ANY = new FindOp<>(false, - StreamShape.REFERENCE, Optional.empty(), - Optional::isPresent, FindSink.OfRef::new); + static final TerminalOp OP_FIND_FIRST, OP_FIND_ANY; + static { + Predicate> isPresent = Optional::isPresent; + Supplier>> newSink + = FindSink.OfRef::new; + OP_FIND_FIRST = new FindOp<>(true, StreamShape.REFERENCE, + Optional.empty(), isPresent, newSink); + OP_FIND_ANY = new FindOp<>(false, StreamShape.REFERENCE, + Optional.empty(), isPresent, newSink); + } } /** Specialization of {@code FindSink} for int streams */ @@ -217,12 +220,16 @@ public OptionalInt get() { return hasValue ? 
OptionalInt.of(value) : null; } - static final TerminalOp OP_FIND_FIRST = new FindOp<>(true, - StreamShape.INT_VALUE, OptionalInt.empty(), - OptionalInt::isPresent, FindSink.OfInt::new); - static final TerminalOp OP_FIND_ANY = new FindOp<>(false, - StreamShape.INT_VALUE, OptionalInt.empty(), - OptionalInt::isPresent, FindSink.OfInt::new); + static final TerminalOp OP_FIND_FIRST, OP_FIND_ANY; + static { + Predicate isPresent = OptionalInt::isPresent; + Supplier> newSink + = FindSink.OfInt::new; + OP_FIND_FIRST = new FindOp<>(true, StreamShape.INT_VALUE, + OptionalInt.empty(), isPresent, newSink); + OP_FIND_ANY = new FindOp<>(false, StreamShape.INT_VALUE, + OptionalInt.empty(), isPresent, newSink); + } } /** Specialization of {@code FindSink} for long streams */ @@ -239,12 +246,16 @@ public OptionalLong get() { return hasValue ? OptionalLong.of(value) : null; } - static final TerminalOp OP_FIND_FIRST = new FindOp<>(true, - StreamShape.LONG_VALUE, OptionalLong.empty(), - OptionalLong::isPresent, FindSink.OfLong::new); - static final TerminalOp OP_FIND_ANY = new FindOp<>(false, - StreamShape.LONG_VALUE, OptionalLong.empty(), - OptionalLong::isPresent, FindSink.OfLong::new); + static final TerminalOp OP_FIND_FIRST, OP_FIND_ANY; + static { + Predicate isPresent = OptionalLong::isPresent; + Supplier> newSink + = FindSink.OfLong::new; + OP_FIND_FIRST = new FindOp<>(true, StreamShape.LONG_VALUE, + OptionalLong.empty(), isPresent, newSink); + OP_FIND_ANY = new FindOp<>(false, StreamShape.LONG_VALUE, + OptionalLong.empty(), isPresent, newSink); + } } /** Specialization of {@code FindSink} for double streams */ @@ -261,12 +272,16 @@ public OptionalDouble get() { return hasValue ? OptionalDouble.of(value) : null; } - static final TerminalOp OP_FIND_FIRST = new FindOp<>(true, - StreamShape.DOUBLE_VALUE, OptionalDouble.empty(), - OptionalDouble::isPresent, FindSink.OfDouble::new); - static final TerminalOp OP_FIND_ANY = new FindOp<>(false, - StreamShape.DOUBLE_VALUE, OptionalDouble.empty(), - OptionalDouble::isPresent, FindSink.OfDouble::new); + static final TerminalOp OP_FIND_FIRST, OP_FIND_ANY; + static { + Predicate isPresent = OptionalDouble::isPresent; + Supplier> newSink + = FindSink.OfDouble::new; + OP_FIND_FIRST = new FindOp<>(true, StreamShape.DOUBLE_VALUE, + OptionalDouble.empty(), isPresent, newSink); + OP_FIND_ANY = new FindOp<>(false, StreamShape.DOUBLE_VALUE, + OptionalDouble.empty(), isPresent, newSink); + } } } diff --git a/test/micro/org/openjdk/bench/java/util/stream/ops/ref/FindAny.java b/test/micro/org/openjdk/bench/java/util/stream/ops/ref/FindAny.java index 217a750ade7..baf43a2e96c 100644 --- a/test/micro/org/openjdk/bench/java/util/stream/ops/ref/FindAny.java +++ b/test/micro/org/openjdk/bench/java/util/stream/ops/ref/FindAny.java @@ -60,4 +60,10 @@ public Long par_invoke() { return LongStream.range(0, size).parallel().boxed().findAny().get(); } + public static void main(String... 
args) { + FindAny findAny = new FindAny(); + findAny.size = 100000; + findAny.seq_invoke(); + findAny.par_invoke(); + } } From 1d05989bb4bcc08ef6f7c408ec987ea43995eb07 Mon Sep 17 00:00:00 2001 From: Axel Boldt-Christmas Date: Thu, 22 Aug 2024 06:23:06 +0000 Subject: [PATCH 66/67] 8334357: Use NonInterleavingLogStream for report_metadata_oome Reviewed-by: jsjolen, stuefe --- src/hotspot/share/memory/metaspace.cpp | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp index 7e01b42a873..ad51f5ab7b6 100644 --- a/src/hotspot/share/memory/metaspace.cpp +++ b/src/hotspot/share/memory/metaspace.cpp @@ -893,20 +893,22 @@ void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_s tracer()->report_metadata_oom(loader_data, word_size, type, mdtype); // If result is still null, we are out of memory. - Log(gc, metaspace, freelist, oom) log; - if (log.is_info()) { - log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT, - is_class_space_allocation(mdtype) ? "class" : "data", word_size); - ResourceMark rm; - if (log.is_debug()) { - if (loader_data->metaspace_or_null() != nullptr) { - LogStream ls(log.debug()); - loader_data->print_value_on(&ls); + { + LogMessage(gc, metaspace, freelist, oom) log; + if (log.is_info()) { + log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT, + is_class_space_allocation(mdtype) ? "class" : "data", word_size); + ResourceMark rm; + if (log.is_debug()) { + if (loader_data->metaspace_or_null() != nullptr) { + NonInterleavingLogStream ls(LogLevelType::Debug, log); + loader_data->print_value_on(&ls); + } } + NonInterleavingLogStream ls(LogLevelType::Info, log); + // In case of an OOM, log out a short but still useful report. + MetaspaceUtils::print_basic_report(&ls, 0); } - LogStream ls(log.info()); - // In case of an OOM, log out a short but still useful report. - MetaspaceUtils::print_basic_report(&ls, 0); } bool out_of_compressed_class_space = false; From b3c6e05992b98f7a036e96bd68ac8112345c34fa Mon Sep 17 00:00:00 2001 From: Axel Boldt-Christmas Date: Fri, 23 Aug 2024 05:47:29 +0000 Subject: [PATCH 67/67] 8338810: PPC, s390x: LightweightSynchronizer::exit asserts, missing lock Reviewed-by: mdoerr, amitkumar --- src/hotspot/cpu/ppc/macroAssembler_ppc.cpp | 6 +++++- src/hotspot/cpu/s390/macroAssembler_s390.cpp | 8 +++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp index 3b48b4020cc..9fb4a43097d 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp @@ -2902,7 +2902,11 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(ConditionRegister f // Check for monitor (0b10). ld(mark, oopDesc::mark_offset_in_bytes(), obj); andi_(t, mark, markWord::monitor_value); - bne(CCR0, inflated); + if (!UseObjectMonitorTable) { + bne(CCR0, inflated); + } else { + bne(CCR0, push_and_slow); + } #ifdef ASSERT // Check header not unlocked (0b01). 
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp index b31d08f9fde..d527b4d2aea 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp @@ -6286,6 +6286,7 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(Register obj, Regis BLOCK_COMMENT("compiler_fast_lightweight_unlock {"); { // Lightweight Unlock + NearLabel push_and_slow_path; // Check if obj is top of lock-stack. z_lgf(top, Address(Z_thread, ls_top_offset)); @@ -6315,7 +6316,11 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(Register obj, Regis // Check for monitor (0b10). z_lg(mark, Address(obj, mark_offset)); z_tmll(mark, markWord::monitor_value); - z_brnaz(inflated); + if (!UseObjectMonitorTable) { + z_brnaz(inflated); + } else { + z_brnaz(push_and_slow_path); + } #ifdef ASSERT // Check header not unlocked (0b01). @@ -6334,6 +6339,7 @@ void MacroAssembler::compiler_fast_unlock_lightweight_object(Register obj, Regis branch_optimized(Assembler::bcondEqual, unlocked); } + bind(push_and_slow_path); // Restore lock-stack and handle the unlock in runtime. z_lgf(top, Address(Z_thread, ls_top_offset)); DEBUG_ONLY(z_stg(obj, Address(Z_thread, top));)