From 1e2c0132c812c6ec6299dd86759b04c32a296ec7 Mon Sep 17 00:00:00 2001 From: "maoliang.ml" Date: Thu, 6 Jul 2023 11:37:55 +0800 Subject: [PATCH] [Backport] 8241825: Make compressed oops and compressed class pointers independent(x86_64) Summary: Make UseCompressedOops and UseCompressedClassPointers independent on x86_64 Test Plan: CICD Reviewed-by: kuaiwei, yulei, lingjun Issue: https://github.com/dragonwell-project/dragonwell11/issues/669 --- .../cpu/aarch64/globalDefinitions_aarch64.hpp | 2 + src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp | 2 + .../cpu/s390/globalDefinitions_s390.hpp | 2 + .../cpu/sparc/globalDefinitions_sparc.hpp | 2 + src/hotspot/cpu/x86/c1_FrameMap_x86.hpp | 2 +- src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp | 44 +++--- src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp | 10 +- src/hotspot/cpu/x86/c1_Runtime1_x86.cpp | 3 +- src/hotspot/cpu/x86/globalDefinitions_x86.hpp | 6 + src/hotspot/cpu/x86/interp_masm_x86.cpp | 6 +- src/hotspot/cpu/x86/macroAssembler_x86.cpp | 113 +++++++-------- src/hotspot/cpu/x86/macroAssembler_x86.hpp | 20 ++- src/hotspot/cpu/x86/methodHandles_x86.cpp | 8 +- src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp | 2 +- src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp | 6 +- src/hotspot/cpu/x86/stubGenerator_x86_64.cpp | 19 ++- src/hotspot/cpu/x86/templateTable_x86.cpp | 30 ++-- src/hotspot/cpu/x86/vtableStubs_x86_32.cpp | 4 +- src/hotspot/cpu/x86/vtableStubs_x86_64.cpp | 7 +- src/hotspot/cpu/x86/x86_64.ad | 48 +++---- src/hotspot/share/classfile/javaClasses.cpp | 18 +-- src/hotspot/share/memory/metaspace.cpp | 7 +- src/hotspot/share/opto/lcm.cpp | 4 +- src/hotspot/share/runtime/arguments.cpp | 23 ++- .../gc/metaspace/TestSizeTransitions.java | 24 ++-- .../CompressedClassPointers.java | 136 +++++++++++++++++- .../CompressedClassSpaceSize.java | 8 -- ...CountAfterGCEventWithG1ConcurrentMark.java | 2 +- ...CountAfterGCEventWithG1FullCollection.java | 2 +- ...bjectCountAfterGCEventWithParallelOld.java | 2 +- ...TestObjectCountAfterGCEventWithSerial.java | 2 +- .../gc/objectcount/TestObjectCountEvent.java | 2 +- 32 files changed, 360 insertions(+), 206 deletions(-) diff --git a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp index a15e962fa73..a53cde94b3f 100644 --- a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp @@ -62,4 +62,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false; #define NOT_R18_RESERVED(code) code #endif +#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS true + #endif // CPU_AARCH64_VM_GLOBALDEFINITIONS_AARCH64_HPP diff --git a/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp b/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp index 43b64033c07..785d2275e16 100644 --- a/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp +++ b/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp @@ -62,4 +62,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true; // Define the condition to use this -XX flag. 
#define USE_POLL_BIT_ONLY UseSIGTRAP +#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS true + #endif // CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP diff --git a/src/hotspot/cpu/s390/globalDefinitions_s390.hpp b/src/hotspot/cpu/s390/globalDefinitions_s390.hpp index 8f8c37ed0c4..eacbd87f49c 100644 --- a/src/hotspot/cpu/s390/globalDefinitions_s390.hpp +++ b/src/hotspot/cpu/s390/globalDefinitions_s390.hpp @@ -56,4 +56,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true; #define THREAD_LOCAL_POLL +#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS true + #endif // CPU_S390_VM_GLOBALDEFINITIONS_S390_HPP diff --git a/src/hotspot/cpu/sparc/globalDefinitions_sparc.hpp b/src/hotspot/cpu/sparc/globalDefinitions_sparc.hpp index 998119fe7d4..db933030203 100644 --- a/src/hotspot/cpu/sparc/globalDefinitions_sparc.hpp +++ b/src/hotspot/cpu/sparc/globalDefinitions_sparc.hpp @@ -55,4 +55,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true; // SPARC have implemented the local polling #define THREAD_LOCAL_POLL +#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS true + #endif // CPU_SPARC_VM_GLOBALDEFINITIONS_SPARC_HPP diff --git a/src/hotspot/cpu/x86/c1_FrameMap_x86.hpp b/src/hotspot/cpu/x86/c1_FrameMap_x86.hpp index 0b3bafb2e15..76456fb430f 100644 --- a/src/hotspot/cpu/x86/c1_FrameMap_x86.hpp +++ b/src/hotspot/cpu/x86/c1_FrameMap_x86.hpp @@ -148,7 +148,7 @@ static int adjust_reg_range(int range) { // Reduce the number of available regs (to free r12) in case of compressed oops - if (UseCompressedOops || UseCompressedClassPointers) return range - 1; + if (UseCompressedOops) return range - 1; return range; } diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp index b66197184a4..172930fb6c2 100644 --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp @@ -1176,6 +1176,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch LIR_Address* addr = src->as_address_ptr(); Address from_addr = as_Address(addr); + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); if (addr->base()->type() == T_OBJECT) { __ verify_oop(addr->base()->as_pointer_register()); @@ -1353,7 +1354,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) { #ifdef _LP64 if (UseCompressedClassPointers) { - __ decode_klass_not_null(dest->as_register()); + __ decode_klass_not_null(dest->as_register(), tmp_load_klass); } #endif } @@ -1641,6 +1642,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L Register dst = op->result_opr()->as_register(); ciKlass* k = op->klass(); Register Rtmp1 = noreg; + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); // check if it needs to be profiled ciMethodData* md = NULL; @@ -1704,7 +1706,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L // not a safepoint as obj null check happens earlier #ifdef _LP64 if (UseCompressedClassPointers) { - __ load_klass(Rtmp1, obj); + __ load_klass(Rtmp1, obj, tmp_load_klass); __ cmpptr(k_RInfo, Rtmp1); } else { __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); @@ -1721,7 +1723,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L } else { // get object class // not a safepoint as obj null check happens earlier - __ load_klass(klass_RInfo, obj); + __ 
load_klass(klass_RInfo, obj, tmp_load_klass); if (k->is_loaded()) { // See if we get an immediate positive hit #ifdef _LP64 @@ -1776,7 +1778,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L Register mdo = klass_RInfo, recv = k_RInfo; __ bind(profile_cast_success); __ mov_metadata(mdo, md->constant_encoding()); - __ load_klass(recv, obj); + __ load_klass(recv, obj, tmp_load_klass); Label update_done; type_profile_helper(mdo, md, data, recv, success); __ jmp(*success); @@ -1792,6 +1794,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); LIR_Code code = op->code(); if (code == lir_store_check) { Register value = op->object()->as_register(); @@ -1837,8 +1840,8 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { } add_debug_info_for_null_check_here(op->info_for_exception()); - __ load_klass(k_RInfo, array); - __ load_klass(klass_RInfo, value); + __ load_klass(k_RInfo, array, tmp_load_klass); + __ load_klass(klass_RInfo, value, tmp_load_klass); // get instance klass (it's already uncompressed) __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); @@ -1859,7 +1862,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { Register mdo = klass_RInfo, recv = k_RInfo; __ bind(profile_cast_success); __ mov_metadata(mdo, md->constant_encoding()); - __ load_klass(recv, value); + __ load_klass(recv, value, tmp_load_klass); Label update_done; type_profile_helper(mdo, md, data, recv, &done); __ jmpb(done); @@ -3057,6 +3060,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { Register dst_pos = op->dst_pos()->as_register(); Register length = op->length()->as_register(); Register tmp = op->tmp()->as_register(); + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); CodeStub* stub = op->stub(); int flags = op->flags(); @@ -3202,13 +3206,13 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { // an instance type. 
if (flags & LIR_OpArrayCopy::type_check) { if (!(flags & LIR_OpArrayCopy::dst_objarray)) { - __ load_klass(tmp, dst); + __ load_klass(tmp, dst, tmp_load_klass); __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value); __ jcc(Assembler::greaterEqual, *stub->entry()); } if (!(flags & LIR_OpArrayCopy::src_objarray)) { - __ load_klass(tmp, src); + __ load_klass(tmp, src, tmp_load_klass); __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value); __ jcc(Assembler::greaterEqual, *stub->entry()); } @@ -3265,8 +3269,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { __ push(src); __ push(dst); - __ load_klass(src, src); - __ load_klass(dst, dst); + __ load_klass(src, src, tmp_load_klass); + __ load_klass(dst, dst, tmp_load_klass); __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL); @@ -3294,9 +3298,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { assert(flags & mask, "one of the two should be known to be an object array"); if (!(flags & LIR_OpArrayCopy::src_objarray)) { - __ load_klass(tmp, src); + __ load_klass(tmp, src, tmp_load_klass); } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { - __ load_klass(tmp, dst); + __ load_klass(tmp, dst, tmp_load_klass); } int lh_offset = in_bytes(Klass::layout_helper_offset()); Address klass_lh_addr(tmp, lh_offset); @@ -3340,14 +3344,14 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { #ifdef _WIN64 // Allocate abi space for args but be sure to keep stack aligned __ subptr(rsp, 6*wordSize); - __ load_klass(c_rarg3, dst); + __ load_klass(c_rarg3, dst, tmp_load_klass); __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset())); store_parameter(c_rarg3, 4); __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset())); __ call(RuntimeAddress(copyfunc_addr)); __ addptr(rsp, 6*wordSize); #else - __ load_klass(c_rarg4, dst); + __ load_klass(c_rarg4, dst, tmp_load_klass); __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset())); __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset())); __ call(RuntimeAddress(copyfunc_addr)); @@ -3412,7 +3416,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { __ mov_metadata(tmp, default_type->constant_encoding()); #ifdef _LP64 if (UseCompressedClassPointers) { - __ encode_klass_not_null(tmp); + __ encode_klass_not_null(tmp, rscratch1); } #endif @@ -3516,6 +3520,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { ciMethod* method = op->profiled_method(); int bci = op->profiled_bci(); ciMethod* callee = op->profiled_callee(); + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); // Update counter for all call types ciMethodData* md = method->method_data_or_null(); @@ -3568,7 +3573,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { } } } else { - __ load_klass(recv, recv); + __ load_klass(recv, recv, tmp_load_klass); Label update_done; type_profile_helper(mdo, md, data, recv, &update_done); // Receiver did not match any saved receiver and there is no empty row for it. 
@@ -3586,6 +3591,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { Register obj = op->obj()->as_register(); Register tmp = op->tmp()->as_pointer_register(); + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); ciKlass* exact_klass = op->exact_klass(); intptr_t current_klass = op->current_klass(); @@ -3632,7 +3638,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { #ifdef ASSERT if (exact_klass != NULL) { Label ok; - __ load_klass(tmp, tmp); + __ load_klass(tmp, tmp, tmp_load_klass); __ push(tmp); __ mov_metadata(tmp, exact_klass->constant_encoding()); __ cmpptr(tmp, Address(rsp, 0)); @@ -3647,7 +3653,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { if (exact_klass != NULL) { __ mov_metadata(tmp, exact_klass->constant_encoding()); } else { - __ load_klass(tmp, tmp); + __ load_klass(tmp, tmp, tmp_load_klass); } __ xorptr(tmp, mdo_addr); diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp index 7bf4cf7b41f..99857b604d0 100644 --- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp @@ -55,7 +55,9 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr if (UseBiasedLocking) { assert(scratch != noreg, "should have scratch register at this point"); - null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case); + Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg); + null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, rklass_decode_tmp, false, done, &slow_case); + } else { null_check_offset = offset(); } @@ -152,6 +154,7 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) { assert_different_registers(obj, klass, len); + Register tmp_encode_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); if (UseBiasedLocking && !len->is_valid()) { assert_different_registers(obj, klass, len, t1, t2); movptr(t1, Address(klass, Klass::prototype_header_offset())); @@ -163,7 +166,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register #ifdef _LP64 if (UseCompressedClassPointers) { // Take care not to kill klass movptr(t1, klass); - encode_klass_not_null(t1); + encode_klass_not_null(t1, tmp_encode_klass); movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1); } else #endif @@ -298,9 +301,10 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) { // check against inline cache assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check"); int start_offset = offset(); + Register tmp_load_klass = LP64_ONLY(rscratch2) NOT_LP64(noreg); if (UseCompressedClassPointers) { - load_klass(rscratch1, receiver); + load_klass(rscratch1, receiver, tmp_load_klass); cmpptr(rscratch1, iCache); } else { cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes())); diff --git a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp index 9dfca911c0c..3f9951407ac 100644 --- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp +++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp @@ -1242,8 +1242,9 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { // load the klass and 
check the has finalizer flag Label register_finalizer; + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); Register t = rsi; - __ load_klass(t, rax); + __ load_klass(t, rax, tmp_load_klass); __ movl(t, Address(t, Klass::access_flags_offset())); __ testl(t, JVM_ACC_HAS_FINALIZER); __ jcc(Assembler::notZero, register_finalizer); diff --git a/src/hotspot/cpu/x86/globalDefinitions_x86.hpp b/src/hotspot/cpu/x86/globalDefinitions_x86.hpp index 94d1ee20eca..6b605da0e59 100644 --- a/src/hotspot/cpu/x86/globalDefinitions_x86.hpp +++ b/src/hotspot/cpu/x86/globalDefinitions_x86.hpp @@ -67,4 +67,10 @@ const bool CCallingConventionRequiresIntsAsLongs = false; #define THREAD_LOCAL_POLL +#if INCLUDE_JVMCI +#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS (EnableJVMCI || UseAOT) +#else +#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false +#endif + #endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP diff --git a/src/hotspot/cpu/x86/interp_masm_x86.cpp b/src/hotspot/cpu/x86/interp_masm_x86.cpp index 70475091037..928118b911d 100644 --- a/src/hotspot/cpu/x86/interp_masm_x86.cpp +++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp @@ -58,7 +58,8 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md jmpb(next); bind(update); - load_klass(obj, obj); + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); + load_klass(obj, obj, tmp_load_klass); xorptr(obj, mdo_addr); testptr(obj, TypeEntries::type_klass_mask); @@ -1188,7 +1189,8 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) { movptr(obj_reg, Address(lock_reg, obj_offset)); if (UseBiasedLocking) { - biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, false, done, &slow_case); + Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg); + biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, rklass_decode_tmp, false, done, &slow_case); } // Load immediate 1 into swap_reg %rax diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp index eed28499cb8..9d807192f96 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -1112,6 +1112,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg, + Register tmp_reg2, bool swap_reg_contains_mark, Label& done, Label* slow_case, @@ -1156,7 +1157,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg, if (swap_reg_contains_mark) { null_check_offset = offset(); } - load_prototype_header(tmp_reg, obj_reg); + load_prototype_header(tmp_reg, obj_reg, tmp_reg2); #ifdef _LP64 orptr(tmp_reg, r15_thread); xorptr(tmp_reg, swap_reg); @@ -1244,7 +1245,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg, // // FIXME: due to a lack of registers we currently blow away the age // bits in this situation. Should attempt to preserve them. - load_prototype_header(tmp_reg, obj_reg); + load_prototype_header(tmp_reg, obj_reg, tmp_reg2); #ifdef _LP64 orptr(tmp_reg, r15_thread); #else @@ -1281,7 +1282,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg, // FIXME: due to a lack of registers we currently blow away the age // bits in this situation. Should attempt to preserve them. 
NOT_LP64( movptr(swap_reg, saved_mark_addr); ) - load_prototype_header(tmp_reg, obj_reg); + load_prototype_header(tmp_reg, obj_reg, tmp_reg2); if (os::is_MP()) { lock(); } @@ -1717,7 +1718,6 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg if (use_rtm) { assert_different_registers(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg); } else { - assert(cx1Reg == noreg, ""); assert(cx2Reg == noreg, ""); assert_different_registers(objReg, boxReg, tmpReg, scrReg); } @@ -1758,7 +1758,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg // at [FETCH], below, will never observe a biased encoding (*101b). // If this invariant is not held we risk exclusion (safety) failure. if (UseBiasedLocking && !UseOptoBiasInlining) { - biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters); + biased_locking_enter(boxReg, objReg, tmpReg, scrReg, cx1Reg, false, DONE_LABEL, NULL, counters); } #if INCLUDE_RTM_OPT @@ -2534,7 +2534,7 @@ void MacroAssembler::call_VM_base(Register oop_result, #ifdef ASSERT // TraceBytecodes does not use r12 but saves it over the call, so don't verify // r12 is the heapbase. - LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");) + LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");) #endif // ASSERT assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); @@ -5591,25 +5591,29 @@ void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) resolve_oop_handle(mirror, tmp); } -void MacroAssembler::load_klass(Register dst, Register src) { +void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { + assert_different_registers(src, tmp); + assert_different_registers(dst, tmp); #ifdef _LP64 if (UseCompressedClassPointers) { movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); - decode_klass_not_null(dst); + decode_klass_not_null(dst, tmp); } else #endif movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); } -void MacroAssembler::load_prototype_header(Register dst, Register src) { - load_klass(dst, src); +void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) { + load_klass(dst, src, tmp); movptr(dst, Address(dst, Klass::prototype_header_offset())); } -void MacroAssembler::store_klass(Register dst, Register src) { +void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { + assert_different_registers(src, tmp); + assert_different_registers(dst, tmp); #ifdef _LP64 if (UseCompressedClassPointers) { - encode_klass_not_null(src); + encode_klass_not_null(src, tmp); movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); } else #endif @@ -5814,61 +5818,38 @@ void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { } } -void MacroAssembler::encode_klass_not_null(Register r) { +void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { + assert_different_registers(r, tmp); if (Universe::narrow_klass_base() != NULL) { - // Use r12 as a scratch register in which to temporarily load the narrow_klass_base. 
- assert(r != r12_heapbase, "Encoding a klass in r12"); - mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); - subq(r, r12_heapbase); + mov64(tmp, (int64_t)Universe::narrow_klass_base()); + subq(r, tmp); } if (Universe::narrow_klass_shift() != 0) { assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); shrq(r, LogKlassAlignmentInBytes); } - if (Universe::narrow_klass_base() != NULL) { - reinit_heapbase(); - } -} - -void MacroAssembler::encode_klass_not_null(Register dst, Register src) { - if (dst == src) { - encode_klass_not_null(src); - } else { - if (Universe::narrow_klass_base() != NULL) { - mov64(dst, (int64_t)Universe::narrow_klass_base()); - negq(dst); - addq(dst, src); - } else { - movptr(dst, src); - } - if (Universe::narrow_klass_shift() != 0) { - assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); - shrq(dst, LogKlassAlignmentInBytes); - } - } } -// Function instr_size_for_decode_klass_not_null() counts the instructions -// generated by decode_klass_not_null(register r) and reinit_heapbase(), -// when (Universe::heap() != NULL). Hence, if the instructions they -// generate change, then this method needs to be updated. -int MacroAssembler::instr_size_for_decode_klass_not_null() { - assert (UseCompressedClassPointers, "only for compressed klass ptrs"); +void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) { + assert_different_registers(src, dst); if (Universe::narrow_klass_base() != NULL) { - // mov64 + addq + shlq? + mov64 (for reinit_heapbase()). - return (Universe::narrow_klass_shift() == 0 ? 20 : 24); + mov64(dst, -(int64_t)Universe::narrow_klass_base()); + addq(dst, src); } else { - // longest load decode klass function, mov64, leaq - return 16; + movptr(dst, src); + } + if (Universe::narrow_klass_shift() != 0) { + assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); + shrq(dst, LogKlassAlignmentInBytes); } } // !!! If the instructions that get generated here change then function // instr_size_for_decode_klass_not_null() needs to get updated. -void MacroAssembler::decode_klass_not_null(Register r) { +void MacroAssembler::decode_klass_not_null(Register r, Register tmp) { + assert_different_registers(r, tmp); // Note: it will change flags - assert (UseCompressedClassPointers, "should only be used for compressed headers"); - assert(r != r12_heapbase, "Decoding a klass in r12"); + assert(UseCompressedClassPointers, "should only be used for compressed headers"); // Cannot assert, unverified entry point counts instructions (see .ad file) // vtableStubs also counts instructions in pd_code_size_limit. // Also do not verify_oop as this is called by verify_oop. @@ -5876,24 +5857,30 @@ void MacroAssembler::decode_klass_not_null(Register r) { assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); shlq(r, LogKlassAlignmentInBytes); } - // Use r12 as a scratch register in which to temporarily load the narrow_klass_base. 
if (Universe::narrow_klass_base() != NULL) { - mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); - addq(r, r12_heapbase); - reinit_heapbase(); + mov64(tmp, (int64_t)Universe::narrow_klass_base()); + addq(r, tmp); } } -void MacroAssembler::decode_klass_not_null(Register dst, Register src) { +void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) { + assert_different_registers(src, dst); // Note: it will change flags assert (UseCompressedClassPointers, "should only be used for compressed headers"); - if (dst == src) { - decode_klass_not_null(dst); + // Cannot assert, unverified entry point counts instructions (see .ad file) + // vtableStubs also counts instructions in pd_code_size_limit. + // Also do not verify_oop as this is called by verify_oop. + if (Universe::narrow_klass_base() == NULL && + Universe::narrow_klass_shift() == 0) { + // The best case scenario is that there is no base or shift. Then it is already + // a pointer that needs nothing but a register rename. + movl(dst, src); } else { - // Cannot assert, unverified entry point counts instructions (see .ad file) - // vtableStubs also counts instructions in pd_code_size_limit. - // Also do not verify_oop as this is called by verify_oop. - mov64(dst, (int64_t)Universe::narrow_klass_base()); + if (Universe::narrow_klass_base() != NULL) { + mov64(dst, (int64_t)Universe::narrow_klass_base()); + } else { + xorq(dst, dst); + } if (Universe::narrow_klass_shift() != 0) { assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); @@ -5973,7 +5960,7 @@ void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { } void MacroAssembler::reinit_heapbase() { - if (UseCompressedOops || UseCompressedClassPointers) { + if (UseCompressedOops) { if (Universe::heap() != NULL) { if (Universe::narrow_oop_base() == NULL) { MacroAssembler::xorptr(r12_heapbase, r12_heapbase); diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp index 90cc11dc6eb..0965b913bd4 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp @@ -319,8 +319,8 @@ class MacroAssembler: public Assembler { void load_mirror(Register mirror, Register method, Register tmp = rscratch2); // oop manipulations - void load_klass(Register dst, Register src); - void store_klass(Register dst, Register src); + void load_klass(Register dst, Register src, Register tmp); + void store_klass(Register dst, Register src, Register tmp); void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, Register tmp1, Register thread_tmp); @@ -338,7 +338,7 @@ class MacroAssembler: public Assembler { // stored using routines that take a jobject. 
void store_heap_oop_null(Address dst); - void load_prototype_header(Register dst, Register src); + void load_prototype_header(Register dst, Register src, Register tmp); #ifdef _LP64 void store_klass_gap(Register dst, Register src); @@ -361,19 +361,15 @@ class MacroAssembler: public Assembler { void cmp_narrow_oop(Register dst, jobject obj); void cmp_narrow_oop(Address dst, jobject obj); - void encode_klass_not_null(Register r); - void decode_klass_not_null(Register r); - void encode_klass_not_null(Register dst, Register src); - void decode_klass_not_null(Register dst, Register src); + void encode_klass_not_null(Register r, Register tmp); + void decode_klass_not_null(Register r, Register tmp); + void encode_and_move_klass_not_null(Register dst, Register src); + void decode_and_move_klass_not_null(Register dst, Register src); void set_narrow_klass(Register dst, Klass* k); void set_narrow_klass(Address dst, Klass* k); void cmp_narrow_klass(Register dst, Klass* k); void cmp_narrow_klass(Address dst, Klass* k); - // Returns the byte size of the instructions generated by decode_klass_not_null() - // when compressed klass pointers are being used. - static int instr_size_for_decode_klass_not_null(); - // if heap base register is used - reinit it with the correct value void reinit_heapbase(); @@ -669,7 +665,7 @@ class MacroAssembler: public Assembler { // the calling code has already passed any potential faults. int biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg, - bool swap_reg_contains_mark, + Register tmp_reg2, bool swap_reg_contains_mark, Label& done, Label* slow_case = NULL, BiasedLockingCounters* counters = NULL); void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done); diff --git a/src/hotspot/cpu/x86/methodHandles_x86.cpp b/src/hotspot/cpu/x86/methodHandles_x86.cpp index 5b2bdba733c..a69d073aef3 100644 --- a/src/hotspot/cpu/x86/methodHandles_x86.cpp +++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp @@ -72,7 +72,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm, Klass* klass = SystemDictionary::well_known_klass(klass_id); Register temp = rdi; Register temp2 = noreg; - LP64_ONLY(temp2 = rscratch1); // used by MacroAssembler::cmpptr + LP64_ONLY(temp2 = rscratch1); // used by MacroAssembler::cmpptr and load_klass Label L_ok, L_bad; BLOCK_COMMENT("verify_klass {"); __ verify_oop(obj); @@ -80,7 +80,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm, __ jcc(Assembler::zero, L_bad); __ push(temp); if (temp2 != noreg) __ push(temp2); #define UNPUSH { if (temp2 != noreg) __ pop(temp2); __ pop(temp); } - __ load_klass(temp, obj); + __ load_klass(temp, obj, temp2); __ cmpptr(temp, ExternalAddress((address) klass_addr)); __ jcc(Assembler::equal, L_ok); intptr_t super_check_offset = klass->super_check_offset(); @@ -350,7 +350,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, } else { // load receiver klass itself __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes()); - __ load_klass(temp1_recv_klass, receiver_reg); + __ load_klass(temp1_recv_klass, receiver_reg, temp2); __ verify_klass_ptr(temp1_recv_klass); } BLOCK_COMMENT("check_receiver {"); @@ -358,7 +358,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, // Check the receiver against the MemberName.clazz if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) { // Did not load it above... 
- __ load_klass(temp1_recv_klass, receiver_reg); + __ load_klass(temp1_recv_klass, receiver_reg, temp2); __ verify_klass_ptr(temp1_recv_klass); } if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) { diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp index 20b2ff1b3a4..cbfe84600ab 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp @@ -2032,7 +2032,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, if (UseBiasedLocking) { // Note that oop_handle_reg is trashed during this call - __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, false, lock_done, &slow_path_lock); + __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, noreg, false, lock_done, &slow_path_lock); } // Load immediate 1 into swap_reg %rax, diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp index 5ef078a370f..9e9410c9dc4 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp @@ -980,7 +980,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm Register temp = rbx; { - __ load_klass(temp, receiver); + __ load_klass(temp, receiver, rscratch1); __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset())); __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset())); __ jcc(Assembler::equal, ok); @@ -2109,7 +2109,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, assert_different_registers(ic_reg, receiver, rscratch1); __ verify_oop(receiver); - __ load_klass(rscratch1, receiver); + __ load_klass(rscratch1, receiver, rscratch2); __ cmpq(ic_reg, rscratch1); __ jcc(Assembler::equal, hit); @@ -2445,7 +2445,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, __ movptr(obj_reg, Address(oop_handle_reg, 0)); if (UseBiasedLocking) { - __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock); + __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, rscratch2, false, lock_done, &slow_path_lock); } // Load immediate 1 into swap_reg %rax diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp index 6f5cf4ae076..5ee6c9482f6 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp @@ -1159,11 +1159,8 @@ class StubGenerator: public StubCodeGenerator { __ cmpptr(c_rarg2, c_rarg3); __ jcc(Assembler::notZero, error); - // set r12 to heapbase for load_klass() - __ reinit_heapbase(); - // make sure klass is 'reasonable', which is not zero. 
- __ load_klass(rax, rax); // get klass + __ load_klass(rax, rax, rscratch1); // get klass __ testptr(rax, rax); __ jcc(Assembler::zero, error); // if klass is NULL it is broken @@ -2506,7 +2503,7 @@ class StubGenerator: public StubCodeGenerator { __ testptr(rax_oop, rax_oop); __ jcc(Assembler::zero, L_store_element); - __ load_klass(r11_klass, rax_oop);// query the object klass + __ load_klass(r11_klass, rax_oop, rscratch1);// query the object klass generate_type_check(r11_klass, ckoff, ckval, L_store_element); // ======== end loop ======== @@ -2669,8 +2666,10 @@ class StubGenerator: public StubCodeGenerator { const Register dst_pos = c_rarg3; // destination position #ifndef _WIN64 const Register length = c_rarg4; + const Register rklass_tmp = r9; // load_klass #else const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 + const Register rklass_tmp = rdi; // load_klass #endif { int modulus = CodeEntryAlignment; @@ -2743,7 +2742,7 @@ class StubGenerator: public StubCodeGenerator { __ testl(r11_length, r11_length); __ jccb(Assembler::negative, L_failed_0); - __ load_klass(r10_src_klass, src); + __ load_klass(r10_src_klass, src, rklass_tmp); #ifdef ASSERT // assert(src->klass() != NULL); { @@ -2754,7 +2753,7 @@ class StubGenerator: public StubCodeGenerator { __ bind(L1); __ stop("broken null klass"); __ bind(L2); - __ load_klass(rax, dst); + __ load_klass(rax, dst, rklass_tmp); __ cmpq(rax, 0); __ jcc(Assembler::equal, L1); // this would be broken also BLOCK_COMMENT("} assert klasses not null done"); @@ -2777,7 +2776,7 @@ class StubGenerator: public StubCodeGenerator { __ jcc(Assembler::equal, L_objArray); // if (src->klass() != dst->klass()) return -1; - __ load_klass(rax, dst); + __ load_klass(rax, dst, rklass_tmp); __ cmpq(r10_src_klass, rax); __ jcc(Assembler::notEqual, L_failed); @@ -2876,7 +2875,7 @@ class StubGenerator: public StubCodeGenerator { Label L_plain_copy, L_checkcast_copy; // test array classes for subtyping - __ load_klass(rax, dst); + __ load_klass(rax, dst, rklass_tmp); __ cmpq(r10_src_klass, rax); // usual case is exact equality __ jcc(Assembler::notEqual, L_checkcast_copy); @@ -2904,7 +2903,7 @@ class StubGenerator: public StubCodeGenerator { rax, L_failed); const Register r11_dst_klass = r11; - __ load_klass(r11_dst_klass, dst); // reload + __ load_klass(r11_dst_klass, dst, rklass_tmp); // reload // Marshal the base address arguments now, freeing registers. __ lea(from, Address(src, src_pos, TIMES_OOP, diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp index 311a54f1b5e..f8e17b17feb 100644 --- a/src/hotspot/cpu/x86/templateTable_x86.cpp +++ b/src/hotspot/cpu/x86/templateTable_x86.cpp @@ -1127,10 +1127,11 @@ void TemplateTable::aastore() { __ testptr(rax, rax); __ jcc(Assembler::zero, is_null); + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); // Move subklass into rbx - __ load_klass(rbx, rax); + __ load_klass(rbx, rax, tmp_load_klass); // Move superklass into rax - __ load_klass(rax, rdx); + __ load_klass(rax, rdx, tmp_load_klass); __ movptr(rax, Address(rax, ObjArrayKlass::element_klass_offset())); @@ -1173,7 +1174,8 @@ void TemplateTable::bastore() { index_check(rdx, rbx); // prefer index in rbx // Need to check whether array is boolean or byte // since both types share the bastore bytecode. 
- __ load_klass(rcx, rdx); + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); + __ load_klass(rcx, rdx, tmp_load_klass); __ movl(rcx, Address(rcx, Klass::layout_helper_offset())); int diffbit = Klass::layout_helper_boolean_diffbit(); __ testl(rcx, diffbit); @@ -2643,7 +2645,8 @@ void TemplateTable::_return(TosState state) { assert(state == vtos, "only valid state"); Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax); __ movptr(robj, aaddress(0)); - __ load_klass(rdi, robj); + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); + __ load_klass(rdi, robj, tmp_load_klass); __ movl(rdi, Address(rdi, Klass::access_flags_offset())); __ testl(rdi, JVM_ACC_HAS_FINALIZER); Label skip_register_finalizer; @@ -3730,7 +3733,8 @@ void TemplateTable::invokevirtual_helper(Register index, // get receiver klass __ null_check(recv, oopDesc::klass_offset_in_bytes()); - __ load_klass(rax, recv); + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); + __ load_klass(rax, recv, tmp_load_klass); // profile this call __ profile_virtual_call(rax, rlocals, rdx); @@ -3823,7 +3827,8 @@ void TemplateTable::invokeinterface(int byte_no) { // Get receiver klass into rlocals - also a null check __ null_check(rcx, oopDesc::klass_offset_in_bytes()); - __ load_klass(rlocals, rcx); + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); + __ load_klass(rlocals, rcx, tmp_load_klass); Label subtype; __ check_klass_subtype(rlocals, rax, rbcp, subtype); @@ -3846,7 +3851,7 @@ void TemplateTable::invokeinterface(int byte_no) { // Get receiver klass into rdx - also a null check __ restore_locals(); // restore r14 __ null_check(rcx, oopDesc::klass_offset_in_bytes()); - __ load_klass(rdx, rcx); + __ load_klass(rdx, rcx, tmp_load_klass); Label no_such_method; @@ -4110,7 +4115,8 @@ void TemplateTable::_new() { __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code) __ store_klass_gap(rax, rsi); // zero klass gap for compressed oops #endif - __ store_klass(rax, rcx); // klass + Register tmp_store_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); + __ store_klass(rax, rcx, tmp_store_klass); // klass { SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0); @@ -4204,7 +4210,8 @@ void TemplateTable::checkcast() { __ load_resolved_klass_at_index(rcx, rbx, rax); __ bind(resolved); - __ load_klass(rbx, rdx); + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); + __ load_klass(rbx, rdx, tmp_load_klass); // Generate subtype check. Blows rcx, rdi. Object in rdx. // Superklass in rax. Subklass in rbx. 
@@ -4261,12 +4268,13 @@ void TemplateTable::instanceof() { __ pop_ptr(rdx); // restore receiver __ verify_oop(rdx); - __ load_klass(rdx, rdx); + Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); + __ load_klass(rdx, rdx, tmp_load_klass); __ jmpb(resolved); // Get superklass in rax and subklass in rdx __ bind(quicked); - __ load_klass(rdx, rax); + __ load_klass(rdx, rax, tmp_load_klass); __ load_resolved_klass_at_index(rcx, rbx, rax); __ bind(resolved); diff --git a/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp b/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp index 24e080dbe45..09322205f06 100644 --- a/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp +++ b/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp @@ -195,7 +195,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) { // get receiver klass (also an implicit null-check) assert(VtableStub::receiver_location() == rcx->as_VMReg(), "receiver expected in rcx"); address npe_addr = __ pc(); - __ load_klass(recv_klass_reg, rcx); + __ load_klass(recv_klass_reg, rcx, noreg); start_pc = __ pc(); @@ -213,7 +213,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) { // Get selected method from declaring class and itable index const Register method = rbx; - __ load_klass(recv_klass_reg, rcx); // restore recv_klass_reg + __ load_klass(recv_klass_reg, rcx, noreg); // restore recv_klass_reg __ lookup_interface_method(// inputs: rec. class, interface, itable index recv_klass_reg, holder_klass_reg, itable_index, // outputs: method, scan temp. reg diff --git a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp index 85a9fd50d3d..c6181f2d007 100644 --- a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp +++ b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp @@ -48,6 +48,7 @@ extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing. const int stub_code_length = code_size_limit(true); + Register tmp_load_klass = rscratch1; VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index); // Can be NULL if there is no free space in the code cache. if (s == NULL) { @@ -80,7 +81,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { // get receiver klass address npe_addr = __ pc(); - __ load_klass(rax, j_rarg0); + __ load_klass(rax, j_rarg0, tmp_load_klass); #ifndef PRODUCT if (DebugVtables) { @@ -187,7 +188,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) { // get receiver klass (also an implicit null-check) assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0"); address npe_addr = __ pc(); - __ load_klass(recv_klass_reg, j_rarg0); + __ load_klass(recv_klass_reg, j_rarg0, temp_reg); start_pc = __ pc(); @@ -205,7 +206,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) { // Get selected method from declaring class and itable index const Register method = rbx; - __ load_klass(recv_klass_reg, j_rarg0); // restore recv_klass_reg + __ load_klass(recv_klass_reg, j_rarg0, temp_reg); // restore recv_klass_reg __ lookup_interface_method(// inputs: rec. class, interface, itable index recv_klass_reg, holder_klass_reg, itable_index, // outputs: method, scan temp. 
reg diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad index 7b0e058c3a3..6f665642be2 100644 --- a/src/hotspot/cpu/x86/x86_64.ad +++ b/src/hotspot/cpu/x86/x86_64.ad @@ -1692,7 +1692,7 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const MacroAssembler masm(&cbuf); uint insts_size = cbuf.insts_size(); if (UseCompressedClassPointers) { - masm.load_klass(rscratch1, j_rarg0); + masm.load_klass(rscratch1, j_rarg0, rscratch2); masm.cmpptr(rax, rscratch1); } else { masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes())); @@ -6122,7 +6122,7 @@ instruct storeP(memory mem, any_RegP src) instruct storeImmP0(memory mem, immP0 zero) %{ - predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL)); + predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL)); match(Set mem (StoreP mem zero)); ins_cost(125); // XXX @@ -6172,7 +6172,7 @@ instruct storeNKlass(memory mem, rRegN src) instruct storeImmN0(memory mem, immN0 zero) %{ - predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL); + predicate(Universe::narrow_oop_base() == NULL); match(Set mem (StoreN mem zero)); ins_cost(125); // XXX @@ -6215,7 +6215,7 @@ instruct storeImmNKlass(memory mem, immNKlass src) // Store Integer Immediate instruct storeImmI0(memory mem, immI_0 zero) %{ - predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL)); + predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL)); match(Set mem (StoreI mem zero)); ins_cost(125); // XXX @@ -6240,7 +6240,7 @@ instruct storeImmI(memory mem, immI src) // Store Long Immediate instruct storeImmL0(memory mem, immL0 zero) %{ - predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL)); + predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL)); match(Set mem (StoreL mem zero)); ins_cost(125); // XXX @@ -6265,7 +6265,7 @@ instruct storeImmL(memory mem, immL32 src) // Store Short/Char Immediate instruct storeImmC0(memory mem, immI_0 zero) %{ - predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL)); + predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL)); match(Set mem (StoreC mem zero)); ins_cost(125); // XXX @@ -6291,7 +6291,7 @@ instruct storeImmI16(memory mem, immI16 src) // Store Byte Immediate instruct storeImmB0(memory mem, immI_0 zero) %{ - predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL)); + predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL)); match(Set mem (StoreB mem zero)); ins_cost(125); // XXX @@ -6316,7 +6316,7 @@ instruct storeImmB(memory mem, immI8 src) // Store CMS card-mark Immediate instruct storeImmCM0_reg(memory mem, immI_0 zero) %{ - predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL)); + predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL)); match(Set mem (StoreCM mem zero)); ins_cost(125); // XXX @@ -6354,7 +6354,7 @@ instruct storeF(memory mem, regF src) // Store immediate Float value (it is faster than store from XMM register) instruct storeF0(memory mem, immF0 zero) %{ - predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL)); + predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL)); 
match(Set mem (StoreF mem zero)); ins_cost(25); // XXX @@ -6404,7 +6404,7 @@ instruct storeD0_imm(memory mem, immD0 src) instruct storeD0(memory mem, immD0 zero) %{ - predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL)); + predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL)); match(Set mem (StoreD mem zero)); ins_cost(25); // XXX @@ -6920,26 +6920,20 @@ instruct decodeHeapOop_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{ instruct encodeKlass_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{ match(Set dst (EncodePKlass src)); - effect(KILL cr); - format %{ "encode_klass_not_null $dst,$src" %} + effect(TEMP dst, KILL cr); + format %{ "encode_and_move_klass_not_null $dst,$src" %} ins_encode %{ - __ encode_klass_not_null($dst$$Register, $src$$Register); + __ encode_and_move_klass_not_null($dst$$Register, $src$$Register); %} ins_pipe(ialu_reg_long); %} instruct decodeKlass_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{ match(Set dst (DecodeNKlass src)); - effect(KILL cr); - format %{ "decode_klass_not_null $dst,$src" %} + effect(TEMP dst, KILL cr); + format %{ "decode_and_move_klass_not_null $dst,$src" %} ins_encode %{ - Register s = $src$$Register; - Register d = $dst$$Register; - if (s != d) { - __ decode_klass_not_null(d, s); - } else { - __ decode_klass_not_null(d); - } + __ decode_and_move_klass_not_null($dst$$Register, $src$$Register); %} ins_pipe(ialu_reg_long); %} @@ -11879,7 +11873,7 @@ instruct testP_mem(rFlagsReg cr, memory op, immP0 zero) instruct testP_mem_reg0(rFlagsReg cr, memory mem, immP0 zero) %{ - predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) && (Universe::narrow_klass_base() == NULL) + predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL) ZGC_ONLY( && n->in(1)->as_Load()->barrier_data() == 0 )); match(Set cr (CmpP (LoadP mem) zero)); @@ -11975,7 +11969,7 @@ instruct testN_mem(rFlagsReg cr, memory mem, immN0 zero) instruct testN_mem_reg0(rFlagsReg cr, memory mem, immN0 zero) %{ - predicate(Universe::narrow_oop_base() == NULL && (Universe::narrow_klass_base() == NULL)); + predicate(Universe::narrow_oop_base() == NULL); match(Set cr (CmpN (LoadN mem) zero)); format %{ "cmpl R12, $mem\t# compressed ptr (R12_heapbase==0)" %} @@ -12612,15 +12606,15 @@ instruct cmpFastLockRTM(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, ins_pipe(pipe_slow); %} -instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr) %{ +instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr, rRegP cx1) %{ predicate(!Compile::current()->use_rtm()); match(Set cr (FastLock object box)); - effect(TEMP tmp, TEMP scr, USE_KILL box); + effect(TEMP tmp, TEMP scr, TEMP cx1, USE_KILL box); ins_cost(300); format %{ "fastlock $object,$box\t! 
kills $box,$tmp,$scr" %} ins_encode %{ __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, - $scr$$Register, noreg, noreg, _counters, NULL, NULL, NULL, false, false); + $scr$$Register, $cx1$$Register, noreg, _counters, NULL, NULL, NULL, false, false); %} ins_pipe(pipe_slow); %} diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp index 17668ad2151..30a28f72e41 100644 --- a/src/hotspot/share/classfile/javaClasses.cpp +++ b/src/hotspot/share/classfile/javaClasses.cpp @@ -4699,8 +4699,10 @@ void java_util_concurrent_locks_AbstractOwnableSynchronizer::serialize_offsets(S } #endif -static int member_offset(int hardcoded_offset) { - return (hardcoded_offset * heapOopSize) + instanceOopDesc::base_offset_in_bytes(); +static int member_offset(int hardcoded_offset, int elementSize) { + // Use with care. This function makes a lot of assumptions about the contents of the object. + // So naturally, only hardcode offsets if you know what you are doing. + return align_up((hardcoded_offset * elementSize) + instanceOopDesc::base_offset_in_bytes(), elementSize); } // Compute hard-coded offsets @@ -4709,14 +4711,14 @@ static int member_offset(int hardcoded_offset) { void JavaClasses::compute_hard_coded_offsets() { // java_lang_boxing_object - java_lang_boxing_object::value_offset = member_offset(java_lang_boxing_object::hc_value_offset); - java_lang_boxing_object::long_value_offset = align_up(member_offset(java_lang_boxing_object::hc_value_offset), BytesPerLong); + java_lang_boxing_object::value_offset = member_offset(java_lang_boxing_object::hc_value_offset, BytesPerInt); + java_lang_boxing_object::long_value_offset = member_offset(java_lang_boxing_object::hc_value_offset, BytesPerLong); // java_lang_ref_Reference - java_lang_ref_Reference::referent_offset = member_offset(java_lang_ref_Reference::hc_referent_offset); - java_lang_ref_Reference::queue_offset = member_offset(java_lang_ref_Reference::hc_queue_offset); - java_lang_ref_Reference::next_offset = member_offset(java_lang_ref_Reference::hc_next_offset); - java_lang_ref_Reference::discovered_offset = member_offset(java_lang_ref_Reference::hc_discovered_offset); + java_lang_ref_Reference::referent_offset = member_offset(java_lang_ref_Reference::hc_referent_offset, heapOopSize); + java_lang_ref_Reference::queue_offset = member_offset(java_lang_ref_Reference::hc_queue_offset, heapOopSize); + java_lang_ref_Reference::next_offset = member_offset(java_lang_ref_Reference::hc_next_offset, heapOopSize); + java_lang_ref_Reference::discovered_offset = member_offset(java_lang_ref_Reference::hc_discovered_offset, heapOopSize); } #define DO_COMPUTE_OFFSETS(k) k::compute_offsets(); diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp index 80958b04694..e484bb85eaf 100644 --- a/src/hotspot/share/memory/metaspace.cpp +++ b/src/hotspot/share/memory/metaspace.cpp @@ -1308,7 +1308,12 @@ void Metaspace::global_initialize() { { #ifdef _LP64 if (using_class_space()) { - char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment); + char* base; + if (UseCompressedOops) { + base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment); + } else { + base = (char*)HeapBaseMinAddress; + } allocate_metaspace_compressed_klass_ptrs(base, 0); } #endif // _LP64 diff --git a/src/hotspot/share/opto/lcm.cpp b/src/hotspot/share/opto/lcm.cpp index 6a6105faf53..024f9ad939c 100644 --- a/src/hotspot/share/opto/lcm.cpp +++ 
b/src/hotspot/share/opto/lcm.cpp @@ -264,8 +264,8 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo // cannot reason about it; is probably not implicit null exception } else { const TypePtr* tptr; - if (UseCompressedOops && (Universe::narrow_oop_shift() == 0 || - Universe::narrow_klass_shift() == 0)) { + if ((UseCompressedOops || UseCompressedClassPointers) && + (Universe::narrow_oop_shift() == 0 || Universe::narrow_klass_shift() == 0)) { // 32-bits narrow oop can be the base of address expressions tptr = base->get_ptr_type(); } else { diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp index 886f2f6cd9c..2843cf88052 100644 --- a/src/hotspot/share/runtime/arguments.cpp +++ b/src/hotspot/share/runtime/arguments.cpp @@ -1647,7 +1647,9 @@ void Arguments::set_use_compressed_oops() { if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) { warning("Max heap size too large for Compressed Oops"); FLAG_SET_DEFAULT(UseCompressedOops, false); - FLAG_SET_DEFAULT(UseCompressedClassPointers, false); + if (COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS) { + FLAG_SET_DEFAULT(UseCompressedClassPointers, false); + } } } #endif // _LP64 @@ -1660,8 +1662,14 @@ void Arguments::set_use_compressed_oops() { void Arguments::set_use_compressed_klass_ptrs() { #ifndef ZERO #ifdef _LP64 - // UseCompressedOops must be on for UseCompressedClassPointers to be on. - if (!UseCompressedOops) { + // On some architectures, the use of UseCompressedClassPointers implies the use of + // UseCompressedOops. The reason is that the rheap_base register of said platforms + // is reused to perform some optimized spilling, in order to use rheap_base as a + // temp register. But by treating it as any other temp register, spilling can typically + // be completely avoided instead. So it is better not to perform this trick. And by + // not having that reliance, large heaps, or heaps not supporting compressed oops, + // can still use compressed class pointers. + if (COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS && !UseCompressedOops) { if (UseCompressedClassPointers) { warning("UseCompressedClassPointers requires UseCompressedOops"); } @@ -1765,10 +1773,7 @@ void Arguments::set_heap_size() { // Limit the heap size to ErgoHeapSizeLimit reasonable_max = MIN2(reasonable_max, (julong)ErgoHeapSizeLimit); } - if (UseCompressedOops) { - // Limit the heap size to the maximum possible when using compressed oops - julong max_coop_heap = (julong)max_heap_for_compressed_oops(); - + if (UseCompressedOops || UseCompressedClassPointers) { // HeapBaseMinAddress can be greater than default but not less than. 
if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) { if (HeapBaseMinAddress < DefaultHeapBaseMinAddress) { @@ -1781,6 +1786,10 @@ void Arguments::set_heap_size() { FLAG_SET_ERGO(size_t, HeapBaseMinAddress, DefaultHeapBaseMinAddress); } } + } + if (UseCompressedOops) { + // Limit the heap size to the maximum possible when using compressed oops + julong max_coop_heap = (julong)max_heap_for_compressed_oops(); if (HeapBaseMinAddress + MaxHeapSize < max_coop_heap) { // Heap should be above HeapBaseMinAddress to get zero based compressed oops diff --git a/test/hotspot/jtreg/gc/metaspace/TestSizeTransitions.java b/test/hotspot/jtreg/gc/metaspace/TestSizeTransitions.java index 58ae4d5ac3c..189d57596b0 100644 --- a/test/hotspot/jtreg/gc/metaspace/TestSizeTransitions.java +++ b/test/hotspot/jtreg/gc/metaspace/TestSizeTransitions.java @@ -88,13 +88,13 @@ public static void main(String... args) throws Exception { private static final String SIZE_TRANSITION_REGEX = "\\d+K\\(\\d+K\\)->\\d+K\\(\\d+K\\)"; // matches -coops metaspace size transitions - private static final String NO_COOPS_REGEX = + private static final String NO_COMPRESSED_KLASS_POINTERS_REGEX = String.format("^%s.* Metaspace: %s$", LOG_TAGS_REGEX, SIZE_TRANSITION_REGEX); // matches +coops metaspace size transitions - private static final String COOPS_REGEX = + private static final String COMPRESSED_KLASS_POINTERS_REGEX = String.format("^%s.* Metaspace: %s NonClass: %s Class: %s$", LOG_TAGS_REGEX, SIZE_TRANSITION_REGEX, @@ -107,19 +107,19 @@ public static void main(String... args) throws Exception { throw new RuntimeException("wrong number of args: " + args.length); } - final boolean hasCoops = Platform.is64bit(); - final boolean useCoops = Boolean.parseBoolean(args[0]); + final boolean hasCompressedKlassPointers = Platform.is64bit(); + final boolean useCompressedKlassPointers = Boolean.parseBoolean(args[0]); final String gcArg = args[1]; - if (!hasCoops && useCoops) { + if (!hasCompressedKlassPointers && useCompressedKlassPointers) { // No need to run this configuration. System.out.println("Skipping test."); return; } List jvmArgs = new ArrayList<>(); - if (hasCoops) { - jvmArgs.add(useCoops ? "-XX:+UseCompressedOops" : "-XX:-UseCompressedOops"); + if (hasCompressedKlassPointers) { + jvmArgs.add(useCompressedKlassPointers ? "-XX:+UseCompressedClassPointers" : "-XX:-UseCompressedClassPointers"); } jvmArgs.add(gcArg); jvmArgs.add("-Xmx256m"); @@ -136,12 +136,12 @@ public static void main(String... args) throws Exception { System.out.println(output.getStdout()); output.shouldHaveExitValue(0); - if (useCoops) { - output.stdoutShouldMatch(COOPS_REGEX); - output.stdoutShouldNotMatch(NO_COOPS_REGEX); + if (useCompressedKlassPointers) { + output.stdoutShouldMatch(COMPRESSED_KLASS_POINTERS_REGEX); + output.stdoutShouldNotMatch(NO_COMPRESSED_KLASS_POINTERS_REGEX); } else { - output.stdoutShouldMatch(NO_COOPS_REGEX); - output.stdoutShouldNotMatch(COOPS_REGEX); + output.stdoutShouldMatch(NO_COMPRESSED_KLASS_POINTERS_REGEX); + output.stdoutShouldNotMatch(COMPRESSED_KLASS_POINTERS_REGEX); } } } diff --git a/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointers.java b/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointers.java index 0fbdeeee798..9b5d5a40208 100644 --- a/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointers.java +++ b/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointers.java @@ -25,7 +25,7 @@ * @test * @bug 8024927 * @summary Testing address of compressed class pointer space as best as possible. 
- * @requires vm.bits == 64 & vm.opt.final.UseCompressedOops == true & os.family != "windows" & !(os.family == "mac" & os.arch=="aarch64")
+ * @requires vm.bits == 64 & os.family != "windows" & !(os.family == "mac" & os.arch=="aarch64")
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
@@ -141,6 +141,123 @@ public static void sharingTest() throws Exception {
         }
     }
 
+    public static void smallHeapTestNoCoop() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:-UseCompressedOops",
+            "-XX:+UseCompressedClassPointers",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedBaseAddress=8g",
+            "-Xmx128m",
+            "-Xlog:gc+metaspace=trace",
+            "-Xshare:off",
+            "-Xlog:cds=trace",
+            "-XX:+VerifyBeforeGC", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Narrow klass base: 0x0000000000000000");
+        output.shouldHaveExitValue(0);
+    }
+
+    public static void smallHeapTestWith1GNoCoop() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:-UseCompressedOops",
+            "-XX:+UseCompressedClassPointers",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:CompressedClassSpaceSize=1g",
+            "-Xmx128m",
+            "-Xlog:gc+metaspace=trace",
+            "-Xshare:off",
+            "-Xlog:cds=trace",
+            "-XX:+VerifyBeforeGC", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Narrow klass base: 0x0000000000000000");
+        output.shouldContain("Narrow klass shift: 0");
+        output.shouldHaveExitValue(0);
+    }
+
+    public static void largeHeapTestNoCoop() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:-UseCompressedOops",
+            "-XX:+UseCompressedClassPointers",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:+UnlockExperimentalVMOptions",
+            "-Xmx30g",
+            "-XX:-UseAOT", // AOT explicitly sets the klass shift to 3.
+ "-Xlog:gc+metaspace=trace", + "-Xshare:off", + "-Xlog:cds=trace", + "-XX:+VerifyBeforeGC", "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("Narrow klass base: 0x0000000000000000"); + output.shouldContain("Narrow klass shift: 0"); + output.shouldHaveExitValue(0); + } + + public static void largePagesTestNoCoop() throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:-UseCompressedOops", + "-XX:+UseCompressedClassPointers", + "-XX:+UnlockDiagnosticVMOptions", + "-Xmx128m", + "-XX:+UseLargePages", + "-Xlog:gc+metaspace=trace", + "-XX:+VerifyBeforeGC", "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("Narrow klass base:"); + output.shouldHaveExitValue(0); + } + + public static void heapBaseMinAddressTestNoCoop() throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:-UseCompressedOops", + "-XX:+UseCompressedClassPointers", + "-XX:HeapBaseMinAddress=1m", + "-Xlog:gc+heap+coops=debug", + "-version"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("HeapBaseMinAddress must be at least"); + output.shouldHaveExitValue(0); + } + + public static void sharingTestNoCoop() throws Exception { + // Test small heaps + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:-UseCompressedOops", + "-XX:+UseCompressedClassPointers", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./CompressedClassPointers.jsa", + "-Xmx128m", + "-XX:SharedBaseAddress=8g", + "-XX:+PrintCompressedOopsMode", + "-XX:+VerifyBeforeGC", + "-Xshare:dump", "-Xlog:cds"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + if (output.firstMatch("Shared spaces are not supported in this VM") != null) { + return; + } + try { + output.shouldContain("Loading classes to share"); + output.shouldHaveExitValue(0); + + pb = ProcessTools.createJavaProcessBuilder( + "-XX:-UseCompressedOops", + "-XX:+UseCompressedClassPointers", + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./CompressedClassPointers.jsa", + "-Xmx128m", + "-XX:SharedBaseAddress=8g", + "-XX:+PrintCompressedOopsMode", + "-Xshare:on", + "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("sharing"); + output.shouldHaveExitValue(0); + + } catch (RuntimeException e) { + output.shouldContain("Unable to use shared archive"); + output.shouldHaveExitValue(1); + } + } + public static void main(String[] args) throws Exception { if (Platform.isSolaris()) { String name = System.getProperty("os.version"); @@ -154,5 +271,22 @@ public static void main(String[] args) throws Exception { largePagesTest(); heapBaseMinAddressTest(); sharingTest(); + + boolean ccpRequiresCoop = Platform.isAArch64() || Platform.isSparc(); + + if (!ccpRequiresCoop && !Platform.isOSX()) { + // Testing compressed class pointers without compressed oops. + // This is only possible if the platform supports it. Notably, + // on macOS, when compressed oops is disabled and the heap is + // given an arbitrary address, that address occasionally collides + // with where we would ideally have placed the compressed class + // space. Therefore, macOS is omitted for now. 
+            smallHeapTestNoCoop();
+            smallHeapTestWith1GNoCoop();
+            largeHeapTestNoCoop();
+            largePagesTestNoCoop();
+            heapBaseMinAddressTestNoCoop();
+            sharingTestNoCoop();
+        }
     }
 }
diff --git a/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassSpaceSize.java b/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassSpaceSize.java
index a2476e4bfba..55fcb1b0d14 100644
--- a/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassSpaceSize.java
+++ b/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassSpaceSize.java
@@ -84,14 +84,6 @@ public static void main(String[] args) throws Exception {
                .shouldHaveExitValue(0);
 
 
-        pb = ProcessTools.createJavaProcessBuilder("-XX:-UseCompressedOops",
-                                                   "-XX:CompressedClassSpaceSize=1m",
-                                                   "-version");
-        output = new OutputAnalyzer(pb.start());
-        output.shouldContain("Setting CompressedClassSpaceSize has no effect when compressed class pointers are not used")
-              .shouldHaveExitValue(0);
-
-
         pb = ProcessTools.createJavaProcessBuilder("-XX:-UseCompressedClassPointers",
                                                    "-XX:CompressedClassSpaceSize=1m",
                                                    "-version");
diff --git a/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithG1ConcurrentMark.java b/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithG1ConcurrentMark.java
index ab2a37ee5b6..a120b52e740 100644
--- a/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithG1ConcurrentMark.java
+++ b/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithG1ConcurrentMark.java
@@ -33,7 +33,7 @@
  * @requires (vm.gc == "G1" | vm.gc == null)
  *           & vm.opt.ExplicitGCInvokesConcurrent != false
  * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithG1ConcurrentMark
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithG1ConcurrentMark
  */
 public class TestObjectCountAfterGCEventWithG1ConcurrentMark {
     public static void main(String[] args) throws Exception {
diff --git a/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithG1FullCollection.java b/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithG1FullCollection.java
index 3169b3eefcf..73991d13bbd 100644
--- a/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithG1FullCollection.java
+++ b/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithG1FullCollection.java
@@ -33,7 +33,7 @@
  * @requires (vm.gc == "G1" | vm.gc == null)
  *           & vm.opt.ExplicitGCInvokesConcurrent != true
  * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseG1GC -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithG1FullCollection
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseG1GC -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithG1FullCollection
  */
 public class TestObjectCountAfterGCEventWithG1FullCollection {
     public static void main(String[] args) throws Exception {
diff --git a/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithParallelOld.java b/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithParallelOld.java
index 97adf275366..e62400094a8 100644
--- a/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithParallelOld.java
+++ b/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithParallelOld.java
@@ -32,7 +32,7 @@
  * @requires vm.hasJFR
  * @requires vm.gc == "Parallel" | vm.gc == null
  * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithParallelOld
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithParallelOld
  */
 public class TestObjectCountAfterGCEventWithParallelOld {
     public static void main(String[] args) throws Exception {
diff --git a/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithSerial.java b/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithSerial.java
index 1416a625c8c..c0ca6ff23c6 100644
--- a/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithSerial.java
+++ b/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithSerial.java
@@ -32,7 +32,7 @@
  * @requires vm.hasJFR
  * @requires vm.gc == "Serial" | vm.gc == null
  * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseSerialGC -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithSerial
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseSerialGC -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithSerial
  */
 public class TestObjectCountAfterGCEventWithSerial {
     public static void main(String[] args) throws Exception {
diff --git a/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountEvent.java b/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountEvent.java
index 42a670a0e64..659050722bf 100644
--- a/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountEvent.java
+++ b/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountEvent.java
@@ -41,7 +41,7 @@
  * @requires vm.hasJFR
  * @requires vm.gc == "Serial" | vm.gc == null
  * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseSerialGC -XX:-UseCompressedOops -XX:MarkSweepDeadRatio=0 -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountEvent
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseSerialGC -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:MarkSweepDeadRatio=0 -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountEvent
  */
 public class TestObjectCountEvent {
     private static final String objectCountEventPath = EventNames.ObjectCount;
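
Reviewer note (illustration, not part of the patch): with this change applied, an x86_64 build accepts -XX:-UseCompressedOops together with -XX:+UseCompressedClassPointers; the jtreg tests above verify this through the test library. The standalone sketch below shows the same check using only java.lang.ProcessBuilder, so it can be run against any build without jtreg. The class name FlagIndependenceDemo is hypothetical, and the expected log text is taken from this patch's own tests ("Narrow klass base" under -Xlog:gc+metaspace=trace) and from the warning string in arguments.cpp.

    // FlagIndependenceDemo.java -- hypothetical helper, not included in this patch.
    import java.io.BufferedReader;
    import java.io.InputStreamReader;

    public class FlagIndependenceDemo {
        public static void main(String[] args) throws Exception {
            // Launch a child JVM with compressed oops off but compressed class
            // pointers on. Before this patch, x86_64 warned
            // "UseCompressedClassPointers requires UseCompressedOops" and turned
            // the flag back off; after it, the combination is accepted.
            ProcessBuilder pb = new ProcessBuilder(
                System.getProperty("java.home") + "/bin/java",
                "-XX:-UseCompressedOops",
                "-XX:+UseCompressedClassPointers",
                "-Xlog:gc+metaspace=trace",
                "-version");
            pb.redirectErrorStream(true);  // fold stderr (-version) into stdout
            Process p = pb.start();
            try (BufferedReader r = new BufferedReader(
                     new InputStreamReader(p.getInputStream()))) {
                String line;
                while ((line = r.readLine()) != null) {
                    // On a patched x86_64 build we expect the narrow klass
                    // base/shift line rather than the dependency warning.
                    if (line.contains("Narrow klass")
                            || line.contains("requires UseCompressedOops")) {
                        System.out.println(line);
                    }
                }
            }
            p.waitFor();
        }
    }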
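A second sketch condenses the flag ergonomics that set_use_compressed_klass_ptrs() implements after this patch. This is a simplified Java mirror of the C++ logic, written only to make the decision table explicit; the class and method names are illustrative, not VM code. Per the globalDefinitions hunks at the top of this patch, COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS is true on aarch64/ppc/s390/sparc, while x86_64 leaves the flags independent, which is the point of this change.

    // Simplified mirror of the VM flag ergonomics; illustrative only.
    final class CompressedPointerErgonomics {
        // dependsOnCoops plays the role of
        // COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS.
        static boolean useCompressedClassPointers(boolean dependsOnCoops,
                                                  boolean useCompressedOops,
                                                  boolean requested) {
            if (dependsOnCoops && !useCompressedOops) {
                if (requested) {
                    System.err.println(
                        "warning: UseCompressedClassPointers requires UseCompressedOops");
                }
                return false;  // coupled platforms keep the old behavior
            }
            return requested;  // independent platforms honor the flag as given
        }

        public static void main(String[] args) {
            // x86_64 after this patch: -UseCompressedOops +UseCompressedClassPointers holds.
            System.out.println(useCompressedClassPointers(false, false, true)); // true
            // Coupled platforms (e.g. aarch64): the same request is rejected with a warning.
            System.out.println(useCompressedClassPointers(true, false, true));  // false
        }
    }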