[Backport] 8241825: Make compressed oops and compressed class pointers independent (x86_64)

Summary: Make UseCompressedOops and UseCompressedClassPointers independent on x86_64

Test Plan: CICD

Reviewed-by: kuaiwei, yulei, lingjun

Issue: #669
mmyxym committed Sep 13, 2023
1 parent 8dd1e93 commit 1e2c013
Showing 32 changed files with 360 additions and 206 deletions.
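For context (this is not part of the diff itself), the change introduces a per-platform macro, COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS, which shared flag-processing code (not shown in this excerpt) can consult to decide whether turning off UseCompressedOops must also force UseCompressedClassPointers off. A minimal standalone model of that decision follows; the function name and warning text are illustrative assumptions, not code from this commit.

// Standalone model (not HotSpot source) of how the per-CPU macro could gate
// the old flag dependency; names and messages here are assumptions.
#include <cstdio>

// Platforms that keep the old coupling define this as true (or, on x86_64
// JVMCI builds, as a runtime expression); plain x86_64 defines it as false.
#ifndef COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS
#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false
#endif

static bool UseCompressedOops          = false;  // e.g. -XX:-UseCompressedOops
static bool UseCompressedClassPointers = true;   // 64-bit default

static void set_use_compressed_klass_ptrs() {
  // Only dependent platforms force class pointers off when oops are uncompressed.
  if (COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS && !UseCompressedOops) {
    if (UseCompressedClassPointers) {
      std::printf("warning: UseCompressedClassPointers requires UseCompressedOops\n");
      UseCompressedClassPointers = false;
    }
  }
}

int main() {
  set_use_compressed_klass_ptrs();
  std::printf("UseCompressedOops=%d UseCompressedClassPointers=%d\n",
              UseCompressedOops, UseCompressedClassPointers);
  return 0;
}

With the x86_64 definition later in this diff evaluating to false on a non-JVMCI build, a combination such as -XX:-UseCompressedOops -XX:+UseCompressedClassPointers (for example on a heap too large for compressed oops) should no longer be rejected.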
2 changes: 2 additions & 0 deletions src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
@@ -62,4 +62,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#define NOT_R18_RESERVED(code) code
#endif

#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS true

#endif // CPU_AARCH64_VM_GLOBALDEFINITIONS_AARCH64_HPP
2 changes: 2 additions & 0 deletions src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp
@@ -62,4 +62,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
// Define the condition to use this -XX flag.
#define USE_POLL_BIT_ONLY UseSIGTRAP

#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS true

#endif // CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
2 changes: 2 additions & 0 deletions src/hotspot/cpu/s390/globalDefinitions_s390.hpp
@@ -56,4 +56,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true;

#define THREAD_LOCAL_POLL

#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS true

#endif // CPU_S390_VM_GLOBALDEFINITIONS_S390_HPP
2 changes: 2 additions & 0 deletions src/hotspot/cpu/sparc/globalDefinitions_sparc.hpp
@@ -55,4 +55,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
// SPARC have implemented the local polling
#define THREAD_LOCAL_POLL

#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS true

#endif // CPU_SPARC_VM_GLOBALDEFINITIONS_SPARC_HPP
2 changes: 1 addition & 1 deletion src/hotspot/cpu/x86/c1_FrameMap_x86.hpp
@@ -148,7 +148,7 @@

static int adjust_reg_range(int range) {
// Reduce the number of available regs (to free r12) in case of compressed oops
if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
if (UseCompressedOops) return range - 1;
return range;
}

44 changes: 25 additions & 19 deletions src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -1176,6 +1176,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch

LIR_Address* addr = src->as_address_ptr();
Address from_addr = as_Address(addr);
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

if (addr->base()->type() == T_OBJECT) {
__ verify_oop(addr->base()->as_pointer_register());
@@ -1353,7 +1354,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
#ifdef _LP64
if (UseCompressedClassPointers) {
__ decode_klass_not_null(dest->as_register());
__ decode_klass_not_null(dest->as_register(), tmp_load_klass);
}
#endif
}
@@ -1641,6 +1642,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
Register dst = op->result_opr()->as_register();
ciKlass* k = op->klass();
Register Rtmp1 = noreg;
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

// check if it needs to be profiled
ciMethodData* md = NULL;
@@ -1704,7 +1706,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
// not a safepoint as obj null check happens earlier
#ifdef _LP64
if (UseCompressedClassPointers) {
__ load_klass(Rtmp1, obj);
__ load_klass(Rtmp1, obj, tmp_load_klass);
__ cmpptr(k_RInfo, Rtmp1);
} else {
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
Expand All @@ -1721,7 +1723,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
} else {
// get object class
// not a safepoint as obj null check happens earlier
__ load_klass(klass_RInfo, obj);
__ load_klass(klass_RInfo, obj, tmp_load_klass);
if (k->is_loaded()) {
// See if we get an immediate positive hit
#ifdef _LP64
@@ -1776,7 +1778,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
Register mdo = klass_RInfo, recv = k_RInfo;
__ bind(profile_cast_success);
__ mov_metadata(mdo, md->constant_encoding());
__ load_klass(recv, obj);
__ load_klass(recv, obj, tmp_load_klass);
Label update_done;
type_profile_helper(mdo, md, data, recv, success);
__ jmp(*success);
Expand All @@ -1792,6 +1794,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
LIR_Code code = op->code();
if (code == lir_store_check) {
Register value = op->object()->as_register();
@@ -1837,8 +1840,8 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
}

add_debug_info_for_null_check_here(op->info_for_exception());
__ load_klass(k_RInfo, array);
__ load_klass(klass_RInfo, value);
__ load_klass(k_RInfo, array, tmp_load_klass);
__ load_klass(klass_RInfo, value, tmp_load_klass);

// get instance klass (it's already uncompressed)
__ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
Expand All @@ -1859,7 +1862,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
Register mdo = klass_RInfo, recv = k_RInfo;
__ bind(profile_cast_success);
__ mov_metadata(mdo, md->constant_encoding());
__ load_klass(recv, value);
__ load_klass(recv, value, tmp_load_klass);
Label update_done;
type_profile_helper(mdo, md, data, recv, &done);
__ jmpb(done);
@@ -3057,6 +3060,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
Register dst_pos = op->dst_pos()->as_register();
Register length = op->length()->as_register();
Register tmp = op->tmp()->as_register();
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

CodeStub* stub = op->stub();
int flags = op->flags();
@@ -3202,13 +3206,13 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// an instance type.
if (flags & LIR_OpArrayCopy::type_check) {
if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst);
__ load_klass(tmp, dst, tmp_load_klass);
__ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
__ jcc(Assembler::greaterEqual, *stub->entry());
}

if (!(flags & LIR_OpArrayCopy::src_objarray)) {
__ load_klass(tmp, src);
__ load_klass(tmp, src, tmp_load_klass);
__ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
__ jcc(Assembler::greaterEqual, *stub->entry());
}
@@ -3265,8 +3269,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ push(src);
__ push(dst);

__ load_klass(src, src);
__ load_klass(dst, dst);
__ load_klass(src, src, tmp_load_klass);
__ load_klass(dst, dst, tmp_load_klass);

__ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);

@@ -3294,9 +3298,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
assert(flags & mask, "one of the two should be known to be an object array");

if (!(flags & LIR_OpArrayCopy::src_objarray)) {
__ load_klass(tmp, src);
__ load_klass(tmp, src, tmp_load_klass);
} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst);
__ load_klass(tmp, dst, tmp_load_klass);
}
int lh_offset = in_bytes(Klass::layout_helper_offset());
Address klass_lh_addr(tmp, lh_offset);
@@ -3340,14 +3344,14 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
#ifdef _WIN64
// Allocate abi space for args but be sure to keep stack aligned
__ subptr(rsp, 6*wordSize);
__ load_klass(c_rarg3, dst);
__ load_klass(c_rarg3, dst, tmp_load_klass);
__ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
store_parameter(c_rarg3, 4);
__ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
__ call(RuntimeAddress(copyfunc_addr));
__ addptr(rsp, 6*wordSize);
#else
__ load_klass(c_rarg4, dst);
__ load_klass(c_rarg4, dst, tmp_load_klass);
__ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
__ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
__ call(RuntimeAddress(copyfunc_addr));
@@ -3412,7 +3416,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
if (UseCompressedClassPointers) {
__ encode_klass_not_null(tmp);
__ encode_klass_not_null(tmp, rscratch1);
}
#endif

@@ -3516,6 +3520,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciMethod* method = op->profiled_method();
int bci = op->profiled_bci();
ciMethod* callee = op->profiled_callee();
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

// Update counter for all call types
ciMethodData* md = method->method_data_or_null();
@@ -3568,7 +3573,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
}
}
} else {
__ load_klass(recv, recv);
__ load_klass(recv, recv, tmp_load_klass);
Label update_done;
type_profile_helper(mdo, md, data, recv, &update_done);
// Receiver did not match any saved receiver and there is no empty row for it.
Expand All @@ -3586,6 +3591,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
Register obj = op->obj()->as_register();
Register tmp = op->tmp()->as_pointer_register();
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
ciKlass* exact_klass = op->exact_klass();
intptr_t current_klass = op->current_klass();
@@ -3632,7 +3638,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
#ifdef ASSERT
if (exact_klass != NULL) {
Label ok;
__ load_klass(tmp, tmp);
__ load_klass(tmp, tmp, tmp_load_klass);
__ push(tmp);
__ mov_metadata(tmp, exact_klass->constant_encoding());
__ cmpptr(tmp, Address(rsp, 0));
Expand All @@ -3647,7 +3653,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
if (exact_klass != NULL) {
__ mov_metadata(tmp, exact_klass->constant_encoding());
} else {
__ load_klass(tmp, tmp);
__ load_klass(tmp, tmp, tmp_load_klass);
}

__ xorptr(tmp, mdo_addr);
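The dominant pattern in this file is that every load_klass, encode_klass_not_null and decode_klass_not_null call site now passes an explicit scratch register (rscratch1 on LP64, noreg on 32-bit), because once the two flags are decoupled r12 is no longer guaranteed to hold a base usable for narrow-klass decoding. A sketch of the widened helper follows, assuming the companion macroAssembler_x86.cpp change (not included in this excerpt) takes the shape implied by the call sites above.

// Sketch only -- inferred from the call sites above, not copied from this commit.
void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
  assert_different_registers(src, tmp);
  assert_different_registers(dst, tmp);
#ifdef _LP64
  if (UseCompressedClassPointers) {
    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    // tmp materializes the narrow-klass base instead of relying on a reserved r12.
    decode_klass_not_null(dst, tmp);
  } else
#endif
    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}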
10 changes: 7 additions & 3 deletions src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
@@ -55,7 +55,9 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr

if (UseBiasedLocking) {
assert(scratch != noreg, "should have scratch register at this point");
null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);
null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, rklass_decode_tmp, false, done, &slow_case);

} else {
null_check_offset = offset();
}
@@ -152,6 +154,7 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i

void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
assert_different_registers(obj, klass, len);
Register tmp_encode_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
if (UseBiasedLocking && !len->is_valid()) {
assert_different_registers(obj, klass, len, t1, t2);
movptr(t1, Address(klass, Klass::prototype_header_offset()));
Expand All @@ -163,7 +166,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
#ifdef _LP64
if (UseCompressedClassPointers) { // Take care not to kill klass
movptr(t1, klass);
encode_klass_not_null(t1);
encode_klass_not_null(t1, tmp_encode_klass);
movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
} else
#endif
@@ -298,9 +301,10 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
// check against inline cache
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
int start_offset = offset();
Register tmp_load_klass = LP64_ONLY(rscratch2) NOT_LP64(noreg);

if (UseCompressedClassPointers) {
load_klass(rscratch1, receiver);
load_klass(rscratch1, receiver, tmp_load_klass);
cmpptr(rscratch1, iCache);
} else {
cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
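The biased-locking paths need the extra register for the same reason: acquiring a bias reads the prototype mark word through the object's klass, so the klass load inside biased_locking_enter must now be given a scratch register for narrow-klass decoding (rklass_decode_tmp above). A hedged sketch of the helper that motivates the extra parameter, again inferred rather than copied from this commit:

// Sketch only -- assumed shape of the prototype-header load used by
// biased_locking_enter after it gained the extra scratch register.
void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
  load_klass(dst, src, tmp);                                   // needs tmp on LP64 now
  movptr(dst, Address(dst, Klass::prototype_header_offset())); // prototype mark word
}

Note also that inline_cache_check above picks rscratch2 as the scratch, since rscratch1 is already the destination of the klass load.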
3 changes: 2 additions & 1 deletion src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
@@ -1242,8 +1242,9 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

// load the klass and check the has finalizer flag
Label register_finalizer;
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
Register t = rsi;
__ load_klass(t, rax);
__ load_klass(t, rax, tmp_load_klass);
__ movl(t, Address(t, Klass::access_flags_offset()));
__ testl(t, JVM_ACC_HAS_FINALIZER);
__ jcc(Assembler::notZero, register_finalizer);
6 changes: 6 additions & 0 deletions src/hotspot/cpu/x86/globalDefinitions_x86.hpp
@@ -67,4 +67,10 @@ const bool CCallingConventionRequiresIntsAsLongs = false;

#define THREAD_LOCAL_POLL

#if INCLUDE_JVMCI
#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS (EnableJVMCI || UseAOT)
#else
#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false
#endif

#endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
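On x86_64 the dependency is kept only when JVMCI is built in and actually in use, presumably because JVMCI/AOT-compiled code still assumes the combined encoding; on a plain build the macro is simply false. Since EnableJVMCI and UseAOT are runtime flags, the macro expands to a runtime expression here, so consumers have to treat it as an ordinary boolean condition rather than a preprocessor switch. A tiny standalone illustration (the flag variables are stand-ins, not HotSpot source):

// Standalone illustration: on x86_64 JVMCI builds the macro is a runtime
// expression, so it must be evaluated inside an ordinary `if`.
static bool EnableJVMCI = false;  // stand-ins for the real -XX flags
static bool UseAOT      = false;
#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS (EnableJVMCI || UseAOT)

bool class_pointers_depend_on_oops() {
  // false on this path unless JVMCI or AOT is switched on at run time
  return COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS;
}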
6 changes: 4 additions & 2 deletions src/hotspot/cpu/x86/interp_masm_x86.cpp
@@ -58,7 +58,8 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
jmpb(next);

bind(update);
load_klass(obj, obj);
Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
load_klass(obj, obj, tmp_load_klass);

xorptr(obj, mdo_addr);
testptr(obj, TypeEntries::type_klass_mask);
@@ -1188,7 +1189,8 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
movptr(obj_reg, Address(lock_reg, obj_offset));

if (UseBiasedLocking) {
biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, false, done, &slow_case);
Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);
biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, rklass_decode_tmp, false, done, &slow_case);
}

// Load immediate 1 into swap_reg %rax