Skip to content
Permalink
Browse files
Merge
  • Loading branch information
Andrew Haley committed Mar 17, 2020
2 parents 1953612 + 035100c commit 1a0995981c36229a2c502f67ed2b2dea32b8a8bf
@@ -7900,7 +7900,7 @@ void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix
// is allowed in legacy mode and has resources which will fit in it.
// Pure EVEX instructions will have is_evex_instruction set in their definition.
if (!attributes->is_legacy_mode()) {
if (UseAVX > 2 && !attributes->is_evex_instruction() && !_is_managed) {
if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) {
if ((attributes->get_vector_len() != AVX_512bit) && (nds_enc < 16) && (xreg_enc < 16)) {
attributes->set_is_legacy_mode();
}
@@ -7915,7 +7915,7 @@ void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix
assert(((nds_enc < 16 && xreg_enc < 16) || (!attributes->is_legacy_mode())),"XMM register should be 0-15");
}

_is_managed = false;
clear_managed();
if (UseAVX > 2 && !attributes->is_legacy_mode())
{
bool evex_r = (xreg_enc >= 16);
@@ -7947,7 +7947,7 @@ int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexS
// is allowed in legacy mode and has resources which will fit in it.
// Pure EVEX instructions will have is_evex_instruction set in their definition.
if (!attributes->is_legacy_mode()) {
if (UseAVX > 2 && !attributes->is_evex_instruction() && !_is_managed) {
if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) {
if ((!attributes->uses_vl() || (attributes->get_vector_len() != AVX_512bit)) &&
(dst_enc < 16) && (nds_enc < 16) && (src_enc < 16)) {
attributes->set_is_legacy_mode();
@@ -7969,7 +7969,7 @@ int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexS
assert(((dst_enc < 16 && nds_enc < 16 && src_enc < 16) || (!attributes->is_legacy_mode())),"XMM register should be 0-15");
}

_is_managed = false;
clear_managed();
if (UseAVX > 2 && !attributes->is_legacy_mode())
{
bool evex_r = (dst_enc >= 16);
@@ -339,15 +339,15 @@ class Address {

private:
// Returns true when the base register requires a REX.B prefix, i.e. the
// register is present and its hardware encoding is 8 or above (R8-R15).
// is_valid() subsumes the former '_base != noreg' test; the stray duplicate
// return statement left over from the merge is removed.
bool base_needs_rex() const {
  return _base->is_valid() && _base->encoding() >= 8;
}

// Returns true when the index register requires a REX.X prefix, i.e. the
// register is present and its hardware encoding is 8 or above.
// (Also restores the missing space after '&&' and drops the stale
// duplicate return statement left by the merge.)
bool index_needs_rex() const {
  return _index->is_valid() && _index->encoding() >= 8;
}

// Returns true when the XMM index register (VSIB addressing) requires a
// REX.X prefix, i.e. the register is present and encodes as 8 or above.
// Stale duplicate return statement from the merge removed.
bool xmmindex_needs_rex() const {
  return _xmmindex->is_valid() && _xmmindex->encoding() >= 8;
}

relocInfo::relocType reloc() const { return _rspec.type(); }
@@ -659,7 +659,7 @@ class Assembler : public AbstractAssembler {
bool _legacy_mode_dq;
bool _legacy_mode_vl;
bool _legacy_mode_vlbw;
bool _is_managed;
NOT_LP64(bool _is_managed;)

class InstructionAttr *_attributes;

@@ -870,16 +870,18 @@ class Assembler : public AbstractAssembler {
_legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
_legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
_legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
_is_managed = false;
NOT_LP64(_is_managed = false;)
_attributes = NULL;
}

void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
void clear_attributes(void) { _attributes = NULL; }

// The "managed" flag only exists on 32-bit builds: the backing field is
// declared NOT_LP64(bool _is_managed;), so on LP64 these accessors compile
// to no-ops / constant false. The old unconditional definitions duplicated
// below in the merge residue are removed — keeping both copies would
// redefine the same members and reference a field that LP64 does not have.
void set_managed(void) { NOT_LP64(_is_managed = true;) }
void clear_managed(void) { NOT_LP64(_is_managed = false;) }
bool is_managed(void) {
  NOT_LP64(return _is_managed;)
  LP64_ONLY(return false;)
}

void lea(Register dst, Address src);

@@ -2280,22 +2282,20 @@ class InstructionAttr {
bool no_reg_mask, // when true, k0 is used when EVEX encoding is chosen, else embedded_opmask_register_specifier is used
bool uses_vl) // This instruction may have legacy constraints based on vector length for EVEX
:
_avx_vector_len(vector_len),
_rex_vex_w(rex_vex_w),
_rex_vex_w_reverted(false),
_legacy_mode(legacy_mode),
_legacy_mode(legacy_mode || UseAVX < 3),
_no_reg_mask(no_reg_mask),
_uses_vl(uses_vl),
_tuple_type(Assembler::EVEX_ETUP),
_input_size_in_bits(Assembler::EVEX_NObit),
_rex_vex_w_reverted(false),
_is_evex_instruction(false),
_evex_encoding(0),
_is_clear_context(true),
_is_extended_context(false),
_avx_vector_len(vector_len),
_tuple_type(Assembler::EVEX_ETUP),
_input_size_in_bits(Assembler::EVEX_NObit),
_evex_encoding(0),
_embedded_opmask_register_specifier(0), // hard code k0
_current_assembler(NULL) {
if (UseAVX < 3) _legacy_mode = true;
}
_current_assembler(NULL) { }

~InstructionAttr() {
if (_current_assembler != NULL) {
@@ -2305,37 +2305,37 @@ class InstructionAttr {
}

private:
int _avx_vector_len;
bool _rex_vex_w;
bool _rex_vex_w_reverted;
bool _legacy_mode;
bool _no_reg_mask;
bool _uses_vl;
int _tuple_type;
int _input_size_in_bits;
bool _rex_vex_w_reverted;
bool _is_evex_instruction;
int _evex_encoding;
bool _is_clear_context;
bool _is_extended_context;
int _avx_vector_len;
int _tuple_type;
int _input_size_in_bits;
int _evex_encoding;
int _embedded_opmask_register_specifier;

Assembler *_current_assembler;

public:
// query functions for field accessors
int get_vector_len(void) const { return _avx_vector_len; }
bool is_rex_vex_w(void) const { return _rex_vex_w; }
bool is_rex_vex_w_reverted(void) { return _rex_vex_w_reverted; }
bool is_legacy_mode(void) const { return _legacy_mode; }
bool is_no_reg_mask(void) const { return _no_reg_mask; }
bool uses_vl(void) const { return _uses_vl; }
bool is_rex_vex_w_reverted(void) { return _rex_vex_w_reverted; }
bool is_evex_instruction(void) const { return _is_evex_instruction; }
bool is_clear_context(void) const { return _is_clear_context; }
bool is_extended_context(void) const { return _is_extended_context; }
int get_vector_len(void) const { return _avx_vector_len; }
int get_tuple_type(void) const { return _tuple_type; }
int get_input_size(void) const { return _input_size_in_bits; }
int is_evex_instruction(void) const { return _is_evex_instruction; }
int get_evex_encoding(void) const { return _evex_encoding; }
bool is_clear_context(void) const { return _is_clear_context; }
bool is_extended_context(void) const { return _is_extended_context; }
int get_embedded_opmask_register_specifier(void) const { return _embedded_opmask_register_specifier; }
int get_embedded_opmask_register_specifier(void) const { return _embedded_opmask_register_specifier; }

// Set the vector len manually
void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }
@@ -2302,6 +2302,19 @@ void os::Linux::print_full_memory_info(outputStream* st) {
st->print("\n/proc/meminfo:\n");
_print_ascii_file("/proc/meminfo", st);
st->cr();

// some information regarding THPs; for details see
// https://www.kernel.org/doc/Documentation/vm/transhuge.txt
st->print_cr("/sys/kernel/mm/transparent_hugepage/enabled:");
if (!_print_ascii_file("/sys/kernel/mm/transparent_hugepage/enabled", st)) {
st->print_cr(" <Not Available>");
}
st->cr();
st->print_cr("/sys/kernel/mm/transparent_hugepage/defrag (defrag/compaction efforts parameter):");
if (!_print_ascii_file("/sys/kernel/mm/transparent_hugepage/defrag", st)) {
st->print_cr(" <Not Available>");
}
st->cr();
}

void os::Linux::print_ld_preload_file(outputStream* st) {
@@ -171,11 +171,54 @@ void ShenandoahCodeRoots::arm_nmethods() {
}
}

class ShenandoahDisarmNMethodClosure : public NMethodClosure {
private:
BarrierSetNMethod* const _bs;

public:
ShenandoahDisarmNMethodClosure() :
_bs(BarrierSet::barrier_set()->barrier_set_nmethod()) {
}

virtual void do_nmethod(nmethod* nm) {
_bs->disarm(nm);
}
};

// Gang task that iterates the Shenandoah code-root table in parallel
// and disarms every nmethod found there (see ShenandoahDisarmNMethodClosure).
class ShenandoahDisarmNMethodsTask : public AbstractGangTask {
private:
ShenandoahDisarmNMethodClosure _cl;
ShenandoahConcurrentNMethodIterator _iterator;

public:
ShenandoahDisarmNMethodsTask() :
AbstractGangTask("ShenandoahDisarmNMethodsTask"),
_iterator(ShenandoahCodeRoots::table()) {
// Open the concurrent iteration before workers run; done under
// CodeCache_lock with no safepoint check since this may execute
// inside a GC phase.
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
_iterator.nmethods_do_begin();
}

~ShenandoahDisarmNMethodsTask() {
// Close the iteration opened in the constructor, under the same lock.
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
_iterator.nmethods_do_end();
}

virtual void work(uint worker_id) {
// Each worker claims disjoint chunks from the shared iterator and
// applies the disarm closure to the nmethods it claims.
_iterator.nmethods_do(&_cl);
}
};

void ShenandoahCodeRoots::disarm_nmethods() {
ShenandoahDisarmNMethodsTask task;
ShenandoahHeap::heap()->workers()->run_task(&task);
}

class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
private:
bool _unloading_occurred;
volatile bool _failed;
ShenandoahHeap* _heap;
bool _unloading_occurred;
volatile bool _failed;
ShenandoahHeap* const _heap;
BarrierSetNMethod* const _bs;

void set_failed() {
Atomic::store(&_failed, true);
@@ -201,7 +244,8 @@ class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
_unloading_occurred(unloading_occurred),
_failed(false),
_heap(ShenandoahHeap::heap()) {}
_heap(ShenandoahHeap::heap()),
_bs(ShenandoahBarrierSet::barrier_set()->barrier_set_nmethod()) {}

virtual void do_nmethod(nmethod* nm) {
assert(_heap->is_concurrent_root_in_progress(), "Only this phase");
@@ -225,10 +269,10 @@ class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
ShenandoahReentrantLocker locker(nm_data->lock());

// Heal oops and disarm
if (_heap->is_evacuation_in_progress()) {
if (_bs->is_armed(nm)) {
ShenandoahNMethod::heal_nmethod(nm);
_bs->disarm(nm);
}
ShenandoahNMethod::disarm_nmethod(nm);

// Clear compiled ICs and exception caches
if (!nm->unload_nmethod_caches(_unloading_occurred)) {
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
* Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -111,6 +111,7 @@ class ShenandoahCodeRoots : public AllStatic {
static void unlink(WorkGang* workers, bool unloading_occurred);
static void purge(WorkGang* workers);
static void arm_nmethods();
static void disarm_nmethods();
static int disarmed_value() { return _disarmed_value; }
static int* disarmed_value_address() { return &_disarmed_value; }

@@ -180,19 +180,30 @@ class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
}
};

class ShenandoahSATBThreadsClosure : public ThreadClosure {
class ShenandoahSATBAndRemarkCodeRootsThreadsClosure : public ThreadClosure {
private:
ShenandoahSATBBufferClosure* _satb_cl;
MarkingCodeBlobClosure* _code_cl;
uintx _claim_token;

public:
ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
_satb_cl(satb_cl),
ShenandoahSATBAndRemarkCodeRootsThreadsClosure(ShenandoahSATBBufferClosure* satb_cl, MarkingCodeBlobClosure* code_cl) :
_satb_cl(satb_cl), _code_cl(code_cl),
_claim_token(Threads::thread_claim_token()) {}

void do_thread(Thread* thread) {
if (thread->claim_threads_do(true, _claim_token)) {
ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
if (_code_cl != NULL && thread->is_Java_thread()) {
// In theory it should not be neccessary to explicitly walk the nmethods to find roots for concurrent marking
// however the liveness of oops reachable from nmethods have very complex lifecycles:
// * Alive if on the stack of an executing method
// * Weakly reachable otherwise
// Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
// live by the SATB invariant but other oops recorded in nmethods may behave differently.
JavaThread* jt = (JavaThread*)thread;
jt->nmethods_do(_code_cl);
}
}
}
};
@@ -212,26 +223,42 @@ class ShenandoahFinalMarkingTask : public AbstractGangTask {
ShenandoahHeap* heap = ShenandoahHeap::heap();

ShenandoahParallelWorkerSession worker_session(worker_id);
ReferenceProcessor* rp;
if (heap->process_references()) {
rp = heap->ref_processor();
shenandoah_assert_rp_isalive_installed();
} else {
rp = NULL;
}

// First drain remaining SATB buffers.
// Notice that this is not strictly necessary for mark-compact. But since
// it requires a StrongRootsScope around the task, we need to claim the
// threads, and performance-wise it doesn't really matter. Adds about 1ms to
// full-gc.
{
ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);

ShenandoahSATBBufferClosure cl(q);
SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
ShenandoahSATBThreadsClosure tc(&cl);
Threads::threads_do(&tc);
}

ReferenceProcessor* rp;
if (heap->process_references()) {
rp = heap->ref_processor();
shenandoah_assert_rp_isalive_installed();
} else {
rp = NULL;
if (heap->unload_classes() && !ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
if (heap->has_forwarded_objects()) {
ShenandoahMarkResolveRefsClosure resolve_mark_cl(q, rp);
MarkingCodeBlobClosure blobsCl(&resolve_mark_cl, !CodeBlobToOopClosure::FixRelocations);
ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl, &blobsCl);
Threads::threads_do(&tc);
} else {
ShenandoahMarkRefsClosure mark_cl(q, rp);
MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);
ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl, &blobsCl);
Threads::threads_do(&tc);
}
} else {
ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl, NULL);
Threads::threads_do(&tc);
}
}

if (heap->is_degenerated_gc_in_progress()) {

0 comments on commit 1a09959

Please sign in to comment.