/*
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/dependencyContext.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/directivesParser.hpp"
#include "interpreter/linkResolver.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/nativeLookup.hpp"
#include "prims/whitebox.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciCompiler.hpp"
#include "jvmci/jvmciRuntime.hpp"
#include "jvmci/jvmciJavaClasses.hpp"
#include "runtime/vframe.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#endif
#ifdef DTRACE_ENABLED
// Only bother with this argument setup if dtrace is available
#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \
{ \
Symbol* klass_name = (method)->klass_name(); \
Symbol* name = (method)->name(); \
Symbol* signature = (method)->signature(); \
HOTSPOT_METHOD_COMPILE_BEGIN( \
(char *) comp_name, strlen(comp_name), \
(char *) klass_name->bytes(), klass_name->utf8_length(), \
(char *) name->bytes(), name->utf8_length(), \
(char *) signature->bytes(), signature->utf8_length()); \
}
#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \
{ \
Symbol* klass_name = (method)->klass_name(); \
Symbol* name = (method)->name(); \
Symbol* signature = (method)->signature(); \
HOTSPOT_METHOD_COMPILE_END( \
(char *) comp_name, strlen(comp_name), \
(char *) klass_name->bytes(), klass_name->utf8_length(), \
(char *) name->bytes(), name->utf8_length(), \
(char *) signature->bytes(), signature->utf8_length(), (success)); \
}
#else // ndef DTRACE_ENABLED
#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)
#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)
#endif // ndef DTRACE_ENABLED
bool CompileBroker::_initialized = false;
volatile bool CompileBroker::_should_block = false;
volatile int CompileBroker::_print_compilation_warning = 0;
volatile jint CompileBroker::_should_compile_new_jobs = run_compilation;
// The installed compiler(s)
AbstractCompiler* CompileBroker::_compilers[2];
// The maximum number of compiler threads, determined during startup.
int CompileBroker::_c1_count = 0;
int CompileBroker::_c2_count = 0;
// Arrays of JNI global handles to the compiler threads' Java Thread objects
jobject* CompileBroker::_compiler1_objects = NULL;
jobject* CompileBroker::_compiler2_objects = NULL;
CompileLog** CompileBroker::_compiler1_logs = NULL;
CompileLog** CompileBroker::_compiler2_logs = NULL;
// These counters are used to assign a unique ID to each compilation.
volatile jint CompileBroker::_compilation_id = 0;
volatile jint CompileBroker::_osr_compilation_id = 0;
// Debugging information
int CompileBroker::_last_compile_type = no_compile;
int CompileBroker::_last_compile_level = CompLevel_none;
char CompileBroker::_last_method_compiled[CompileBroker::name_buffer_length];
// Performance counters
PerfCounter* CompileBroker::_perf_total_compilation = NULL;
PerfCounter* CompileBroker::_perf_osr_compilation = NULL;
PerfCounter* CompileBroker::_perf_standard_compilation = NULL;
PerfCounter* CompileBroker::_perf_total_bailout_count = NULL;
PerfCounter* CompileBroker::_perf_total_invalidated_count = NULL;
PerfCounter* CompileBroker::_perf_total_compile_count = NULL;
PerfCounter* CompileBroker::_perf_total_osr_compile_count = NULL;
PerfCounter* CompileBroker::_perf_total_standard_compile_count = NULL;
PerfCounter* CompileBroker::_perf_sum_osr_bytes_compiled = NULL;
PerfCounter* CompileBroker::_perf_sum_standard_bytes_compiled = NULL;
PerfCounter* CompileBroker::_perf_sum_nmethod_size = NULL;
PerfCounter* CompileBroker::_perf_sum_nmethod_code_size = NULL;
PerfStringVariable* CompileBroker::_perf_last_method = NULL;
PerfStringVariable* CompileBroker::_perf_last_failed_method = NULL;
PerfStringVariable* CompileBroker::_perf_last_invalidated_method = NULL;
PerfVariable* CompileBroker::_perf_last_compile_type = NULL;
PerfVariable* CompileBroker::_perf_last_compile_size = NULL;
PerfVariable* CompileBroker::_perf_last_failed_type = NULL;
PerfVariable* CompileBroker::_perf_last_invalidated_type = NULL;
// Timers and counters for generating statistics
elapsedTimer CompileBroker::_t_total_compilation;
elapsedTimer CompileBroker::_t_osr_compilation;
elapsedTimer CompileBroker::_t_standard_compilation;
elapsedTimer CompileBroker::_t_invalidated_compilation;
elapsedTimer CompileBroker::_t_bailedout_compilation;
int CompileBroker::_total_bailout_count = 0;
int CompileBroker::_total_invalidated_count = 0;
int CompileBroker::_total_compile_count = 0;
int CompileBroker::_total_osr_compile_count = 0;
int CompileBroker::_total_standard_compile_count = 0;
int CompileBroker::_total_compiler_stopped_count = 0;
int CompileBroker::_total_compiler_restarted_count = 0;
int CompileBroker::_sum_osr_bytes_compiled = 0;
int CompileBroker::_sum_standard_bytes_compiled = 0;
int CompileBroker::_sum_nmethod_size = 0;
int CompileBroker::_sum_nmethod_code_size = 0;
long CompileBroker::_peak_compilation_time = 0;
CompileQueue* CompileBroker::_c2_compile_queue = NULL;
CompileQueue* CompileBroker::_c1_compile_queue = NULL;
class CompilationLog : public StringEventLog {
public:
CompilationLog() : StringEventLog("Compilation events") {
}
void log_compile(JavaThread* thread, CompileTask* task) {
StringLogMessage lm;
stringStream sstr(lm.buffer(), lm.size());
// msg.time_stamp().update_to(tty->time_stamp().ticks());
task->print(&sstr, NULL, true, false);
log(thread, "%s", (const char*)lm);
}
void log_nmethod(JavaThread* thread, nmethod* nm) {
log(thread, "nmethod %d%s " INTPTR_FORMAT " code [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",
nm->compile_id(), nm->is_osr_method() ? "%" : "",
p2i(nm), p2i(nm->code_begin()), p2i(nm->code_end()));
}
void log_failure(JavaThread* thread, CompileTask* task, const char* reason, const char* retry_message) {
StringLogMessage lm;
lm.print("%4d COMPILE SKIPPED: %s", task->compile_id(), reason);
if (retry_message != NULL) {
lm.append(" (%s)", retry_message);
}
lm.print("\n");
log(thread, "%s", (const char*)lm);
}
void log_metaspace_failure(const char* reason) {
ResourceMark rm;
StringLogMessage lm;
lm.print("%4d COMPILE PROFILING SKIPPED: %s", -1, reason);
lm.print("\n");
log(JavaThread::current(), "%s", (const char*)lm);
}
};
static CompilationLog* _compilation_log = NULL;
bool compileBroker_init() {
if (LogEvents) {
_compilation_log = new CompilationLog();
}
// init directives stack, adding default directive
DirectivesStack::init();
if (DirectivesParser::has_file()) {
return DirectivesParser::parse_from_flag();
} else if (CompilerDirectivesPrint) {
// Print default directive even when no other was added
DirectivesStack::print(tty);
}
return true;
}
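// CompileTaskWrapper is an RAII helper that binds a CompileTask to the current
// CompilerThread for the duration of a compilation. Its destructor marks the
// task complete, notifies (or frees) a blocked waiter, and clears the thread's
// task and environment state.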
CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) {
CompilerThread* thread = CompilerThread::current();
thread->set_task(task);
#if INCLUDE_JVMCI
if (task->is_blocking() && CompileBroker::compiler(task->comp_level())->is_jvmci()) {
task->set_jvmci_compiler_thread(thread);
}
#endif
CompileLog* log = thread->log();
if (log != NULL && !task->is_unloaded()) task->log_task_start(log);
}
CompileTaskWrapper::~CompileTaskWrapper() {
CompilerThread* thread = CompilerThread::current();
CompileTask* task = thread->task();
CompileLog* log = thread->log();
if (log != NULL && !task->is_unloaded()) task->log_task_done(log);
thread->set_task(NULL);
task->set_code_handle(NULL);
thread->set_env(NULL);
if (task->is_blocking()) {
bool free_task = false;
{
MutexLocker notifier(task->lock(), thread);
task->mark_complete();
#if INCLUDE_JVMCI
if (CompileBroker::compiler(task->comp_level())->is_jvmci()) {
if (!task->has_waiter()) {
// The waiting thread timed out and thus did not free the task.
free_task = true;
}
task->set_jvmci_compiler_thread(NULL);
}
#endif
if (!free_task) {
// Notify the waiting thread that the compilation has completed
// so that it can free the task.
task->lock()->notify_all();
}
}
if (free_task) {
// The task can only be freed once the task lock is released.
CompileTask::free(task);
}
} else {
task->mark_complete();
// By convention, the compiling thread is responsible for
// recycling a non-blocking CompileTask.
CompileTask::free(task);
}
}
/**
* Check if a CompilerThread can be removed and update count if requested.
*/
static bool can_remove(CompilerThread *ct, bool do_it) {
assert(UseDynamicNumberOfCompilerThreads, "or shouldn't be here");
if (!ReduceNumberOfCompilerThreads) return false;
AbstractCompiler *compiler = ct->compiler();
int compiler_count = compiler->num_compiler_threads();
bool c1 = compiler->is_c1();
// Keep at least 1 compiler thread of each type.
if (compiler_count < 2) return false;
// Keep thread alive for at least some time.
if (ct->idle_time_millis() < (c1 ? 500 : 100)) return false;
// We only allow the last compiler thread of each type to get removed.
jobject last_compiler = c1 ? CompileBroker::compiler1_object(compiler_count - 1)
: CompileBroker::compiler2_object(compiler_count - 1);
if (oopDesc::equals(ct->threadObj(), JNIHandles::resolve_non_null(last_compiler))) {
if (do_it) {
assert_locked_or_safepoint(CompileThread_lock); // Update must be consistent.
compiler->set_num_compiler_threads(compiler_count - 1);
}
return true;
}
return false;
}
/**
* Add a CompileTask to a CompileQueue.
*/
void CompileQueue::add(CompileTask* task) {
assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
task->set_next(NULL);
task->set_prev(NULL);
if (_last == NULL) {
// The compile queue is empty.
assert(_first == NULL, "queue is empty");
_first = task;
_last = task;
} else {
// Append the task to the queue.
assert(_last->next() == NULL, "not last");
_last->set_next(task);
task->set_prev(_last);
_last = task;
}
++_size;
// Mark the method as being in the compile queue.
task->method()->set_queued_for_compilation();
if (CIPrintCompileQueue) {
print_tty();
}
if (LogCompilation && xtty != NULL) {
task->log_task_queued();
}
// Notify CompilerThreads that a task is available.
MethodCompileQueue_lock->notify_all();
}
/**
* Empties compilation queue by putting all compilation tasks onto
* a freelist. Furthermore, the method wakes up all threads that are
* waiting on a compilation task to finish. This can happen if background
* compilation is disabled.
*/
void CompileQueue::free_all() {
MutexLocker mu(MethodCompileQueue_lock);
CompileTask* next = _first;
// Iterate over all tasks in the compile queue
while (next != NULL) {
CompileTask* current = next;
next = current->next();
{
// Wake up thread that blocks on the compile task.
MutexLocker ct_lock(current->lock());
current->lock()->notify();
}
// Put the task back on the freelist.
CompileTask::free(current);
}
_first = NULL;
// Wake up all threads that block on the queue.
MethodCompileQueue_lock->notify_all();
}
/**
* Get the next CompileTask from a CompileQueue
*/
CompileTask* CompileQueue::get() {
// Save methods from RedefineClasses across any safepoint that may occur
// while MethodCompileQueue_lock is held below.
methodHandle save_method;
methodHandle save_hot_method;
MutexLocker locker(MethodCompileQueue_lock);
// If _first is NULL we have no more compile jobs. There are two reasons for
// having no compile jobs: First, we compiled everything we wanted. Second,
// we ran out of code cache so compilation has been disabled. In the latter
// case we perform code cache sweeps to free memory such that we can re-enable
// compilation.
while (_first == NULL) {
// Exit loop if compilation is disabled forever
if (CompileBroker::is_compilation_disabled_forever()) {
return NULL;
}
// If there are no compilation tasks and we can compile new jobs
// (i.e., there is enough free space in the code cache) there is
// no need to invoke the sweeper. As a result, the hotness of methods
// remains unchanged. This behavior is desired, since we want to keep
// the stable state, i.e., we do not want to evict methods from the
// code cache if it is unnecessary.
// We need a timed wait here, since compiler threads can exit if compilation
// is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
// is not critical and we do not want idle compiler threads to wake up too often.
MethodCompileQueue_lock->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
if (UseDynamicNumberOfCompilerThreads && _first == NULL) {
// Still nothing to compile. Give caller a chance to stop this thread.
if (can_remove(CompilerThread::current(), false)) return NULL;
}
}
if (CompileBroker::is_compilation_disabled_forever()) {
return NULL;
}
CompileTask* task;
{
NoSafepointVerifier nsv;
task = CompilationPolicy::policy()->select_task(this);
if (task != NULL) {
task = task->select_for_compilation();
}
}
if (task != NULL) {
// Save method pointers across the unlock safepoint. The task is removed from
// the compilation queue, which is walked during RedefineClasses.
save_method = methodHandle(task->method());
save_hot_method = methodHandle(task->hot_method());
remove(task);
}
purge_stale_tasks(); // may temporarily release MCQ lock
return task;
}
// Clean & deallocate stale compile tasks.
// Temporarily releases MethodCompileQueue lock.
void CompileQueue::purge_stale_tasks() {
assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
if (_first_stale != NULL) {
// Stale tasks are purged when MCQ lock is released,
// but _first_stale updates are protected by MCQ lock.
// Once task processing starts and MCQ lock is released,
// other compiler threads can reuse _first_stale.
CompileTask* head = _first_stale;
_first_stale = NULL;
{
MutexUnlocker ul(MethodCompileQueue_lock);
for (CompileTask* task = head; task != NULL; ) {
CompileTask* next_task = task->next();
CompileTaskWrapper ctw(task); // Frees the task
task->set_failure_reason("stale task");
task = next_task;
}
}
}
}
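// Unlink a task from the doubly-linked compile queue. The caller must hold
// MethodCompileQueue_lock.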
void CompileQueue::remove(CompileTask* task) {
assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
if (task->prev() != NULL) {
task->prev()->set_next(task->next());
} else {
// task is the first element
assert(task == _first, "Sanity");
_first = task->next();
}
if (task->next() != NULL) {
task->next()->set_prev(task->prev());
} else {
// task is the last element
assert(task == _last, "Sanity");
_last = task->prev();
}
--_size;
}
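// Remove a task from the queue and chain it onto the stale list so that it can
// be reclaimed later, outside the MethodCompileQueue_lock (see purge_stale_tasks).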
void CompileQueue::remove_and_mark_stale(CompileTask* task) {
assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
remove(task);
// Enqueue the task for reclamation (should be done outside MCQ lock)
task->set_next(_first_stale);
task->set_prev(NULL);
_first_stale = task;
}
// methods in the compile queue need to be marked as used on the stack
// so that they don't get reclaimed by Redefine Classes
void CompileQueue::mark_on_stack() {
CompileTask* task = _first;
while (task != NULL) {
task->mark_on_stack();
task = task->next();
}
}
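// Map a compilation level to the corresponding compile queue (C1 or C2).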
CompileQueue* CompileBroker::compile_queue(int comp_level) {
if (is_c2_compile(comp_level)) return _c2_compile_queue;
if (is_c1_compile(comp_level)) return _c1_compile_queue;
return NULL;
}
void CompileBroker::print_compile_queues(outputStream* st) {
st->print_cr("Current compiles: ");
char buf[2000];
int buflen = sizeof(buf);
Threads::print_threads_compiling(st, buf, buflen, /* short_form = */ true);
st->cr();
if (_c1_compile_queue != NULL) {
_c1_compile_queue->print(st);
}
if (_c2_compile_queue != NULL) {
_c2_compile_queue->print(st);
}
}
void CompileQueue::print(outputStream* st) {
assert_locked_or_safepoint(MethodCompileQueue_lock);
st->print_cr("%s:", name());
CompileTask* task = _first;
if (task == NULL) {
st->print_cr("Empty");
} else {
while (task != NULL) {
task->print(st, NULL, true, true);
task = task->next();
}
}
st->cr();
}
void CompileQueue::print_tty() {
ResourceMark rm;
stringStream ss;
// Dump the compile queue into a buffer before locking the tty
print(&ss);
{
ttyLocker ttyl;
tty->print("%s", ss.as_string());
}
}
CompilerCounters::CompilerCounters() {
_current_method[0] = '\0';
_compile_type = CompileBroker::no_compile;
}
// ------------------------------------------------------------------
// CompileBroker::compilation_init
//
// Initialize the compilation system
void CompileBroker::compilation_init_phase1(TRAPS) {
_last_method_compiled[0] = '\0';
// No need to initialize compilation system if we do not use it.
if (!UseCompiler) {
return;
}
// Set the interface to the current compiler(s).
_c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
_c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
#if INCLUDE_JVMCI
if (EnableJVMCI) {
// This is creating a JVMCICompiler singleton.
JVMCICompiler* jvmci = new JVMCICompiler();
if (UseJVMCICompiler) {
_compilers[1] = jvmci;
if (FLAG_IS_DEFAULT(JVMCIThreads)) {
if (BootstrapJVMCI) {
// JVMCI will bootstrap so give it more threads
_c2_count = MIN2(32, os::active_processor_count());
}
} else {
_c2_count = JVMCIThreads;
}
if (FLAG_IS_DEFAULT(JVMCIHostThreads)) {
} else {
_c1_count = JVMCIHostThreads;
}
}
}
#endif // INCLUDE_JVMCI
#ifdef COMPILER1
if (_c1_count > 0) {
_compilers[0] = new Compiler();
}
#endif // COMPILER1
#ifdef COMPILER2
if (true JVMCI_ONLY( && !UseJVMCICompiler)) {
if (_c2_count > 0) {
_compilers[1] = new C2Compiler();
}
}
#endif // COMPILER2
// Start the compiler thread(s) and the sweeper thread
init_compiler_sweeper_threads();
// totalTime performance counter is always created as it is required
// by the implementation of java.lang.management.CompilationMBean.
{
EXCEPTION_MARK;
_perf_total_compilation =
PerfDataManager::create_counter(JAVA_CI, "totalTime",
PerfData::U_Ticks, CHECK);
}
if (UsePerfData) {
EXCEPTION_MARK;
// create the jvmstat performance counters
_perf_osr_compilation =
PerfDataManager::create_counter(SUN_CI, "osrTime",
PerfData::U_Ticks, CHECK);
_perf_standard_compilation =
PerfDataManager::create_counter(SUN_CI, "standardTime",
PerfData::U_Ticks, CHECK);
_perf_total_bailout_count =
PerfDataManager::create_counter(SUN_CI, "totalBailouts",
PerfData::U_Events, CHECK);
_perf_total_invalidated_count =
PerfDataManager::create_counter(SUN_CI, "totalInvalidates",
PerfData::U_Events, CHECK);
_perf_total_compile_count =
PerfDataManager::create_counter(SUN_CI, "totalCompiles",
PerfData::U_Events, CHECK);
_perf_total_osr_compile_count =
PerfDataManager::create_counter(SUN_CI, "osrCompiles",
PerfData::U_Events, CHECK);
_perf_total_standard_compile_count =
PerfDataManager::create_counter(SUN_CI, "standardCompiles",
PerfData::U_Events, CHECK);
_perf_sum_osr_bytes_compiled =
PerfDataManager::create_counter(SUN_CI, "osrBytes",
PerfData::U_Bytes, CHECK);
_perf_sum_standard_bytes_compiled =
PerfDataManager::create_counter(SUN_CI, "standardBytes",
PerfData::U_Bytes, CHECK);
_perf_sum_nmethod_size =
PerfDataManager::create_counter(SUN_CI, "nmethodSize",
PerfData::U_Bytes, CHECK);
_perf_sum_nmethod_code_size =
PerfDataManager::create_counter(SUN_CI, "nmethodCodeSize",
PerfData::U_Bytes, CHECK);
_perf_last_method =
PerfDataManager::create_string_variable(SUN_CI, "lastMethod",
CompilerCounters::cmname_buffer_length,
"", CHECK);
_perf_last_failed_method =
PerfDataManager::create_string_variable(SUN_CI, "lastFailedMethod",
CompilerCounters::cmname_buffer_length,
"", CHECK);
_perf_last_invalidated_method =
PerfDataManager::create_string_variable(SUN_CI, "lastInvalidatedMethod",
CompilerCounters::cmname_buffer_length,
"", CHECK);
_perf_last_compile_type =
PerfDataManager::create_variable(SUN_CI, "lastType",
PerfData::U_None,
(jlong)CompileBroker::no_compile,
CHECK);
_perf_last_compile_size =
PerfDataManager::create_variable(SUN_CI, "lastSize",
PerfData::U_Bytes,
(jlong)CompileBroker::no_compile,
CHECK);
_perf_last_failed_type =
PerfDataManager::create_variable(SUN_CI, "lastFailedType",
PerfData::U_None,
(jlong)CompileBroker::no_compile,
CHECK);
_perf_last_invalidated_type =
PerfDataManager::create_variable(SUN_CI, "lastInvalidatedType",
PerfData::U_None,
(jlong)CompileBroker::no_compile,
CHECK);
}
}
// Completes compiler initialization. Compilation requests submitted
// prior to this will be silently ignored.
void CompileBroker::compilation_init_phase2() {
_initialized = true;
}
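// Create a java.lang.Thread object, in the system thread group, to represent a
// compiler or sweeper thread.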
Handle CompileBroker::create_thread_oop(const char* name, TRAPS) {
Handle string = java_lang_String::create_from_str(name, CHECK_NH);
Handle thread_group(THREAD, Universe::system_thread_group());
return JavaCalls::construct_new_instance(
SystemDictionary::Thread_klass(),
vmSymbols::threadgroup_string_void_signature(),
thread_group,
string,
CHECK_NH);
}
JavaThread* CompileBroker::make_thread(jobject thread_handle, CompileQueue* queue, AbstractCompiler* comp, TRAPS) {
JavaThread* thread = NULL;
{
MutexLocker mu(Threads_lock, THREAD);
if (comp != NULL) {
if (!InjectCompilerCreationFailure || comp->num_compiler_threads() == 0) {
CompilerCounters* counters = new CompilerCounters();
thread = new CompilerThread(queue, counters);
}
} else {
thread = new CodeCacheSweeperThread();
}
// At this point the new CompilerThread data-races with this startup
// thread (which I believe is the primordial thread and NOT the VM
// thread). This means Java bytecodes being executed at startup can
// queue compile jobs which will run at whatever default priority the
// newly created CompilerThread runs at.
// At this point it may be possible that no osthread was created for the
// JavaThread due to lack of memory. We would have to throw an exception
// in that case. However, since this must work and we do not allow
// exceptions anyway, check and abort if this fails. But first release the
// lock.
if (thread != NULL && thread->osthread() != NULL) {
java_lang_Thread::set_thread(JNIHandles::resolve_non_null(thread_handle), thread);
// Note that this only sets the JavaThread _priority field, which by
// definition is limited to Java priorities and not OS priorities.
// The os-priority is set in the CompilerThread startup code itself
java_lang_Thread::set_priority(JNIHandles::resolve_non_null(thread_handle), NearMaxPriority);
// Note that we cannot call os::set_priority because it expects Java
// priorities and we are *explicitly* using OS priorities so that it's
// possible to set the compiler thread priority higher than any Java
// thread.
int native_prio = CompilerThreadPriority;
if (native_prio == -1) {
if (UseCriticalCompilerThreadPriority) {
native_prio = os::java_to_os_priority[CriticalPriority];
} else {
native_prio = os::java_to_os_priority[NearMaxPriority];
}
}
os::set_native_priority(thread, native_prio);
java_lang_Thread::set_daemon(JNIHandles::resolve_non_null(thread_handle));
thread->set_threadObj(JNIHandles::resolve_non_null(thread_handle));
if (comp != NULL) {
thread->as_CompilerThread()->set_compiler(comp);
}
Threads::add(thread);
Thread::start(thread);
}
}
// First release lock before aborting VM.
if (thread == NULL || thread->osthread() == NULL) {
if (UseDynamicNumberOfCompilerThreads && comp != NULL && comp->num_compiler_threads() > 0) {
if (thread != NULL) {
thread->smr_delete();
}
return NULL;
}
vm_exit_during_initialization("java.lang.OutOfMemoryError",
os::native_thread_creation_failed_msg());
}
// Let go of Threads_lock before yielding
os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
return thread;
}
void CompileBroker::init_compiler_sweeper_threads() {
EXCEPTION_MARK;
#if !defined(ZERO)
assert(_c2_count > 0 || _c1_count > 0, "No compilers?");
#endif // !ZERO
// Initialize the compilation queue
if (_c2_count > 0) {
const char* name = JVMCI_ONLY(UseJVMCICompiler ? "JVMCI compile queue" :) "C2 compile queue";
_c2_compile_queue = new CompileQueue(name);
_compiler2_objects = NEW_C_HEAP_ARRAY(jobject, _c2_count, mtCompiler);
_compiler2_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c2_count, mtCompiler);
}
if (_c1_count > 0) {
_c1_compile_queue = new CompileQueue("C1 compile queue");
_compiler1_objects = NEW_C_HEAP_ARRAY(jobject, _c1_count, mtCompiler);
_compiler1_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c1_count, mtCompiler);
}
char name_buffer[256];
for (int i = 0; i < _c2_count; i++) {
// Create a name for our thread.
sprintf(name_buffer, "%s CompilerThread%d", _compilers[1]->name(), i);
Handle thread_oop = create_thread_oop(name_buffer, CHECK);
jobject thread_handle = JNIHandles::make_global(thread_oop);
_compiler2_objects[i] = thread_handle;
_compiler2_logs[i] = NULL;
if (!UseDynamicNumberOfCompilerThreads || i == 0) {
JavaThread *ct = make_thread(thread_handle, _c2_compile_queue, _compilers[1], CHECK);
assert(ct != NULL, "should have been handled for initial thread");
_compilers[1]->set_num_compiler_threads(i + 1);
if (TraceCompilerThreads) {
ResourceMark rm;
MutexLocker mu(Threads_lock);
tty->print_cr("Added initial compiler thread %s", ct->get_thread_name());
}
}
}
for (int i = 0; i < _c1_count; i++) {
// Create a name for our thread.
sprintf(name_buffer, "C1 CompilerThread%d", i);
Handle thread_oop = create_thread_oop(name_buffer, CHECK);
jobject thread_handle = JNIHandles::make_global(thread_oop);
_compiler1_objects[i] = thread_handle;
_compiler1_logs[i] = NULL;
if (!UseDynamicNumberOfCompilerThreads || i == 0) {
JavaThread *ct = make_thread(thread_handle, _c1_compile_queue, _compilers[0], CHECK);
assert(ct != NULL, "should have been handled for initial thread");
_compilers[0]->set_num_compiler_threads(i + 1);
if (TraceCompilerThreads) {
ResourceMark rm;
MutexLocker mu(Threads_lock);
tty->print_cr("Added initial compiler thread %s", ct->get_thread_name());
}
}
}
if (UsePerfData) {
PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, _c1_count + _c2_count, CHECK);
}
if (MethodFlushing) {
// Initialize the sweeper thread
Handle thread_oop = create_thread_oop("Sweeper thread", CHECK);
jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
make_thread(thread_handle, NULL, NULL, CHECK);
}
}
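// Start additional compiler threads if the compile queue length, available
// memory, and free code cache space allow it. Called from compiler threads when
// UseDynamicNumberOfCompilerThreads is enabled.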
void CompileBroker::possibly_add_compiler_threads() {
EXCEPTION_MARK;
julong available_memory = os::available_memory();
// If SegmentedCodeCache is off, both values refer to the single heap (with type CodeBlobType::All).
size_t available_cc_np = CodeCache::unallocated_capacity(CodeBlobType::MethodNonProfiled),
available_cc_p = CodeCache::unallocated_capacity(CodeBlobType::MethodProfiled);
// Only attempt to start additional threads if the lock is free.
if (!CompileThread_lock->try_lock()) return;
if (_c2_compile_queue != NULL) {
int old_c2_count = _compilers[1]->num_compiler_threads();
int new_c2_count = MIN4(_c2_count,
_c2_compile_queue->size() / 2,
(int)(available_memory / (200*M)),
(int)(available_cc_np / (128*K)));
for (int i = old_c2_count; i < new_c2_count; i++) {
JavaThread *ct = make_thread(compiler2_object(i), _c2_compile_queue, _compilers[1], CHECK);
if (ct == NULL) break;
_compilers[1]->set_num_compiler_threads(i + 1);
if (TraceCompilerThreads) {
ResourceMark rm;
MutexLocker mu(Threads_lock);
tty->print_cr("Added compiler thread %s (available memory: %dMB, available non-profiled code cache: %dMB)",
ct->get_thread_name(), (int)(available_memory/M), (int)(available_cc_np/M));
}
}
}
if (_c1_compile_queue != NULL) {
int old_c1_count = _compilers[0]->num_compiler_threads();
int new_c1_count = MIN4(_c1_count,
_c1_compile_queue->size() / 4,
(int)(available_memory / (100*M)),
(int)(available_cc_p / (128*K)));
for (int i = old_c1_count; i < new_c1_count; i++) {
JavaThread *ct = make_thread(compiler1_object(i), _c1_compile_queue, _compilers[0], CHECK);
if (ct == NULL) break;
_compilers[0]->set_num_compiler_threads(i + 1);
if (TraceCompilerThreads) {
ResourceMark rm;
MutexLocker mu(Threads_lock);
tty->print_cr("Added compiler thread %s (available memory: %dMB, available profiled code cache: %dMB)",
ct->get_thread_name(), (int)(available_memory/M), (int)(available_cc_p/M));
}
}
}
CompileThread_lock->unlock();
}
/**
* Set the methods on the stack as on_stack so that redefine classes doesn't
* reclaim them. This method is executed at a safepoint.
*/
void CompileBroker::mark_on_stack() {
assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
// Since we are at a safepoint, we do not need a lock to access
// the compile queues.
if (_c2_compile_queue != NULL) {
_c2_compile_queue->mark_on_stack();
}
if (_c1_compile_queue != NULL) {
_c1_compile_queue->mark_on_stack();
}
}
// ------------------------------------------------------------------
// CompileBroker::compile_method
//
// Request compilation of a method.
void CompileBroker::compile_method_base(const methodHandle& method,
int osr_bci,
int comp_level,
const methodHandle& hot_method,
int hot_count,
CompileTask::CompileReason compile_reason,
bool blocking,
Thread* thread) {
guarantee(!method->is_abstract(), "cannot compile abstract methods");
assert(method->method_holder()->is_instance_klass(),
"sanity check");
assert(!method->method_holder()->is_not_initialized(),
"method holder must be initialized");
assert(!method->is_method_handle_intrinsic(), "do not enqueue these guys");
if (CIPrintRequests) {
tty->print("request: ");
method->print_short_name(tty);
if (osr_bci != InvocationEntryBci) {
tty->print(" osr_bci: %d", osr_bci);
}
tty->print(" level: %d comment: %s count: %d", comp_level, CompileTask::reason_name(compile_reason), hot_count);
if (!hot_method.is_null()) {
tty->print(" hot: ");
if (hot_method() != method()) {
hot_method->print_short_name(tty);
} else {
tty->print("yes");
}
}
tty->cr();
}
// A request has been made for compilation. Before we do any
// real work, check to see if the method has been compiled
// in the meantime with a definitive result.
if (compilation_is_complete(method, osr_bci, comp_level)) {
return;
}
#ifndef PRODUCT
if (osr_bci != -1 && !FLAG_IS_DEFAULT(OSROnlyBCI)) {
if ((OSROnlyBCI > 0) ? (OSROnlyBCI != osr_bci) : (-OSROnlyBCI == osr_bci)) {
// Positive OSROnlyBCI means only compile that bci. Negative means don't compile that bci.
return;
}
}
#endif
// If this method is already in the compile queue, then
// we do not block the current thread.
if (compilation_is_in_queue(method)) {
// We may want to decay our counter a bit here to prevent
// multiple denied requests for compilation. This is an
// open compilation policy issue. Note: The other possibility,
// in the case that this is a blocking compile request, is to have
// all subsequent blocking requesters wait for completion of
// ongoing compiles. Note that in this case we'll need a protocol
// for freeing the associated compile tasks. [Or we could have
// a single static monitor on which all these waiters sleep.]
return;
}
if (TieredCompilation) {
// Tiered policy requires MethodCounters to exist before adding a method to
// the queue. Create if we don't have them yet.
method->get_method_counters(thread);
}
// Outputs from the following MutexLocker block:
CompileTask* task = NULL;
CompileQueue* queue = compile_queue(comp_level);
// Acquire our lock.
{
MutexLocker locker(MethodCompileQueue_lock, thread);
// Make sure the method has not slipped into the queues since
// last we checked; note that those checks were "fast bail-outs".
// Here we need to be more careful, see 14012000 below.
if (compilation_is_in_queue(method)) {
return;
}
// We need to check again to see if the compilation has
// completed. A previous compilation may have registered
// some result.
if (compilation_is_complete(method, osr_bci, comp_level)) {
return;
}
// We now know that this compilation is not pending, complete,
// or prohibited. Assign a compile_id to this compilation
// and check to see if it is in our [Start..Stop) range.
int compile_id = assign_compile_id(method, osr_bci);
if (compile_id == 0) {
// The compilation falls outside the allowed range.
return;
}
#if INCLUDE_JVMCI
if (UseJVMCICompiler) {
if (blocking) {
// Don't allow blocking compiles for requests triggered by JVMCI.
if (thread->is_Compiler_thread()) {
blocking = false;
}
// Don't allow blocking compiles if inside a class initializer or while performing class loading
vframeStream vfst((JavaThread*) thread);
for (; !vfst.at_end(); vfst.next()) {
if (vfst.method()->is_static_initializer() ||
(vfst.method()->method_holder()->is_subclass_of(SystemDictionary::ClassLoader_klass()) &&
vfst.method()->name() == vmSymbols::loadClass_name())) {
blocking = false;
break;
}
}
// Don't allow blocking compilation requests to JVMCI
// if JVMCI itself is not yet initialized
if (!JVMCIRuntime::is_HotSpotJVMCIRuntime_initialized() && compiler(comp_level)->is_jvmci()) {
blocking = false;
}
// Don't allow blocking compilation requests if we are in JVMCIRuntime::shutdown
// to avoid deadlock between compiler thread(s) and threads run at shutdown
// such as the DestroyJavaVM thread.
if (JVMCIRuntime::shutdown_called()) {
blocking = false;
}
}
}
#endif // INCLUDE_JVMCI
// We will enter the compilation in the queue.
// 14012000: Note that this sets the queued_for_compile bits in
// the target method. We can now reason that a method cannot be
// queued for compilation more than once, as follows:
// Before a thread queues a task for compilation, it first acquires
// the compile queue lock, then checks if the method's queued bits
// are set or it has already been compiled. Thus there cannot be two
// instances of a compilation task for the same method on the
// compilation queue. Consider now the case where the compilation
// thread has already removed a task for that method from the queue
// and is in the midst of compiling it. In this case, the
// queued_for_compile bits must be set in the method (and these
// will be visible to the current thread, since the bits were set
// under protection of the compile queue lock, which we hold now).
// When the compilation completes, the compiler thread first sets
// the compilation result and then clears the queued_for_compile
// bits. Neither of these actions is protected by a barrier (or done
// under the protection of a lock), so the only guarantee we have
// (on machines with TSO (Total Store Order)) is that these values
// will update in that order. As a result, the only combinations of
// these bits that the current thread will see are, in temporal order:
// <RESULT, QUEUE> :
// <0, 1> : in compile queue, but not yet compiled
// <1, 1> : compiled but queue bit not cleared
// <1, 0> : compiled and queue bit cleared
// Because we first check the queue bits then check the result bits,
// we are assured that we cannot introduce a duplicate task.
// Note that if we did the tests in the reverse order (i.e. check
// result then check queued bit), we could get the result bit before
// the compilation completed, and the queue bit after the compilation
// completed, and end up introducing a "duplicate" (redundant) task.
// In that case, the compiler thread should first check if a method
// has already been compiled before trying to compile it.
// NOTE: in the event that there are multiple compiler threads and
// there is de-optimization/recompilation, things will get hairy,
// and in that case it's best to protect both the testing (here) of
// these bits, and their updating (here and elsewhere) under a
// common lock.
task = create_compile_task(queue,
compile_id, method,
osr_bci, comp_level,
hot_method, hot_count, compile_reason,
blocking);
}
if (blocking) {
wait_for_completion(task);
}
}
nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
int comp_level,
const methodHandle& hot_method, int hot_count,
CompileTask::CompileReason compile_reason,
Thread* THREAD) {
// Do nothing if the CompileBroker is not initialized or compiles are submitted at level none
if (!_initialized || comp_level == CompLevel_none) {
return NULL;
}
AbstractCompiler *comp = CompileBroker::compiler(comp_level);
assert(comp != NULL, "Ensure we have a compiler");
DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, comp);
nmethod* nm = CompileBroker::compile_method(method, osr_bci, comp_level, hot_method, hot_count, compile_reason, directive, THREAD);
DirectivesStack::release(directive);
return nm;
}
nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
int comp_level,
const methodHandle& hot_method, int hot_count,
CompileTask::CompileReason compile_reason,
DirectiveSet* directive,
Thread* THREAD) {
// make sure arguments make sense
assert(method->method_holder()->is_instance_klass(), "not an instance method");
assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods");
assert(!method->method_holder()->is_not_initialized(), "method holder must be initialized");
assert(!TieredCompilation || comp_level <= TieredStopAtLevel, "Invalid compilation level");
// allow any levels for WhiteBox
assert(WhiteBoxAPI || TieredCompilation || comp_level == CompLevel_highest_tier, "only CompLevel_highest_tier must be used in non-tiered");
// return quickly if possible
// lock, make sure that the compilation
// isn't prohibited in a straightforward way.
AbstractCompiler* comp = CompileBroker::compiler(comp_level);
if (comp == NULL || !comp->can_compile_method(method) ||
compilation_is_prohibited(method, osr_bci, comp_level, directive->ExcludeOption)) {
return NULL;
}
#if INCLUDE_JVMCI
if (comp->is_jvmci() && !JVMCIRuntime::can_initialize_JVMCI()) {
return NULL;
}
#endif
if (osr_bci == InvocationEntryBci) {
// standard compilation
CompiledMethod* method_code = method->code();
if (method_code != NULL && method_code->is_nmethod()) {
if (compilation_is_complete(method, osr_bci, comp_level)) {
return (nmethod*) method_code;
}
}
if (method->is_not_compilable(comp_level)) {
return NULL;
}
} else {
// osr compilation
#ifndef TIERED
// seems like an assert of dubious value
assert(comp_level == CompLevel_highest_tier,
"all OSR compiles are assumed to be at a single compilation level");
#endif // TIERED
// We accept a higher level osr method
nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
if (nm != NULL) return nm;
if (method->is_not_osr_compilable(comp_level)) return NULL;
}
assert(!HAS_PENDING_EXCEPTION, "No exception should be present");
// some prerequisites that are compiler specific
if (comp->is_c2()) {
method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NULL);
// Resolve all classes seen in the signature of the method
// we are compiling.
Method::load_signature_classes(method, CHECK_AND_CLEAR_NULL);
}
// If the method is native, do the lookup in the thread requesting
// the compilation. Native lookups can load code, which is not
// permitted during compilation.
//
// Note: A native method implies non-osr compilation which is
// checked with an assertion at the entry of this method.
if (method->is_native() && !method->is_method_handle_intrinsic()) {
bool in_base_library;
address adr = NativeLookup::lookup(method, in_base_library, THREAD);
if (HAS_PENDING_EXCEPTION) {
// In case of an exception looking up the method, we just forget
// about it. The interpreter will kick-in and throw the exception.
method->set_not_compilable(); // implies is_not_osr_compilable()
CLEAR_PENDING_EXCEPTION;
return NULL;
}
assert(method->has_native_function(), "must have native code by now");
}
// RedefineClasses() has replaced this method; just return
if (method->is_old()) {
return NULL;
}
// JVMTI -- post_compile_event requires jmethod_id() that may require
// a lock the compiling thread cannot acquire. Prefetch it here.
if (JvmtiExport::should_post_compiled_method_load()) {
method->jmethod_id();
}
// do the compilation
if (method->is_native()) {
if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) {
// The following native methods:
//
// java.lang.Float.intBitsToFloat
// java.lang.Float.floatToRawIntBits
// java.lang.Double.longBitsToDouble
// java.lang.Double.doubleToRawLongBits
//
// are called through the interpreter even if interpreter native stubs
// are not preferred (i.e., calling through adapter handlers is preferred).
// The reason is that on x86_32 signaling NaNs (sNaNs) are not preserved
// if the version of the methods from the native libraries is called.
// As the interpreter and the C2-intrinsified versions of these methods preserve
// sNaNs, that would result in inconsistent handling of sNaNs.
if ((UseSSE >= 1 &&
(method->intrinsic_id() == vmIntrinsics::_intBitsToFloat ||
method->intrinsic_id() == vmIntrinsics::_floatToRawIntBits)) ||
(UseSSE >= 2 &&
(method->intrinsic_id() == vmIntrinsics::_longBitsToDouble ||
method->intrinsic_id() == vmIntrinsics::_doubleToRawLongBits))) {
return NULL;
}
// To properly handle the appendix argument for out-of-line calls we are using a small trampoline that
// pops off the appendix argument and jumps to the target (see gen_special_dispatch in SharedRuntime).
//
// Since normal compiled-to-compiled calls are not able to handle such a thing we MUST generate an adapter
// in this case. If we can't generate and use one, we cannot execute the out-of-line method handle calls.
AdapterHandlerLibrary::create_native_wrapper(method);
} else {
return NULL;
}
} else {
// If the compiler is shut off due to the code cache getting full,
// fail out now so blocking compiles don't hang the Java thread.
if (!should_compile_new_jobs()) {
CompilationPolicy::policy()->delay_compilation(method());
return NULL;
}
bool is_blocking = !directive->BackgroundCompilationOption || CompileTheWorld || ReplayCompiles;
compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, compile_reason, is_blocking, THREAD);
}
// return requested nmethod
// We accept a higher level osr method
if (osr_bci == InvocationEntryBci) {
CompiledMethod* code = method->code();
if (code == NULL) {
return (nmethod*) code;
} else {
return code->as_nmethod_or_null();
}
}
return method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
}
// ------------------------------------------------------------------
// CompileBroker::compilation_is_complete
//
// See if compilation of this method is already complete.
bool CompileBroker::compilation_is_complete(const methodHandle& method,
int osr_bci,
int comp_level) {
bool is_osr = (osr_bci != standard_entry_bci);
if (is_osr) {
if (method->is_not_osr_compilable(comp_level)) {
return true;
} else {
nmethod* result = method->lookup_osr_nmethod_for(osr_bci, comp_level, true);
return (result != NULL);
}
} else {
if (method->is_not_compilable(comp_level)) {
return true;
} else {
CompiledMethod* result = method->code();
if (result == NULL) return false;
return comp_level == result->comp_level();
}
}
}
/**
* See if this compilation is already requested.
*
* Implementation note: there is only a single "is in queue" bit
* for each method. This means that the check below is overly
* conservative in the sense that an osr compilation in the queue
* will block a normal compilation from entering the queue (and vice
* versa). This can be remedied by a full queue search to disambiguate
* cases. If it is deemed profitable, this may be done.
*/
bool CompileBroker::compilation_is_in_queue(const methodHandle& method) {
return method->queued_for_compilation();
}
// ------------------------------------------------------------------
// CompileBroker::compilation_is_prohibited
//
// See if this compilation is not allowed.
bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded) {
bool is_native = method->is_native();
// Some compilers may not support the compilation of natives.
AbstractCompiler *comp = compiler(comp_level);
if (is_native &&
(!CICompileNatives || comp == NULL || !comp->supports_native())) {
method->set_not_compilable_quietly(comp_level);
return true;
}
bool is_osr = (osr_bci != standard_entry_bci);
// Some compilers may not support on stack replacement.
if (is_osr &&
(!CICompileOSR || comp == NULL || !comp->supports_osr())) {
method->set_not_osr_compilable(comp_level);
return true;
}
// The method may be explicitly excluded by the user.
double scale;
if (excluded || (CompilerOracle::has_option_value(method, "CompileThresholdScaling", scale) && scale == 0)) {
bool quietly = CompilerOracle::should_exclude_quietly();
if (PrintCompilation && !quietly) {
// This does not happen quietly...
ResourceMark rm;
tty->print("### Excluding %s:%s",
method->is_native() ? "generation of native wrapper" : "compile",
(method->is_static() ? " static" : ""));
method->print_short_name(tty);
tty->cr();
}
method->set_not_compilable(comp_level, !quietly, "excluded by CompileCommand");
}
return false;
}
/**
* Generate serialized IDs for compilation requests. If certain debugging flags are used
* and the ID is not within the specified range, the method is not compiled and 0 is returned.
* The function also allows generating separate compilation IDs for OSR compilations.
*/
int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) {
#ifdef ASSERT
bool is_osr = (osr_bci != standard_entry_bci);
int id;
if (method->is_native()) {
assert(!is_osr, "can't be osr");
// Adapters, native wrappers and method handle intrinsics
// should be generated always.
return Atomic::add(1, &_compilation_id);
} else if (CICountOSR && is_osr) {
id = Atomic::add(1, &_osr_compilation_id);
if (CIStartOSR <= id && id < CIStopOSR) {
return id;
}
} else {
id = Atomic::add(1, &_compilation_id);
if (CIStart <= id && id < CIStop) {
return id;
}
}
// Method was not in the appropriate compilation range.
method->set_not_compilable_quietly();
return 0;
#else
// CICountOSR is a develop flag and set to 'false' by default. In a product build,
// only _compilation_id is incremented.
return Atomic::add(1, &_compilation_id);
#endif
}
// ------------------------------------------------------------------
// CompileBroker::assign_compile_id_unlocked
//
// Public wrapper for assign_compile_id that acquires the needed locks
uint CompileBroker::assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci) {
MutexLocker locker(MethodCompileQueue_lock, thread);
return assign_compile_id(method, osr_bci);
}
// ------------------------------------------------------------------
// CompileBroker::preload_classes
void CompileBroker::preload_classes(const methodHandle& method, TRAPS) {
// Move this code over from c1_Compiler.cpp
ShouldNotReachHere();
}
// ------------------------------------------------------------------
// CompileBroker::create_compile_task
//
// Create a CompileTask object representing the current request for
// compilation. Add this task to the queue.
CompileTask* CompileBroker::create_compile_task(CompileQueue* queue,
int compile_id,
const methodHandle& method,
int osr_bci,
int comp_level,
const methodHandle& hot_method,
int hot_count,
CompileTask::CompileReason compile_reason,
bool blocking) {
CompileTask* new_task = CompileTask::allocate();
new_task->initialize(compile_id, method, osr_bci, comp_level,
hot_method, hot_count, compile_reason,
blocking);
queue->add(new_task);
return new_task;
}
#if INCLUDE_JVMCI
// The number of milliseconds to wait before checking if
// JVMCI compilation has made progress.
static const long JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE = 1000;
// The number of JVMCI compilation progress checks that must fail
// before unblocking a thread waiting for a blocking compilation.
static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 10;
/**
* Waits for a JVMCI compiler to complete a given task. This thread
* waits until either the task completes or it sees no JVMCI compilation
* progress for N consecutive milliseconds where N is
* JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE *
* JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS.
*
* @return true if this thread needs to free/recycle the task
*/
bool CompileBroker::wait_for_jvmci_completion(JVMCICompiler* jvmci, CompileTask* task, JavaThread* thread) {
MutexLocker waiter(task->lock(), thread);
int progress_wait_attempts = 0;
int methods_compiled = jvmci->methods_compiled();
while (!task->is_complete() && !is_compilation_disabled_forever() &&
task->lock()->wait(!Mutex::_no_safepoint_check_flag, JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE)) {
CompilerThread* jvmci_compiler_thread = task->jvmci_compiler_thread();
bool progress;
if (jvmci_compiler_thread != NULL) {
// If the JVMCI compiler thread is not blocked, we deem it to be making progress.
progress = jvmci_compiler_thread->thread_state() != _thread_blocked;
} else {
// Still waiting on JVMCI compiler queue. This thread may be holding a lock
// that all JVMCI compiler threads are blocked on. We use the counter for
// successful JVMCI compilations to determine whether JVMCI compilation
// is still making progress through the JVMCI compiler queue.
progress = jvmci->methods_compiled() != methods_compiled;
}
if (!progress) {
if (++progress_wait_attempts == JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS) {
if (PrintCompilation) {
task->print(tty, "wait for blocking compilation timed out");
}
break;
}
} else {
progress_wait_attempts = 0;
if (jvmci_compiler_thread == NULL) {
methods_compiled = jvmci->methods_compiled();
}
}
}
task->clear_waiter();
return task->is_complete();
}
#endif
/**
* Wait for the compilation task to complete.
*/
void CompileBroker::wait_for_completion(CompileTask* task) {
if (CIPrintCompileQueue) {
ttyLocker ttyl;
tty->print_cr("BLOCKING FOR COMPILE");
}
assert(task->is_blocking(), "can only wait on blocking task");
JavaThread* thread = JavaThread::current();
thread->set_blocked_on_compilation(true);
methodHandle method(thread, task->method());
bool free_task;
#if INCLUDE_JVMCI
AbstractCompiler* comp = compiler(task->comp_level());
if (comp->is_jvmci()) {
free_task = wait_for_jvmci_completion((JVMCICompiler*) comp, task, thread);
} else
#endif
{
MutexLocker waiter(task->lock(), thread);
free_task = true;
while (!task->is_complete() && !is_compilation_disabled_forever()) {
task->lock()->wait();
}
}
thread->set_blocked_on_compilation(false);
if (free_task) {
if (is_compilation_disabled_forever()) {
CompileTask::free(task);
return;
}
// It is harmless to check this status without the lock, because
// completion is a stable property (until the task object is recycled).
assert(task->is_complete(), "Compilation should have completed");
assert(task->code_handle() == NULL, "must be reset");
// By convention, the waiter is responsible for recycling a
// blocking CompileTask. Since there is only one waiter ever
// waiting on a CompileTask, we know that no one else will
// be using this CompileTask; we can free it.
CompileTask::free(task);
}
}
/**
* Initialize compiler thread(s) + compiler object(s). The postcondition
* of this function is that the compiler runtimes are initialized and that
* compiler threads can start compiling.
*/
bool CompileBroker::init_compiler_runtime() {
CompilerThread* thread = CompilerThread::current();
AbstractCompiler* comp = thread->compiler();
// Final sanity check - the compiler object must exist
guarantee(comp != NULL, "Compiler object must exist");
int system_dictionary_modification_counter;
{
MutexLocker locker(Compile_lock, thread);
system_dictionary_modification_counter = SystemDictionary::number_of_modifications();
}
{
// Must switch to native to allocate ci_env
ThreadToNativeFromVM ttn(thread);
ciEnv ci_env(NULL, system_dictionary_modification_counter);
// Cache Jvmti state
ci_env.cache_jvmti_state();
// Cache DTrace flags
ci_env.cache_dtrace_flags();
// Switch back to VM state to do compiler initialization
ThreadInVMfromNative tv(thread);
ResetNoHandleMark rnhm;
// Perform per-thread and global initializations
comp->initialize();
}
if (comp->is_failed()) {
disable_compilation_forever();
// If compiler initialization failed, no compiler thread that is specific to a
// particular compiler runtime will ever start to compile methods.
shutdown_compiler_runtime(comp, thread);
return false;
}
// C1 specific check
if (comp->is_c1() && (thread->get_buffer_blob() == NULL)) {
warning("Initialization of %s thread failed (no space to run compilers)", thread->name());
return false;
}
return true;
}
/**
* If C1 and/or C2 initialization failed, we shut down all compilation.
* We do this to keep things simple. This can be changed if it ever turns
* out to be a problem.
*/
void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
// Free buffer blob, if allocated
if (thread->get_buffer_blob() != NULL) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free(thread->get_buffer_blob());
}
if (comp->should_perform_shutdown()) {
// There are two reasons for shutting down the compiler
// 1) compiler runtime initialization failed
// 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing
warning("%s initialization failed. Shutting down all compilers", comp->name());
// Only one thread per compiler runtime object enters here
// Set state to shut down
comp->set_shut_down();
// Delete all queued compilation tasks to make compiler threads exit faster.
if (_c1_compile_queue != NULL) {
_c1_compile_queue->free_all();
}
if (_c2_compile_queue != NULL) {
_c2_compile_queue->free_all();
}
// Set flags so that we continue execution using the interpreter only.
UseCompiler = false;
UseInterpreter = true;
// We could delete compiler runtimes also. However, there are references to
// the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) which then
// fail. This can be done later if necessary.
}
}
/**
* Helper function to create new or reuse old CompileLog.
*/
CompileLog* CompileBroker::get_log(CompilerThread* ct) {
if (!LogCompilation) return NULL;
AbstractCompiler *compiler = ct->compiler();
bool c1 = compiler->is_c1();
jobject* compiler_objects = c1 ? _compiler1_objects : _compiler2_objects;
assert(compiler_objects != NULL, "must be initialized at this point");
CompileLog** logs = c1 ? _compiler1_logs : _compiler2_logs;
assert(logs != NULL, "must be initialized at this point");
int count = c1 ? _c1_count : _c2_count;
// Find Compiler number by its threadObj.
oop compiler_obj = ct->threadObj();
int compiler_number = 0;
bool found = false;
for (; compiler_number < count; compiler_number++) {
if (oopDesc::equals(JNIHandles::resolve_non_null(compiler_objects[compiler_number]), compiler_obj)) {
found = true;
break;
}
}
assert(found, "Compiler must exist at this point");
// Determine pointer for this thread's log.
CompileLog** log_ptr = &logs[compiler_number];
// Return old one if it exists.
CompileLog* log = *log_ptr;
if (log != NULL) {
ct->init_log(log);
return log;
}
// Create a new one and remember it.
init_compiler_thread_log();
log = ct->log();
*log_ptr = log;
return log;
}
// ------------------------------------------------------------------
// CompileBroker::compiler_thread_loop
//
// The main loop run by a CompilerThread.
void CompileBroker::compiler_thread_loop() {
CompilerThread* thread = CompilerThread::current();
CompileQueue* queue = thread->queue();
// For the thread that initializes the ciObjectFactory
// this resource mark holds all the shared objects
ResourceMark rm;
// First thread to get here will initialize the compiler interface
{
ASSERT_IN_VM;
MutexLocker only_one (CompileThread_lock, thread);
if (!ciObjectFactory::is_initialized()) {
ciObjectFactory::initialize();
}
}
// Open a log.
CompileLog* log = get_log(thread);
if (log != NULL) {
log->begin_elem("start_compile_thread name='%s' thread='" UINTX_FORMAT "' process='%d'",
thread->name(),
os::current_thread_id(),
os::current_process_id());
log->stamp();
log->end_elem();
}
// If compiler thread/runtime initialization fails, exit the compiler thread
if (!init_compiler_runtime()) {
return;
}
thread->start_idle_timer();
// Poll for new compilation tasks as long as the JVM runs. Compilation
// should only be disabled if something went wrong while initializing the
// compiler runtimes. This, in turn, should not happen. The only known case
// when compiler runtime initialization fails is if there is not enough free
// space in the code cache to generate the necessary stubs, etc.
while (!is_compilation_disabled_forever()) {
// We need this HandleMark to avoid leaking VM handles.
HandleMark hm(thread);
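// get() blocks while the queue is empty; it may return NULL when this thread is
// allowed to exit (e.g., after an idle timeout with UseDynamicNumberOfCompilerThreads,
// or when compilation has been disabled forever).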
CompileTask* task = queue->get();
if (task == NULL) {
if (UseDynamicNumberOfCompilerThreads) {
// Access compiler_count under lock to enforce consistency.
MutexLocker only_one(CompileThread_lock);
if (can_remove(thread, true)) {
if (TraceCompilerThreads) {
tty->print_cr("Removing compiler thread %s after " JLONG_FORMAT " ms idle time",
thread->name(), thread->idle_time_millis());
}
// Free buffer blob, if allocated
if (thread->get_buffer_blob() != NULL) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free(thread->get_buffer_blob());
}
return; // Stop this thread.
}
}
} else {
// Give compiler threads an extra quantum. They tend to be bursty and
// this helps the compiler to finish up the job.
if (CompilerThreadHintNoPreempt) {
os::hint_no_preempt();
}
// Assign the task to the current thread. Mark this compilation
// thread as active for the profiler.
// CompileTaskWrapper also keeps the Method* from being deallocated if redefinition
// occurs after fetching the compile task off the queue.
CompileTaskWrapper ctw(task);
nmethodLocker result_handle; // (handle for the nmethod produced by this task)
task->set_code_handle(&result_handle);
methodHandle method(thread, task->method());
// Never compile a method if breakpoints are present in it
if (method()->number_of_breakpoints() == 0) {
// Compile the method.
if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
invoke_compiler_on_method(task);
thread->start_idle_timer();
} else {
// After compilation is disabled, remove remaining methods from queue
method->clear_queued_for_compilation();
task->set_failure_reason("compilation is disabled");
}
}
if (UseDynamicNumberOfCompilerThreads) {
possibly_add_compiler_threads();
}
}
}
// Shut down compiler runtime
shutdown_compiler_runtime(thread->compiler(), thread);
}
// ------------------------------------------------------------------
// CompileBroker::init_compiler_thread_log
//
// Set up state required by +LogCompilation.
void CompileBroker::init_compiler_thread_log() {
CompilerThread* thread = CompilerThread::current();
char file_name[4*K];
FILE* fp = NULL;
intx thread_id = os::current_thread_id();
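// Try the temp directory first; if the log file cannot be opened there, fall
// back to the current directory (dir == NULL) on the second iteration.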
for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) {
const char* dir = (try_temp_dir ? os::get_temp_directory() : NULL);
if (dir == NULL) {
jio_snprintf(file_name, sizeof(file_name), "hs_c" UINTX_FORMAT "_pid%u.log",
thread_id, os::current_process_id());
} else {
jio_snprintf(file_name, sizeof(file_name),
"%s%shs_c" UINTX_FORMAT "_pid%u.log", dir,
os::file_separator(), thread_id, os::current_process_id());
}
fp = fopen(file_name, "wt");
if (fp != NULL) {
if (LogCompilation && Verbose) {
tty->print_cr("Opening compilation log %s", file_name);
}
CompileLog* log = new(ResourceObj::C_HEAP, mtCompiler) CompileLog(file_name, fp, thread_id);
if (log == NULL) {
fclose(fp);
return;
}
thread->init_log(log);
if (xtty != NULL) {
ttyLocker ttyl;
// Record any per thread log files
xtty->elem("thread_logfile thread='" INTX_FORMAT "' filename='%s'", thread_id, file_name);
}
return;
}
}
warning("Cannot open log file: %s", file_name);
}
void CompileBroker::log_metaspace_failure() {
const char* message = "some methods may not be compiled because metaspace "
"is out of memory";
if (_compilation_log != NULL) {
_compilation_log->log_metaspace_failure(message);
}
if (PrintCompilation) {
tty->print_cr("COMPILE PROFILING SKIPPED: %s", message);
}
}
// ------------------------------------------------------------------
// CompileBroker::set_should_block
//
// Set _should_block.
// Call this from the VM, with Threads_lock held and a safepoint requested.
void CompileBroker::set_should_block() {
assert(Threads_lock->owner() == Thread::current(), "must have threads lock");
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint already");
#ifndef PRODUCT
if (PrintCompilation && (Verbose || WizardMode))
tty->print_cr("notifying compiler thread pool to block");
#endif
_should_block = true;
}
// ------------------------------------------------------------------
// CompileBroker::maybe_block
//
// Call this from the compiler at convenient points, to poll for _should_block.
void CompileBroker::maybe_block() {
if (_should_block) {
#ifndef PRODUCT
if (PrintCompilation && (Verbose || WizardMode))
tty->print_cr("compiler thread " INTPTR_FORMAT " poll detects block request", p2i(Thread::current()));
#endif
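// Transitioning into the VM blocks this thread while the requested safepoint is in progress.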
ThreadInVMfromNative tivfn(JavaThread::current());
}
}
// wrapper for CodeCache::print_summary()
static void codecache_print(bool detailed)
{
ResourceMark rm;
stringStream s;
// Dump the code cache into a buffer before locking the tty,
// so that the CodeCache_lock is not held while printing.
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::print_summary(&s, detailed);
}
ttyLocker ttyl;
tty->print("%s", s.as_string());
}
// wrapper for CodeCache::print_summary() using outputStream
static void codecache_print(outputStream* out, bool detailed) {
ResourceMark rm;
stringStream s;
// Dump code cache into a buffer
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::print_summary(&s, detailed);
}
char* remaining_log = s.as_string();
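// Emit the buffered summary one line at a time so each line goes through print_cr() on the given stream.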
while (*remaining_log != '\0') {
char* eol = strchr(remaining_log, '\n');
if (eol == NULL) {
out->print_cr("%s", remaining_log);
remaining_log = remaining_log + strlen(remaining_log);
} else {
*eol = '\0';
out->print_cr("%s", remaining_log);
remaining_log = eol + 1;
}
}
}
void CompileBroker::post_compile(CompilerThread* thread, CompileTask* task, bool success, ciEnv* ci_env,
int compilable, const char* failure_reason) {
if (success) {
task->mark_success();
if (ci_env != NULL) {
task->set_num_inlined_bytecodes(ci_env->num_inlined_bytecodes());
}
if (_compilation_log != NULL) {
nmethod* code = task->code();
if (code != NULL) {
_compilation_log->log_nmethod(thread, code);
}
}
} else if (AbortVMOnCompilationFailure) {
if (compilable == ciEnv::MethodCompilable_not_at_tier) {
fatal("Not compilable at tier %d: %s", task->comp_level(), failure_reason);
}
if (compilable == ciEnv::MethodCompilable_never) {
fatal("Never compilable: %s", failure_reason);
}
}
// simulate crash during compilation
assert(task->compile_id() != CICrashAt, "just as planned");
}
static void post_compilation_event(EventCompilation* event, CompileTask* task) {
assert(event != NULL, "invariant");
assert(event->should_commit(), "invariant");
event->set_method(task->method());
event->set_compileId(task->compile_id());
event->set_compileLevel(task->comp_level());
event->set_succeded(task->is_success());
event->set_isOsr(task->osr_bci() != CompileBroker::standard_entry_bci);
event->set_codeSize((task->code() == NULL) ? 0 : task->code()->total_size());
event->set_inlinedBytes(task->num_inlined_bytecodes());
event->commit();
}
int DirectivesStack::_depth = 0;
CompilerDirectives* DirectivesStack::_top = NULL;
CompilerDirectives* DirectivesStack::_bottom = NULL;
// ------------------------------------------------------------------
// CompileBroker::invoke_compiler_on_method
//
// Compile a method.
//
void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
task->print_ul();
if (PrintCompilation) {
ResourceMark rm;
task->print_tty();
}
elapsedTimer time;
CompilerThread* thread = CompilerThread::current();
ResourceMark rm(thread);
if (LogEvents) {
_compilation_log->log_compile(thread, task);
}
// Common flags.
uint compile_id = task->compile_id();
int osr_bci = task->osr_bci();
bool is_osr = (osr_bci != standard_entry_bci);
bool should_log = (thread->log() != NULL);
bool should_break = false;
const int task_level = task->comp_level();
AbstractCompiler* comp = task->compiler();
DirectiveSet* directive;
{
// create the handle inside its own block so it can't
// accidentally be referenced once the thread transitions to
// native. The NoHandleMark before the transition should catch
// any cases where this occurs in the future.
methodHandle method(thread, task->method());
assert(!method->is_native(), "no longer compile natives");
// Look up matching directives
directive = DirectivesStack::getMatchingDirective(method, comp);
// Save information about this method in case of failure.
set_last_compile(thread, method, is_osr, task_level);
DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level));
}
should_break = directive->BreakAtExecuteOption || task->check_break_at_flags();
if (should_log && !directive->LogOption) {
should_log = false;
}
// Allocate a new set of JNI handles.
push_jni_handle_block();
Method* target_handle = task->method();
int compilable = ciEnv::MethodCompilable;
const char* failure_reason = NULL;
const char* retry_message = NULL;
int system_dictionary_modification_counter;
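// Read the SystemDictionary modification counter under Compile_lock so that the
// compiler environment can later detect class loading that happened during this
// compile when it validates dependencies.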
{
MutexLocker locker(Compile_lock, thread);
system_dictionary_modification_counter = SystemDictionary::number_of_modifications();
}
#if INCLUDE_JVMCI
if (UseJVMCICompiler && comp != NULL && comp->is_jvmci()) {
JVMCICompiler* jvmci = (JVMCICompiler*) comp;
TraceTime t1("compilation", &time);
EventCompilation event;
// Skip redefined methods
if (target_handle->is_old()) {
failure_reason = "redefined method";
retry_message = "not retryable";
compilable = ciEnv::MethodCompilable_never;
} else {
JVMCIEnv env(task, system_dictionary_modification_counter);
methodHandle method(thread, target_handle);
jvmci->compile_method(method, osr_bci, &env);
failure_reason = env.failure_reason();
if (!env.retryable()) {
retry_message = "not retryable";
compilable = ciEnv::MethodCompilable_not_at_tier;
}
}
post_compile(thread, task, task->code() != NULL, NULL, compilable, failure_reason);
if (event.should_commit()) {
post_compilation_event(&event, task);
}
} else
#endif // INCLUDE_JVMCI
{
NoHandleMark nhm;
ThreadToNativeFromVM ttn(thread);
ciEnv ci_env(task, system_dictionary_modification_counter);
if (should_break) {
ci_env.set_break_at_compile(true);
}
if (should_log) {
ci_env.set_log(thread->log());
}
assert(thread->env() == &ci_env, "set by ci_env");
// The thread->env() field is cleared in ~CompileTaskWrapper.
// Cache Jvmti state
ci_env.cache_jvmti_state();
// Cache DTrace flags
ci_env.cache_dtrace_flags();
ciMethod* target = ci_env.get_method_from_handle(target_handle);
TraceTime t1("compilation", &time);
EventCompilation event;
if (comp == NULL) {
ci_env.record_method_not_compilable("no compiler", !TieredCompilation);
} else {
if (WhiteBoxAPI && WhiteBox::compilation_locked) {
MonitorLockerEx locker(Compilation_lock, Mutex::_no_safepoint_check_flag);
while (WhiteBox::compilation_locked) {
locker.wait(Mutex::_no_safepoint_check_flag);
}
}
comp->compile_method(&ci_env, target, osr_bci, directive);
}
if (!ci_env.failing() && task->code() == NULL) {
//assert(false, "compiler should always document failure");
// The compiler elected, without comment, not to register a result.
// Do not attempt further compilations of this method.
ci_env.record_method_not_compilable("compile failed", !TieredCompilation);
}
// Copy this bit to the enclosing block:
compilable = ci_env.compilable();
if (ci_env.failing()) {
failure_reason = ci_env.failure_reason();
retry_message = ci_env.retry_message();
ci_env.report_failure(failure_reason);
}
post_compile(thread, task, !ci_env.failing(), &ci_env, compilable, failure_reason);
if (event.should_commit()) {
post_compilation_event(&event, task);
}
}
// Remove the JNI handle block after the ciEnv destructor has run in
// the previous block.
pop_jni_handle_block();
if (failure_reason != NULL) {
task->set_failure_reason(failure_reason);
if (_compilation_log != NULL) {
_compilation_log->log_failure(thread, task, failure_reason, retry_message);
}
if (PrintCompilation) {
FormatBufferResource msg = retry_message != NULL ?
FormatBufferResource("COMPILE SKIPPED: %s (%s)", failure_reason, retry_message) :
FormatBufferResource("COMPILE SKIPPED: %s", failure_reason);
task->print(tty, msg);
}
}
methodHandle method(thread, task->method());
DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success());
collect_statistics(thread, time, task);
nmethod* nm = task->code();
if (nm != NULL) {
nm->maybe_print_nmethod(directive);
}
DirectivesStack::release(directive);
if (PrintCompilation && PrintCompilation2) {
tty->print("%7d ", (int) tty->time_stamp().milliseconds()); // print timestamp
tty->print("%4d ", compile_id); // print compilation number
tty->print("%s ", (is_osr ? "%" : " "));
if (task->code() != NULL) {
tty->print("size: %d(%d) ", task->code()->total_size(), task->code()->insts_size());
}
tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes());
}
Log(compilation, codecache) log;
if (log.is_debug()) {
LogStream ls(log.debug());
codecache_print(&ls, /* detailed= */ false);
}
if (PrintCodeCacheOnCompilation) {
codecache_print(/* detailed= */ false);
}
// Disable compilation, if required.
switch (compilable) {
case ciEnv::MethodCompilable_never:
if (is_osr)
method->set_not_osr_compilable_quietly();
else
method->set_not_compilable_quietly();
break;
case ciEnv::MethodCompilable_not_at_tier:
if (is_osr)
method->set_not_osr_compilable_quietly(task_level);
else
method->set_not_compilable_quietly(task_level);
break;
}
// Note that the queued_for_compilation bits are cleared without
// protection of a mutex. [They were set by the requester thread,
// when adding the task to the compile queue -- at which time the
// compile queue lock was held. Subsequently, we acquired the compile
// queue lock to get this task off the compile queue; thus (to belabour
// the point somewhat) our clearing of the bits must be occurring
// only after the setting of the bits. See also 14012000 above.
method->clear_queued_for_compilation();
#ifdef ASSERT
if (CollectedHeap::fired_fake_oom()) {
// The current compile received a fake OOM during compilation so
// go ahead and exit the VM since the test apparently succeeded
tty->print_cr("*** Shutting down VM after successful fake OOM");
vm_exit(0);
}
#endif
}
/**
* The CodeCache is full. Print warning and disable compilation.
* Schedule code cache cleaning so compilation can continue later.
* This function needs to be called only from CodeCache::allocate(),
* since we currently handle a full code cache uniformly.
*/
void CompileBroker::handle_full_code_cache(int code_blob_type) {
UseInterpreter = true;
if (UseCompiler || AlwaysCompileLoopMethods) {
if (xtty != NULL) {
ResourceMark rm;
stringStream s;
// Dump code cache state into a buffer before locking the tty,
// because log_state() will use locks causing lock conflicts.
CodeCache::log_state(&s);
// Lock to prevent tearing
ttyLocker ttyl;
xtty->begin_elem("code_cache_full");
xtty->print("%s", s.as_string());
xtty->stamp();
xtty->end_elem();
}
#ifndef PRODUCT
if (CompileTheWorld || ExitOnFullCodeCache) {
codecache_print(/* detailed= */ true);
before_exit(JavaThread::current());
exit_globals(); // will delete tty
vm_direct_exit(CompileTheWorld ? 0 : 1);
}
#endif
if (UseCodeCacheFlushing) {
// Since code cache is full, immediately stop new compiles
if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
NMethodSweeper::log_sweep("disable_compiler");
}
} else {
disable_compilation_forever();
}
CodeCache::report_codemem_full(code_blob_type, should_print_compiler_warning());
}
}
// ------------------------------------------------------------------
// CompileBroker::set_last_compile
//
// Record this compilation for debugging purposes.
void CompileBroker::set_last_compile(CompilerThread* thread, const methodHandle& method, bool is_osr, int comp_level) {
ResourceMark rm;
char* method_name = method->name()->as_C_string();
strncpy(_last_method_compiled, method_name, CompileBroker::name_buffer_length);
_last_method_compiled[CompileBroker::name_buffer_length - 1] = '\0'; // ensure null terminated
char current_method[CompilerCounters::cmname_buffer_length];
size_t maxLen = CompilerCounters::cmname_buffer_length;
if (UsePerfData) {
const char* class_name = method->method_holder()->name()->as_C_string();
size_t s1len = strlen(class_name);
size_t s2len = strlen(method_name);
// check if we need to truncate the string
if (s1len + s2len + 2 > maxLen) {
// the strategy is to lop off the leading characters of the
// class name and the trailing characters of the method name.
if (s2len + 2 > maxLen) {
// lop off the entire class name string, let snprintf handle
// truncation of the method name.
class_name += s1len; // now points at the trailing '\0' (empty string)
}
else {
// lop off the extra characters from the front of the class name
class_name += ((s1len + s2len + 2) - maxLen);
}
}
jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name);
}
if (CICountOSR && is_osr) {
_last_compile_type = osr_compile;
} else {
_last_compile_type = normal_compile;
}
_last_compile_level = comp_level;
if (UsePerfData) {
CompilerCounters* counters = thread->counters();
counters->set_current_method(current_method);
counters->set_compile_type((jlong)_last_compile_type);
}
}
// ------------------------------------------------------------------
// CompileBroker::push_jni_handle_block
//
// Push on a new block of JNI handles.
void CompileBroker::push_jni_handle_block() {
JavaThread* thread = JavaThread::current();
// Allocate a new block for JNI handles.
// Inlined code from jni_PushLocalFrame()
JNIHandleBlock* java_handles = thread->active_handles();
JNIHandleBlock* compile_handles = JNIHandleBlock::allocate_block(thread);
assert(compile_handles != NULL && java_handles != NULL, "should not be NULL");
compile_handles->set_pop_frame_link(java_handles); // make sure java handles get gc'd.
thread->set_active_handles(compile_handles);
}
// ------------------------------------------------------------------
// CompileBroker::pop_jni_handle_block
//
// Pop off the current block of JNI handles.
void CompileBroker::pop_jni_handle_block() {
JavaThread* thread = JavaThread::current();
// Release our JNI handle block
JNIHandleBlock* compile_handles = thread->active_handles();
JNIHandleBlock* java_handles = compile_handles->pop_frame_link();
thread->set_active_handles(java_handles);
compile_handles->set_pop_frame_link(NULL);
JNIHandleBlock::release_block(compile_handles, thread); // may block
}
// ------------------------------------------------------------------
// CompileBroker::collect_statistics
//
// Collect statistics about the compilation.
void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task) {
bool success = task->is_success();
methodHandle method (thread, task->method());
uint compile_id = task->compile_id();
bool is_osr = (task->osr_bci() != standard_entry_bci);
nmethod* code = task->code();
CompilerCounters* counters = thread->counters();
assert(code == NULL || code->is_locked_by_vm(), "will survive the MutexLocker");
MutexLocker locker(CompileStatistics_lock);
// _perf variables are production performance counters which are
// updated regardless of the setting of the CITime and CITimeEach flags
//
// account all time, including bailouts and failures in this counter;
// C1 and C2 counters are counting both successful and unsuccessful compiles
_t_total_compilation.add(time);
if (!success) {
_total_bailout_count++;
if (UsePerfData) {
_perf_last_failed_method->set_value(counters->current_method());
_perf_last_failed_type->set_value(counters->compile_type());
_perf_total_bailout_count->inc();
}
_t_bailedout_compilation.add(time);
} else if (code == NULL) {
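// The task was marked successful, but no nmethod remains (e.g., it was already
// invalidated); count it as an invalidated compilation.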
if (UsePerfData) {
_perf_last_invalidated_method->set_value(counters->current_method());
_perf_last_invalidated_type->set_value(counters->compile_type());
_perf_total_invalidated_count->inc();
}
_total_invalidated_count++;
_t_invalidated_compilation.add(time);
} else {
// Compilation succeeded
// update compilation ticks - used by the implementation of
// java.lang.management.CompilationMBean
_perf_total_compilation->inc(time.ticks());
_peak_compilation_time = time.milliseconds() > _peak_compilation_time ? time.milliseconds() : _peak_compilation_time;
if (CITime) {
int bytes_compiled = method->code_size() + task->num_inlined_bytecodes();
if (is_osr) {
_t_osr_compilation.add(time);
_sum_osr_bytes_compiled += bytes_compiled;
} else {
_t_standard_compilation.add(time);
_sum_standard_bytes_compiled += method->code_size() + task->num_inlined_bytecodes();
}
#if INCLUDE_JVMCI
AbstractCompiler* comp = compiler(task->comp_level());
if (comp) {
CompilerStatistics* stats = comp->stats();
if (stats) {
if (is_osr) {
stats->_osr.update(time, bytes_compiled);
} else {
stats->_standard.update(time, bytes_compiled);
}
stats->_nmethods_size += code->total_size();
stats->_nmethods_code_size += code->insts_size();
} else { // if (!stats)
assert(false, "Compiler statistics object must exist");
}
} else { // if (!comp)
assert(false, "Compiler object must exist");
}
#endif // INCLUDE_JVMCI
}
if (UsePerfData) {
// save the name of the last method compiled
_perf_last_method->set_value(counters->current_method());
_perf_last_compile_type->set_value(counters->compile_type());
_perf_last_compile_size->set_value(method->code_size() +
task->num_inlined_bytecodes());
if (is_osr) {
_perf_osr_compilation->inc(time.ticks());
_perf_sum_osr_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes());
} else {
_perf_standard_compilation->inc(time.ticks());
_perf_sum_standard_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes());
}
}
if (CITimeEach) {
float bytes_per_sec = 1.0 * (method->code_size() + task->num_inlined_bytecodes()) / time.seconds();
tty->print_cr("%3d seconds: %f bytes/sec : %f (bytes %d + %d inlined)",
compile_id, time.seconds(), bytes_per_sec, method->code_size(), task->num_inlined_bytecodes());
}
// Collect counts of successful compilations
_sum_nmethod_size += code->total_size();
_sum_nmethod_code_size += code->insts_size();
_total_compile_count++;
if (UsePerfData) {
_perf_sum_nmethod_size->inc( code->total_size());
_perf_sum_nmethod_code_size->inc(code->insts_size());
_perf_total_compile_count->inc();
}
if (is_osr) {
if (UsePerfData) _perf_total_osr_compile_count->inc();
_total_osr_compile_count++;
} else {
if (UsePerfData) _perf_total_standard_compile_count->inc();
_total_standard_compile_count++;
}
}
// set the current method for the thread to null
if (UsePerfData) counters->set_current_method("");
}
const char* CompileBroker::compiler_name(int comp_level) {
AbstractCompiler *comp = CompileBroker::compiler(comp_level);
if (comp == NULL) {
return "no compiler";
} else {
return (comp->name());
}
}
#if INCLUDE_JVMCI
void CompileBroker::print_times(AbstractCompiler* comp) {
CompilerStatistics* stats = comp->stats();
if (stats) {
tty->print_cr(" %s {speed: %d bytes/s; standard: %6.3f s, %d bytes, %d methods; osr: %6.3f s, %d bytes, %d methods; nmethods_size: %d bytes; nmethods_code_size: %d bytes}",
comp->name(), stats->bytes_per_second(),
stats->_standard._time.seconds(), stats->_standard._bytes, stats->_standard._count,
stats->_osr._time.seconds(), stats->_osr._bytes, stats->_osr._count,
stats->_nmethods_size, stats->_nmethods_code_size);
} else { // if (!stats)
assert(false, "Compiler statistics object must exist");
}
comp->print_timers();
}
#endif // INCLUDE_JVMCI
void CompileBroker::print_times(bool per_compiler, bool aggregate) {
#if INCLUDE_JVMCI
elapsedTimer standard_compilation;
elapsedTimer total_compilation;
elapsedTimer osr_compilation;
int standard_bytes_compiled = 0;
int osr_bytes_compiled = 0;
int standard_compile_count = 0;
int osr_compile_count = 0;
int total_compile_count = 0;
int nmethods_size = 0;
int nmethods_code_size = 0;
bool printedHeader = false;
for (unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) {
AbstractCompiler* comp = _compilers[i];
if (comp != NULL) {
if (per_compiler && aggregate && !printedHeader) {
printedHeader = true;
tty->cr();
tty->print_cr("Individual compiler times (for compiled methods only)");
tty->print_cr("------------------------------------------------");
tty->cr();
}
CompilerStatistics* stats = comp->stats();
if (stats) {
standard_compilation.add(stats->_standard._time);
osr_compilation.add(stats->_osr._time);
standard_bytes_compiled += stats->_standard._bytes;
osr_bytes_compiled += stats->_osr._bytes;
standard_compile_count += stats->_standard._count;
osr_compile_count += stats->_osr._count;
nmethods_size += stats->_nmethods_size;
nmethods_code_size += stats->_nmethods_code_size;
} else { // if (!stats)
assert(false, "Compiler statistics object must exist");
}
if (per_compiler) {
print_times(comp);
}
}
}
total_compile_count = osr_compile_count + standard_compile_count;
total_compilation.add(osr_compilation);
total_compilation.add(standard_compilation);
// In hosted mode, print the JVMCI compiler specific counters manually.
if (!UseJVMCICompiler) {
JVMCICompiler::print_compilation_timers();
}
#else // INCLUDE_JVMCI
elapsedTimer standard_compilation = CompileBroker::_t_standard_compilation;
elapsedTimer osr_compilation = CompileBroker::_t_osr_compilation;
elapsedTimer total_compilation = CompileBroker::_t_total_compilation;
int standard_bytes_compiled = CompileBroker::_sum_standard_bytes_compiled;
int osr_bytes_compiled = CompileBroker::_sum_osr_bytes_compiled;
int standard_compile_count = CompileBroker::_total_standard_compile_count;
int osr_compile_count = CompileBroker::_total_osr_compile_count;
int total_compile_count = CompileBroker::_total_compile_count;
int nmethods_size = CompileBroker::_sum_nmethod_size;
int nmethods_code_size = CompileBroker::_sum_nmethod_code_size;
#endif // INCLUDE_JVMCI
if (!aggregate) {
return;
}
tty->cr();
tty->print_cr("Accumulated compiler times");
tty->print_cr("----------------------------------------------------------");
//0000000000111111111122222222223333333333444444444455555555556666666666
//0123456789012345678901234567890123456789012345678901234567890123456789
tty->print_cr(" Total compilation time : %7.3f s", total_compilation.seconds());
tty->print_cr(" Standard compilation : %7.3f s, Average : %2.3f s",
standard_compilation.seconds(),
standard_compilation.seconds() / standard_compile_count);
tty->print_cr(" Bailed out compilation : %7.3f s, Average : %2.3f s",
CompileBroker::_t_bailedout_compilation.seconds(),
CompileBroker::_t_bailedout_compilation.seconds() / CompileBroker::_total_bailout_count);
tty->print_cr(" On stack replacement : %7.3f s, Average : %2.3f s",
osr_compilation.seconds(),
osr_compilation.seconds() / osr_compile_count);
tty->print_cr(" Invalidated : %7.3f s, Average : %2.3f s",
CompileBroker::_t_invalidated_compilation.seconds(),
CompileBroker::_t_invalidated_compilation.seconds() / CompileBroker::_total_invalidated_count);
AbstractCompiler *comp = compiler(CompLevel_simple);
if (comp != NULL) {
tty->cr();
comp->print_timers();
}
comp = compiler(CompLevel_full_optimization);
if (comp != NULL) {
tty->cr();
comp->print_timers();
}
tty->cr();
tty->print_cr(" Total compiled methods : %8d methods", total_compile_count);
tty->print_cr(" Standard compilation : %8d methods", standard_compile_count);
tty->print_cr(" On stack replacement : %8d methods", osr_compile_count);
int tcb = osr_bytes_compiled + standard_bytes_compiled;
tty->print_cr(" Total compiled bytecodes : %8d bytes", tcb);
tty->print_cr(" Standard compilation : %8d bytes", standard_bytes_compiled);
tty->print_cr(" On stack replacement : %8d bytes", osr_bytes_compiled);
double tcs = total_compilation.seconds();
int bps = tcs == 0.0 ? 0 : (int)(tcb / tcs);
tty->print_cr(" Average compilation speed : %8d bytes/s", bps);
tty->cr();
tty->print_cr(" nmethod code size : %8d bytes", nmethods_code_size);
tty->print_cr(" nmethod total size : %8d bytes", nmethods_size);
}
// Debugging output for failure
void CompileBroker::print_last_compile() {
if (_last_compile_level != CompLevel_none &&
compiler(_last_compile_level) != NULL &&
_last_compile_type != no_compile) {
if (_last_compile_type == osr_compile) {
tty->print_cr("Last parse: [osr]%d+++(%d) %s",
_osr_compilation_id, _last_compile_level, _last_method_compiled);
} else {
tty->print_cr("Last parse: %d+++(%d) %s",
_compilation_id, _last_compile_level, _last_method_compiled);
}
}
}
// Print general/accumulated JIT information.
void CompileBroker::print_info(outputStream *out) {
if (out == NULL) out = tty;
out->cr();
out->print_cr("======================");
out->print_cr(" General JIT info ");
out->print_cr("======================");
out->cr();
out->print_cr(" JIT is : %7s", should_compile_new_jobs() ? "on" : "off");
out->print_cr(" Compiler threads : %7d", (int)CICompilerCount);
out->cr();
out->print_cr("CodeCache overview");
out->print_cr("--------------------------------------------------------");
out->cr();
out->print_cr(" Reserved size : " SIZE_FORMAT_W(7) " KB", CodeCache::max_capacity() / K);
out->print_cr(" Committed size : " SIZE_FORMAT_W(7) " KB", CodeCache::capacity() / K);
out->print_cr(" Unallocated capacity : " SIZE_FORMAT_W(7) " KB", CodeCache::unallocated_capacity() / K);
out->cr();
out->cr();
out->print_cr("CodeCache cleaning overview");
out->print_cr("--------------------------------------------------------");
out->cr();
NMethodSweeper::print(out);
out->print_cr("--------------------------------------------------------");
out->cr();
}
// Note: tty_lock must not be held upon entry to this function.
// Print functions called from herein do "micro-locking" on tty_lock.
// That's a tradeoff which keeps together important blocks of output.
// At the same time, continuous tty_lock hold time is kept in check,
// preventing concurrently printing threads from stalling a long time.
void CompileBroker::print_heapinfo(outputStream* out, const char* function, size_t granularity) {
TimeStamp ts_total;
TimeStamp ts;
bool allFun = !strcmp(function, "all");
bool aggregate = !strcmp(function, "aggregate") || !strcmp(function, "analyze") || allFun;
bool usedSpace = !strcmp(function, "UsedSpace") || allFun;
bool freeSpace = !strcmp(function, "FreeSpace") || allFun;
bool methodCount = !strcmp(function, "MethodCount") || allFun;
bool methodSpace = !strcmp(function, "MethodSpace") || allFun;
bool methodAge = !strcmp(function, "MethodAge") || allFun;
bool methodNames = !strcmp(function, "MethodNames") || allFun;
bool discard = !strcmp(function, "discard") || allFun;
if (out == NULL) {
out = tty;
}
if (!(aggregate || usedSpace || freeSpace || methodCount || methodSpace || methodAge || methodNames || discard)) {
out->print_cr("\n__ CodeHeapStateAnalytics: Function %s is not supported", function);
out->cr();
return;
}
ts_total.update(); // record starting point
if (aggregate) {
print_info(out);
}
// We hold the CodeHeapStateAnalytics_lock all the time, from here until we leave this function.
// That helps us get a consistent view of the CodeHeap, at least for the "all" function.
// When we request individual parts of the analysis via the jcmd interface, it is possible
// that in between another thread (another jcmd user or the vm running into CodeCache OOM)
// updated the aggregated data. That's a tolerable tradeoff because we can't hold a lock
// across user interaction.
ts.update(); // record starting point
MutexLockerEx mu1(CodeHeapStateAnalytics_lock, Mutex::_no_safepoint_check_flag);
out->cr();
out->print_cr("__ CodeHeapStateAnalytics lock wait took %10.3f seconds _________", ts.seconds());
out->cr();
if (aggregate) {
// It is sufficient to hold the CodeCache_lock only for the aggregate step.
// All other functions operate on aggregated data - except MethodNames, but that should be safe.
// The separate CodeHeapStateAnalytics_lock protects the printing functions against
// concurrent aggregate steps. Acquire this lock before acquiring the CodeCache_lock.
// CodeHeapStateAnalytics_lock could be held by a concurrent thread for a long time,
// leading to an unnecessarily long hold time of the CodeCache_lock.
ts.update(); // record starting point
MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
out->cr();
out->print_cr("__ CodeCache lock wait took %10.3f seconds _________", ts.seconds());
out->cr();
ts.update(); // record starting point
CodeCache::aggregate(out, granularity);
out->cr();
out->print_cr("__ CodeCache lock hold took %10.3f seconds _________", ts.seconds());
out->cr();
}
if (usedSpace) CodeCache::print_usedSpace(out);
if (freeSpace) CodeCache::print_freeSpace(out);
if (methodCount) CodeCache::print_count(out);
if (methodSpace) CodeCache::print_space(out);
if (methodAge) CodeCache::print_age(out);
if (methodNames) CodeCache::print_names(out);
if (discard) CodeCache::discard(out);
out->cr();
out->print_cr("__ CodeHeapStateAnalytics total duration %10.3f seconds _________", ts_total.seconds());
out->cr();
}