Add compilePortableCode query
Add a new query to the TR::Compilation class to allow the compiler to
determine whether the runtime is in some kind of mode (such as
snapshot/restore) that requires the compiler to generate code that is
portable (i.e., generated on one system but able to run on another).
This is different from relocatable compiles because, for example with
snapshot/restore, the code does not need to be relocated but it does
need to be portable.
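For illustration, a downstream runtime that has such a mode could
override the new query in its own Compilation extension class; the names
below (MyRuntime, inSnapshotRestoreMode) are assumptions for the sketch,
not part of this change.

// Hypothetical downstream override: answer the query from whatever
// runtime state tracks the snapshot/restore (or similar) mode.
bool
MyRuntime::Compilation::compilePortableCode()
   {
   return inSnapshotRestoreMode();
   }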

Signed-off-by: Irwin D'Souza <dsouzai.gh@gmail.com>
dsouzai committed Jun 29, 2021
1 parent ec902f6 commit 3b2f598
Showing 9 changed files with 44 additions and 38 deletions.
6 changes: 6 additions & 0 deletions compiler/compile/OMRCompilation.hpp
@@ -346,6 +346,12 @@ class OMR_EXTENSIBLE Compilation
//
bool compileRelocatableCode() { return false; }

+// Is this compilation producing portable code? This should generally
+// return true, for example, for compilations to be used in a
+// snapshot/restore setting.
+//
+bool compilePortableCode() { return false; }
+
// Maximum number of internal pointers that can be managed.
//
int32_t maxInternalPointers();
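For context, a minimal sketch of how a consumer of this query could keep
code generation on a portable path; the helper below is illustrative only
(it is not part of this commit) and reuses the x86 feature API that
appears elsewhere in this diff.

// Illustrative sketch: only rely on a CPU feature detected on the build
// machine when the generated code does not need to be portable.
static bool canUseSSE41(TR::Compilation *comp)
   {
   if (comp->compilePortableCode())
      return false; // stay on baseline instructions
   return comp->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1);
   }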
6 changes: 3 additions & 3 deletions compiler/x/codegen/FPTreeEvaluator.cpp
@@ -1047,8 +1047,8 @@ TR::Register *OMR::X86::TreeEvaluator::fpConvertToInt(TR::Node *node, TR::Symbol
startLabel->setStartInternalControlFlow();
reStartLabel->setEndInternalControlFlow();

-TR_ASSERT_FATAL(cg->comp()->compileRelocatableCode() || cg->comp()->isOutOfProcessCompilation() || cg->comp()->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE) == cg->getX86ProcessorInfo().supportsSSE(), "supportsSSE() failed\n");
-TR_ASSERT_FATAL(cg->comp()->compileRelocatableCode() || cg->comp()->isOutOfProcessCompilation() || cg->comp()->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE2) == cg->getX86ProcessorInfo().supportsSSE2(), "supportsSSE2() failed\n");
+TR_ASSERT_FATAL(cg->comp()->compileRelocatableCode() || cg->comp()->isOutOfProcessCompilation() || cg->comp()->compilePortableCode() || cg->comp()->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE) == cg->getX86ProcessorInfo().supportsSSE(), "supportsSSE() failed\n");
+TR_ASSERT_FATAL(cg->comp()->compileRelocatableCode() || cg->comp()->isOutOfProcessCompilation() || cg->comp()->compilePortableCode() || cg->comp()->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE2) == cg->getX86ProcessorInfo().supportsSSE2(), "supportsSSE2() failed\n");

bool optimizeF2IWithSSE = ( node->getOpCodeValue() == TR::f2i &&
cg->comp()->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE) );
@@ -2215,7 +2215,7 @@ bool OMR::X86::TreeEvaluator::canUseFCOMIInstructions(TR::Node *node, TR::CodeGe
{
TR::ILOpCodes cmpOp = node->getOpCodeValue();

-TR_ASSERT_FATAL(cg->comp()->compileRelocatableCode() || cg->comp()->isOutOfProcessCompilation() || cg->comp()->target().cpu.supportsFCOMIInstructions() == cg->getX86ProcessorInfo().supportsFCOMIInstructions(), "supportsFCOMIInstuctions() failed\n");
+TR_ASSERT_FATAL(cg->comp()->compileRelocatableCode() || cg->comp()->isOutOfProcessCompilation() || cg->comp()->compilePortableCode() || cg->comp()->target().cpu.supportsFCOMIInstructions() == cg->getX86ProcessorInfo().supportsFCOMIInstructions(), "supportsFCOMIInstuctions() failed\n");

return (!cg->comp()->target().cpu.supportsFCOMIInstructions() ||
cmpOp == TR::iffcmpneu ||
12 changes: 6 additions & 6 deletions compiler/x/codegen/IntegerMultiplyDecomposer.cpp
@@ -98,12 +98,12 @@ TR::Register *TR_X86IntegerMultiplyDecomposer::decomposeIntegerMultiplier(int32_
const integerMultiplyComposition& composition = _integerMultiplySolutions[decompositionIndex];
if (composition._subsequentShiftTooExpensive == false)
{
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELCORE2) == cg()->getX86ProcessorInfo().isIntelCore2(), "isIntelCore2 failed\n");
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELNEHALEM) == cg()->getX86ProcessorInfo().isIntelNehalem(), "isIntelNehalem failed\n");
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELWESTMERE) == cg()->getX86ProcessorInfo().isIntelWestmere(), "isIntelWestmere failed\n");
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELSANDYBRIDGE) == cg()->getX86ProcessorInfo().isIntelSandyBridge(), "isIntelSandyBridge failed\n");
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.is(OMR_PROCESSOR_X86_AMDFAMILY15H) == cg()->getX86ProcessorInfo().isAMD15h(), "isAMD15h failed\n");
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.is(OMR_PROCESSOR_X86_AMDOPTERON) == cg()->getX86ProcessorInfo().isAMDOpteron(), "isAMDOpteron failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELCORE2) == cg()->getX86ProcessorInfo().isIntelCore2(), "isIntelCore2 failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELNEHALEM) == cg()->getX86ProcessorInfo().isIntelNehalem(), "isIntelNehalem failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELWESTMERE) == cg()->getX86ProcessorInfo().isIntelWestmere(), "isIntelWestmere failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELSANDYBRIDGE) == cg()->getX86ProcessorInfo().isIntelSandyBridge(), "isIntelSandyBridge failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.is(OMR_PROCESSOR_X86_AMDFAMILY15H) == cg()->getX86ProcessorInfo().isAMD15h(), "isAMD15h failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.is(OMR_PROCESSOR_X86_AMDOPTERON) == cg()->getX86ProcessorInfo().isAMDOpteron(), "isAMDOpteron failed\n");

target = generateDecompositionInstructions(decompositionIndex, tempRegArraySize, tempRegArray);
if (shiftAmount < 3 &&
36 changes: 18 additions & 18 deletions compiler/x/codegen/OMRCodeGenerator.cpp
@@ -207,9 +207,9 @@ OMR::X86::CodeGenerator::initializeX86(TR::Compilation *comp)
{
bool supportsSSE2 = false;

-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.isGenuineIntel() == _targetProcessorInfo.isGenuineIntel(), "isGenuineIntel() failed\n");
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.isAuthenticAMD() == _targetProcessorInfo.isAuthenticAMD(), "isAuthenticAMD() failed\n");
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.prefersMultiByteNOP() == _targetProcessorInfo.prefersMultiByteNOP(), "prefersMultiByteNOP() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.isGenuineIntel() == _targetProcessorInfo.isGenuineIntel(), "isGenuineIntel() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.isAuthenticAMD() == _targetProcessorInfo.isAuthenticAMD(), "isAuthenticAMD() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.prefersMultiByteNOP() == _targetProcessorInfo.prefersMultiByteNOP(), "prefersMultiByteNOP() failed\n");

// Pick a padding table
//
@@ -229,7 +229,7 @@ OMR::X86::CodeGenerator::initializeX86(TR::Compilation *comp)

#if defined(TR_TARGET_X86)
#if !defined(J9HAMMER)
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE2) == _targetProcessorInfo.supportsSSE2(), "supportsSSE2() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE2) == _targetProcessorInfo.supportsSSE2(), "supportsSSE2() failed\n");

if (comp->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE2) && comp->target().cpu.testOSForSSESupport())
supportsSSE2 = true;
@@ -239,7 +239,7 @@ OMR::X86::CodeGenerator::initializeX86(TR::Compilation *comp)
#endif // !defined(J9HAMMER)
#endif // defined(TR_TARGET_X86)

-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.supportsFeature(OMR_FEATURE_X86_RTM) == _targetProcessorInfo.supportsTM(), "supportsTM() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.supportsFeature(OMR_FEATURE_X86_RTM) == _targetProcessorInfo.supportsTM(), "supportsTM() failed\n");

if (comp->target().cpu.supportsFeature(OMR_FEATURE_X86_RTM) && !comp->getOption(TR_DisableTM))
{
@@ -249,7 +249,7 @@ OMR::X86::CodeGenerator::initializeX86(TR::Compilation *comp)
*
* TODO: Need to figure out from which mode of Broadwell start supporting TM
*/
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELHASWELL) == _targetProcessorInfo.isIntelHaswell(), "isIntelHaswell() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELHASWELL) == _targetProcessorInfo.isIntelHaswell(), "isIntelHaswell() failed\n");
if (!comp->target().cpu.is(OMR_PROCESSOR_X86_INTELHASWELL))
{
if (comp->target().is64Bit())
@@ -268,7 +268,7 @@ OMR::X86::CodeGenerator::initializeX86(TR::Compilation *comp)

// Choose the best XMM double precision load instruction for the target architecture.
//
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.isAuthenticAMD() == _targetProcessorInfo.isAuthenticAMD(), "isAuthenticAMD() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.isAuthenticAMD() == _targetProcessorInfo.isAuthenticAMD(), "isAuthenticAMD() failed\n");
static char *forceMOVLPD = feGetEnv("TR_forceMOVLPDforDoubleLoads");
if (comp->target().cpu.isAuthenticAMD() || forceMOVLPD)
{
@@ -284,8 +284,8 @@ OMR::X86::CodeGenerator::initializeX86(TR::Compilation *comp)
// Enable software prefetch of the TLH and configure the TLH prefetching
// geometry.
//
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELCORE2) == comp->cg()->getX86ProcessorInfo().isIntelCore2(), "isIntelCore2() failed\n");
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELNEHALEM) == comp->cg()->getX86ProcessorInfo().isIntelNehalem(), "isIntelNehalem() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELCORE2) == comp->cg()->getX86ProcessorInfo().isIntelCore2(), "isIntelCore2() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELNEHALEM) == comp->cg()->getX86ProcessorInfo().isIntelNehalem(), "isIntelNehalem() failed\n");
if (((!comp->getOption(TR_DisableTLHPrefetch) && (comp->target().cpu.is(OMR_PROCESSOR_X86_INTELCORE2) || comp->target().cpu.is(OMR_PROCESSOR_X86_INTELNEHALEM))) ||
(comp->getOption(TR_TLHPrefetch) && self()->targetSupportsSoftwarePrefetches())))
{
@@ -378,7 +378,7 @@ OMR::X86::CodeGenerator::initializeX86(TR::Compilation *comp)
static bool disableX86TRTO = feGetEnv("TR_disableX86TRTO") != NULL;
if (!disableX86TRTO)
{
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1) == self()->getX86ProcessorInfo().supportsSSE4_1(), "supportsSSE4_1() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1) == self()->getX86ProcessorInfo().supportsSSE4_1(), "supportsSSE4_1() failed\n");
if (comp->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1))
{
self()->setSupportsArrayTranslateTRTO();
@@ -387,8 +387,8 @@ OMR::X86::CodeGenerator::initializeX86(TR::Compilation *comp)
static bool disableX86TROT = feGetEnv("TR_disableX86TROT") != NULL;
if (!disableX86TROT)
{
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1) == self()->getX86ProcessorInfo().supportsSSE4_1(), "supportsSSE4_1() failed\n");
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE2) == self()->getX86ProcessorInfo().supportsSSE2(), "supportsSSE4_1() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1) == self()->getX86ProcessorInfo().supportsSSE4_1(), "supportsSSE4_1() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE2) == self()->getX86ProcessorInfo().supportsSSE2(), "supportsSSE4_1() failed\n");
if (comp->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1))
{
self()->setSupportsArrayTranslateTROT();
@@ -429,9 +429,9 @@ OMR::X86::CodeGenerator::initializeX86(TR::Compilation *comp)
// Make a conservative estimate of the boundary over which an executable instruction cannot
// be patched.
//
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.isGenuineIntel() == _targetProcessorInfo.isGenuineIntel(), "isGenuineIntel() failed\n");
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.isAuthenticAMD() == _targetProcessorInfo.isAuthenticAMD(), "isAuthenticAMD() failed\n");
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.is(OMR_PROCESSOR_X86_AMDFAMILY15H) == _targetProcessorInfo.isAMD15h(), "isAMD15h() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.isGenuineIntel() == _targetProcessorInfo.isGenuineIntel(), "isGenuineIntel() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.isAuthenticAMD() == _targetProcessorInfo.isAuthenticAMD(), "isAuthenticAMD() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.is(OMR_PROCESSOR_X86_AMDFAMILY15H) == _targetProcessorInfo.isAMD15h(), "isAMD15h() failed\n");
int32_t boundary;
if (comp->target().cpu.isGenuineIntel() || (comp->target().cpu.isAuthenticAMD() && comp->target().cpu.is(OMR_PROCESSOR_X86_AMDFAMILY15H)))
boundary = 32;
@@ -1004,7 +1004,7 @@ OMR::X86::CodeGenerator::getSupportsOpCodeForAutoSIMD(TR::ILOpCode opcode, TR::D
else
return false;
case TR::vmul:
-TR_ASSERT_FATAL(self()->comp()->compileRelocatableCode() || self()->comp()->isOutOfProcessCompilation() || self()->getX86ProcessorInfo().supportsSSE4_1() == self()->comp()->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1), "supportsSSE4_1() failed\n");
+TR_ASSERT_FATAL(self()->comp()->compileRelocatableCode() || self()->comp()->isOutOfProcessCompilation() || self()->comp()->compilePortableCode() || self()->getX86ProcessorInfo().supportsSSE4_1() == self()->comp()->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1), "supportsSSE4_1() failed\n");
if (dt == TR::Float || dt == TR::Double || (dt == TR::Int32 && self()->comp()->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1)))
return true;
else
@@ -1060,15 +1060,15 @@
bool
OMR::X86::CodeGenerator::getSupportsEncodeUtf16LittleWithSurrogateTest()
{
-TR_ASSERT_FATAL(self()->comp()->compileRelocatableCode() || self()->comp()->isOutOfProcessCompilation() || self()->comp()->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1) == TR::CodeGenerator::getX86ProcessorInfo().supportsSSE4_1(), "supportsSSE4_1()");
+TR_ASSERT_FATAL(self()->comp()->compileRelocatableCode() || self()->comp()->isOutOfProcessCompilation() || self()->comp()->compilePortableCode() || self()->comp()->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1) == TR::CodeGenerator::getX86ProcessorInfo().supportsSSE4_1(), "supportsSSE4_1()");
return self()->comp()->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1) &&
!self()->comp()->getOption(TR_DisableSIMDUTF16LEEncoder);
}

bool
OMR::X86::CodeGenerator::getSupportsEncodeUtf16BigWithSurrogateTest()
{
-TR_ASSERT_FATAL(self()->comp()->compileRelocatableCode() || self()->comp()->isOutOfProcessCompilation() || self()->comp()->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1) == TR::CodeGenerator::getX86ProcessorInfo().supportsSSE4_1(), "supportsSSE4_1()");
+TR_ASSERT_FATAL(self()->comp()->compileRelocatableCode() || self()->comp()->isOutOfProcessCompilation() || self()->comp()->compilePortableCode() || self()->comp()->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1) == TR::CodeGenerator::getX86ProcessorInfo().supportsSSE4_1(), "supportsSSE4_1()");
return self()->comp()->target().cpu.supportsFeature(OMR_FEATURE_X86_SSE4_1) &&
!self()->comp()->getOption(TR_DisableSIMDUTF16BEEncoder);
}
2 changes: 1 addition & 1 deletion compiler/x/codegen/OMRInstOpCode.cpp
@@ -88,7 +88,7 @@ template <typename TBuffer> typename TBuffer::cursor_t OMR::X86::InstOpCode::OpC
// Use AVX if possible

TR::Compilation *comp = TR::comp();
-TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->target().cpu.supportsAVX() == TR::CodeGenerator::getX86ProcessorInfo().supportsAVX(), "supportsAVX() failed\n");
+TR_ASSERT_FATAL(comp->compileRelocatableCode() || comp->isOutOfProcessCompilation() || comp->compilePortableCode() || comp->target().cpu.supportsAVX() == TR::CodeGenerator::getX86ProcessorInfo().supportsAVX(), "supportsAVX() failed\n");

if (supportsAVX() && comp->target().cpu.supportsAVX())
{
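The assertions above all repeat the same three-part guard. As a hedged
sketch, the guard could be factored into a helper such as the following;
the helper name is an assumption, and the commit itself keeps the
condition inline.

// Illustrative helper: the target CPU description may legitimately differ
// from the build machine's CPUID answers when the compile is relocatable,
// out-of-process, or portable.
static bool targetCPUMayDifferFromHost(TR::Compilation *comp)
   {
   return comp->compileRelocatableCode() ||
          comp->isOutOfProcessCompilation() ||
          comp->compilePortableCode();
   }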