[pull] master from llvm:master #40
Merged (10 commits, Aug 29, 2019)
82 changes: 34 additions & 48 deletions clang/lib/Index/IndexingAction.cpp
@@ -23,7 +23,37 @@ using namespace clang::index;

namespace {

class IndexASTConsumer : public ASTConsumer {
class IndexPPCallbacks final : public PPCallbacks {
std::shared_ptr<IndexingContext> IndexCtx;

public:
IndexPPCallbacks(std::shared_ptr<IndexingContext> IndexCtx)
: IndexCtx(std::move(IndexCtx)) {}

void MacroExpands(const Token &MacroNameTok, const MacroDefinition &MD,
SourceRange Range, const MacroArgs *Args) override {
IndexCtx->handleMacroReference(*MacroNameTok.getIdentifierInfo(),
Range.getBegin(), *MD.getMacroInfo());
}

void MacroDefined(const Token &MacroNameTok,
const MacroDirective *MD) override {
IndexCtx->handleMacroDefined(*MacroNameTok.getIdentifierInfo(),
MacroNameTok.getLocation(),
*MD->getMacroInfo());
}

void MacroUndefined(const Token &MacroNameTok, const MacroDefinition &MD,
const MacroDirective *Undef) override {
if (!MD.getMacroInfo()) // Ignore noop #undef.
return;
IndexCtx->handleMacroUndefined(*MacroNameTok.getIdentifierInfo(),
MacroNameTok.getLocation(),
*MD.getMacroInfo());
}
};

class IndexASTConsumer final : public ASTConsumer {
std::shared_ptr<Preprocessor> PP;
std::shared_ptr<IndexingContext> IndexCtx;

@@ -37,6 +67,7 @@ class IndexASTConsumer : public ASTConsumer {
IndexCtx->setASTContext(Context);
IndexCtx->getDataConsumer().initialize(Context);
IndexCtx->getDataConsumer().setPreprocessor(PP);
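// Register the preprocessor callbacks so macro definitions, expansions, and
// #undefs seen during parsing are forwarded to the IndexingContext.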
PP->addPPCallbacks(std::make_unique<IndexPPCallbacks>(IndexCtx));
}

bool HandleTopLevelDecl(DeclGroupRef DG) override {
@@ -55,36 +86,6 @@ class IndexASTConsumer : public ASTConsumer {
}
};

class IndexPPCallbacks : public PPCallbacks {
std::shared_ptr<IndexingContext> IndexCtx;

public:
IndexPPCallbacks(std::shared_ptr<IndexingContext> IndexCtx)
: IndexCtx(std::move(IndexCtx)) {}

void MacroExpands(const Token &MacroNameTok, const MacroDefinition &MD,
SourceRange Range, const MacroArgs *Args) override {
IndexCtx->handleMacroReference(*MacroNameTok.getIdentifierInfo(),
Range.getBegin(), *MD.getMacroInfo());
}

void MacroDefined(const Token &MacroNameTok,
const MacroDirective *MD) override {
IndexCtx->handleMacroDefined(*MacroNameTok.getIdentifierInfo(),
MacroNameTok.getLocation(),
*MD->getMacroInfo());
}

void MacroUndefined(const Token &MacroNameTok, const MacroDefinition &MD,
const MacroDirective *Undef) override {
if (!MD.getMacroInfo()) // Ignore noop #undef.
return;
IndexCtx->handleMacroUndefined(*MacroNameTok.getIdentifierInfo(),
MacroNameTok.getLocation(),
*MD.getMacroInfo());
}
};

class IndexActionBase {
protected:
std::shared_ptr<IndexDataConsumer> DataConsumer;
@@ -101,16 +102,12 @@ class IndexActionBase {
IndexCtx);
}

std::unique_ptr<PPCallbacks> createIndexPPCallbacks() {
return std::make_unique<IndexPPCallbacks>(IndexCtx);
}

void finish() {
DataConsumer->finish();
}
};

class IndexAction : public ASTFrontendAction, IndexActionBase {
class IndexAction final : public ASTFrontendAction, IndexActionBase {
public:
IndexAction(std::shared_ptr<IndexDataConsumer> DataConsumer,
IndexingOptions Opts)
@@ -122,18 +119,13 @@ class IndexAction : public ASTFrontendAction, IndexActionBase {
return createIndexASTConsumer(CI);
}

bool BeginSourceFileAction(clang::CompilerInstance &CI) override {
CI.getPreprocessor().addPPCallbacks(createIndexPPCallbacks());
return true;
}

void EndSourceFileAction() override {
FrontendAction::EndSourceFileAction();
finish();
}
};

class WrappingIndexAction : public WrapperFrontendAction, IndexActionBase {
class WrappingIndexAction final : public WrapperFrontendAction, IndexActionBase {
bool IndexActionFailed = false;

public:
@@ -158,12 +150,6 @@ class WrappingIndexAction : public WrapperFrontendAction, IndexActionBase {
return std::make_unique<MultiplexConsumer>(std::move(Consumers));
}

bool BeginSourceFileAction(clang::CompilerInstance &CI) override {
WrapperFrontendAction::BeginSourceFileAction(CI);
CI.getPreprocessor().addPPCallbacks(createIndexPPCallbacks());
return true;
}

void EndSourceFileAction() override {
// Invoke wrapped action's method.
WrapperFrontendAction::EndSourceFileAction();
4 changes: 3 additions & 1 deletion clang/lib/Sema/SemaChecking.cpp
@@ -629,7 +629,9 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
unsigned NumArgs = TheCall->getNumArgs();

if (NumArgs < 4) {
S.Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_few_args);
S.Diag(TheCall->getBeginLoc(),
diag::err_typecheck_call_too_few_args_at_least)
<< 0 << 4 << NumArgs;
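// %0 selects "function" in the diagnostic text, %1 is the required minimum
// (4), and %2 is the number of arguments actually provided.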
return true;
}

2 changes: 2 additions & 0 deletions clang/test/SemaOpenCL/cl20-device-side-enqueue.cl
@@ -158,6 +158,8 @@ kernel void enqueue_kernel_tests() {
enqueue_kernel(default_queue, flags, ndrange, 1, &event_wait_list, &evt); // expected-error{{illegal call to enqueue_kernel, incorrect argument types}}

enqueue_kernel(default_queue, flags, ndrange, 1, 1); // expected-error{{illegal call to enqueue_kernel, incorrect argument types}}

enqueue_kernel(default_queue, ndrange, ^{}); // expected-error{{too few arguments to function call, expected at least 4, have 3}}
}

// Diagnostic tests for get_kernel_work_group_size and allowed block parameter types in dynamic parallelism.
@@ -144,13 +144,12 @@ def test_log_file(self):
self.complete_from_to('log enable lldb expr -f ' + src_dir,
['main.cpp'])

@skipIfWindows
@skipIfFreeBSD # timing out on the FreeBSD buildbot
def test_log_dir(self):
# Complete our source directory.
src_dir = os.path.dirname(os.path.realpath(__file__))
self.complete_from_to('log enable lldb expr -f ' + src_dir,
[src_dir + "/"])
[src_dir + os.sep], turn_off_re_match=True)
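# Expect the platform path separator (os.sep) rather than a hard-coded '/';
# turn_off_re_match presumably keeps a '\' separator from being read as a
# regex metacharacter.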

# <rdar://problem/11052829>
@skipIfFreeBSD # timing out on the FreeBSD buildbot
47 changes: 43 additions & 4 deletions llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -16430,12 +16430,51 @@ SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {
}

/// Convert a disguised subvector insertion into a shuffle:
/// insert_vector_elt V, (bitcast X from vector type), IdxC -->
/// bitcast(shuffle (bitcast V), (extended X), Mask)
/// Note: We do not use an insert_subvector node because that requires a legal
/// subvector type.
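/// For example (an illustrative sketch, not taken from this patch):
///   insert_vector_elt <2 x i64> V, (bitcast <2 x float> X to i64), 1
///   --> bitcast (shuffle (bitcast V to <4 x float>), (widened X), <0,1,4,5>)
///       to <2 x i64>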
SDValue DAGCombiner::combineInsertEltToShuffle(SDNode *N, unsigned InsIndex) {
SDValue InsertVal = N->getOperand(1);
SDValue Vec = N->getOperand(0);

// (insert_vector_elt (vector_shuffle X, Y), (extract_vector_elt X, N), InsIndex)
// --> (vector_shuffle X, Y)
if (Vec.getOpcode() == ISD::VECTOR_SHUFFLE && Vec.hasOneUse() &&
InsertVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
isa<ConstantSDNode>(InsertVal.getOperand(1))) {
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Vec.getNode());
ArrayRef<int> Mask = SVN->getMask();

SDValue X = Vec.getOperand(0);
SDValue Y = Vec.getOperand(1);

// Vec's operand 0 is using indices from 0 to N-1 and
// operand 1 from N to 2N - 1, where N is the number of
// elements in the vectors.
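// For example (illustrative): with Vec = (vector_shuffle <4 x i32> X, Y,
// <0,5,2,7>), InsertVal = (extract_vector_elt Y, 1) and InsIndex = 0, the
// mask element becomes 4 + 1 = 5, yielding (vector_shuffle X, Y, <5,5,2,7>).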
int XOffset = -1;
if (InsertVal.getOperand(0) == X) {
XOffset = 0;
} else if (InsertVal.getOperand(0) == Y) {
XOffset = X.getValueType().getVectorNumElements();
}

if (XOffset != -1) {
SmallVector<int, 16> NewMask(Mask.begin(), Mask.end());

auto *ExtrIndex = cast<ConstantSDNode>(InsertVal.getOperand(1));
NewMask[InsIndex] = XOffset + ExtrIndex->getZExtValue();
assert(NewMask[InsIndex] < 2 * Vec.getValueType().getVectorNumElements() &&
NewMask[InsIndex] >= 0 && "NewMask[InsIndex] is out of bound");

SDValue LegalShuffle =
TLI.buildLegalVectorShuffle(Vec.getValueType(), SDLoc(N), X,
Y, NewMask, DAG);
if (LegalShuffle)
return LegalShuffle;
}
}

// insert_vector_elt V, (bitcast X from vector type), IdxC -->
// bitcast(shuffle (bitcast V), (extended X), Mask)
// Note: We do not use an insert_subvector node because that requires a
// legal subvector type.
if (InsertVal.getOpcode() != ISD::BITCAST || !InsertVal.hasOneUse() ||
!InsertVal.getOperand(0).getValueType().isVector())
return SDValue();
2 changes: 1 addition & 1 deletion llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1627,7 +1627,6 @@ bool SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, SDValue &LHS,
MVT OpVT = LHS.getSimpleValueType();
ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
NeedInvert = false;
bool NeedSwap = false;
switch (TLI.getCondCodeAction(CCCode, OpVT)) {
default: llvm_unreachable("Unknown condition code action!");
case TargetLowering::Legal:
@@ -1641,6 +1640,7 @@ bool SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, SDValue &LHS,
return true;
}
// Swapping operands didn't work. Try inverting the condition.
bool NeedSwap = false;
InvCC = getSetCCInverse(CCCode, OpVT.isInteger());
if (!TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) {
// If inverting the condition is not enough, try swapping operands
14 changes: 7 additions & 7 deletions llvm/lib/Transforms/IPO/Attributor.cpp
@@ -428,16 +428,16 @@ void IRPosition::verify() {
assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
"Expected call base or argument for positive attribute index!");
if (auto *Arg = dyn_cast<Argument>(AnchorVal)) {
assert(Arg->getArgNo() == unsigned(getArgNo()) &&
if (isa<Argument>(AnchorVal)) {
assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
"Argument number mismatch!");
assert(Arg == &getAssociatedValue() && "Associated value mismatch!");
assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
"Associated value mismatch!");
} else {
auto &CB = cast<CallBase>(*AnchorVal);
(void)CB;
assert(CB.arg_size() > unsigned(getArgNo()) &&
assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
"Call site argument number mismatch!");
assert(CB.getArgOperand(getArgNo()) == &getAssociatedValue() &&
assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
&getAssociatedValue() &&
"Associated value mismatch!");
}
break;
76 changes: 60 additions & 16 deletions llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -3379,7 +3379,7 @@ foldICmpWithTruncSignExtendedVal(ICmpInst &I,
// we should move shifts to the same hand of 'and', i.e. rewrite as
// icmp eq/ne (and (x shift (Q+K)), y), 0 iff (Q+K) u< bitwidth(x)
// We are only interested in opposite logical shifts here.
// One of the shifts can be truncated. For now, it can only be 'shl'.
// One of the shifts can be truncated.
// If we can, we want to end up creating 'lshr' shift.
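// For example (illustrative): ((x >> 1) & (y << 2)) == 0 can be rewritten as
// ((x >> 3) & y) == 0, provided 3 u< bitwidth(x).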
static Value *
foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
@@ -3413,14 +3406,6 @@ foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
"We did not look past any shifts while matching XShift though.");
bool HadTrunc = WidestTy != I.getOperand(0)->getType();

if (HadTrunc) {
// We did indeed have a truncation. For now, let's only proceed if the 'shl'
// was truncated, since that does not require any extra legality checks.
// FIXME: trunc-of-lshr.
if (!match(YShift, m_Shl(m_Value(), m_Value())))
return nullptr;
}

// If YShift is a 'lshr', swap the shifts around.
if (match(YShift, m_LShr(m_Value(), m_Value())))
std::swap(XShift, YShift);
@@ -3462,16 +3454,68 @@ foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
/*isNUW=*/false, SQ.getWithInstruction(&I)));
if (!NewShAmt)
return nullptr;
NewShAmt = ConstantExpr::getZExtOrBitCast(NewShAmt, WidestTy);
unsigned WidestBitWidth = WidestTy->getScalarSizeInBits();

// Is the new shift amount smaller than the bit width?
// FIXME: could also rely on ConstantRange.
if (!match(NewShAmt, m_SpecificInt_ICMP(
ICmpInst::Predicate::ICMP_ULT,
APInt(NewShAmt->getType()->getScalarSizeInBits(),
WidestTy->getScalarSizeInBits()))))
if (!match(NewShAmt,
m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
APInt(WidestBitWidth, WidestBitWidth))))
return nullptr;

// An extra legality check is needed if we had trunc-of-lshr.
if (HadTrunc && match(WidestShift, m_LShr(m_Value(), m_Value()))) {
auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
WidestShift]() {
// It isn't obvious whether it's worth it to analyze non-constants here.
// Also, let's basically give up on non-splat cases, pessimizing vectors.
// If *any* of these preconditions matches we can perform the fold.
Constant *NewShAmtSplat = NewShAmt->getType()->isVectorTy()
? NewShAmt->getSplatValue()
: NewShAmt;
// If it's edge-case shift (by 0 or by WidestBitWidth-1) we can fold.
if (NewShAmtSplat &&
(NewShAmtSplat->isNullValue() ||
NewShAmtSplat->getUniqueInteger() == WidestBitWidth - 1))
return true;
// We consider *min* leading zeros so a single outlier
// blocks the transform as opposed to allowing it.
if (auto *C = dyn_cast<Constant>(NarrowestShift->getOperand(0))) {
KnownBits Known = computeKnownBits(C, SQ.DL);
unsigned MinLeadZero = Known.countMinLeadingZeros();
// If the value being shifted has at most lowest bit set we can fold.
unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
if (MaxActiveBits <= 1)
return true;
// Precondition: NewShAmt u<= countLeadingZeros(C)
if (NewShAmtSplat && NewShAmtSplat->getUniqueInteger().ule(MinLeadZero))
return true;
}
if (auto *C = dyn_cast<Constant>(WidestShift->getOperand(0))) {
KnownBits Known = computeKnownBits(C, SQ.DL);
unsigned MinLeadZero = Known.countMinLeadingZeros();
// If the value being shifted has at most lowest bit set we can fold.
unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
if (MaxActiveBits <= 1)
return true;
// Precondition: ((WidestBitWidth-1)-NewShAmt) u<= countLeadingZeros(C)
if (NewShAmtSplat) {
APInt AdjNewShAmt =
(WidestBitWidth - 1) - NewShAmtSplat->getUniqueInteger();
if (AdjNewShAmt.ule(MinLeadZero))
return true;
}
}
return false; // Can't tell if it's ok.
};
if (!CanFold())
return nullptr;
}

// All good, we can do this fold.
NewShAmt = ConstantExpr::getZExtOrBitCast(NewShAmt, WidestTy);
X = Builder.CreateZExt(X, WidestTy);
Y = Builder.CreateZExt(Y, WidestTy);
// The shift is the same that was for X.
Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
? Builder.CreateLShr(X, NewShAmt)
Expand Down Expand Up @@ -4981,9 +5025,9 @@ llvm::getFlippedStrictnessPredicateAndConstant(CmpInst::Predicate Pred,
// For scalars, SimplifyICmpInst should have already handled
// the edge cases for us, so we just assert on them.
// For vectors, we must handle the edge cases.
if (auto *CI = dyn_cast<ConstantInt>(C)) {
if (isa<ConstantInt>(C)) {
// A <= MAX -> TRUE ; A >= MIN -> TRUE
assert(ConstantIsOk(CI));
assert(ConstantIsOk(cast<ConstantInt>(C)));
} else if (Type->isVectorTy()) {
// TODO? If the edge cases for vectors were guaranteed to be handled as they
// are for scalar, we could remove the min/max checks. However, to do that,