From 9ddf62b86fc9745edc89b9490a37a0889347267f Mon Sep 17 00:00:00 2001
From: Kamil Dudka
Date: Mon, 3 Sep 2012 22:52:28 +0200
Subject: [PATCH 1/4] cl: always write { on new line when opening fnc body

Suggested by Jakub Filak.
---
 cl/builtins.cc | 6 ++-
 cl/callgraph.cc | 15 ++++---
 cl/cl_chain.cc | 6 ++-
 cl/cl_dotgen.cc | 60 ++++++++++++++++---------
 cl/cl_easy.cc | 3 +-
 cl/cl_factory.cc | 9 ++--
 cl/cl_locator.cc | 6 ++-
 cl/cl_pp.cc | 60 ++++++++++++++++---------
 cl/cl_storage.cc | 54 +++++++++++++++--------
 cl/cl_typedot.cc | 21 ++++++---
 cl/cldebug.cc | 63 +++++++++++++++++---------
 cl/clf_intchk.cc | 66 ++++++++++++++++++----------
 cl/clf_opchk.cc | 9 ++--
 cl/clf_unilabel.cc | 12 +++--
 cl/clf_unswitch.cc | 21 ++++++---
 cl/clutil.cc | 27 ++++++++----
 cl/code_listener.cc | 3 +-
 cl/killer.cc | 21 ++++++---
 cl/loopscan.cc | 6 ++-
 cl/ssd.cc | 27 ++++++++----
 cl/stopwatch.cc | 12 +++--
 cl/storage.cc | 102 ++++++++++++++++++++++++++++---------------
 cl/util.hh | 15 ++++---
 fwnull/cl_fwnull.cc | 30 ++++++++-----
 include/cl/cl_msg.hh | 3 +-
 include/cl/clutil.hh | 15 ++++---
 26 files changed, 448 insertions(+), 224 deletions(-)

diff --git a/cl/builtins.cc b/cl/builtins.cc
index 563f4feb8..a4ca972ac 100644
--- a/cl/builtins.cc
+++ b/cl/builtins.cc
@@ -27,7 +27,8 @@ namespace CodeStorage {
 
-bool isBuiltInFnc(const struct cl_operand &op) {
+bool isBuiltInFnc(const struct cl_operand &op)
+{
     const char *name;
     if (!fncNameFromCst(&name, &op))
         // likely indirect fuction call
@@ -38,7 +39,8 @@ bool isBuiltInFnc(const struct cl_operand &op) {
         || STREQ("VK_ASSERT", name);
 }
 
-bool isBuiltInCall(const Insn &insn) {
+bool isBuiltInCall(const Insn &insn)
+{
     return isBuiltInFnc(insn.operands[/* fnc */ 1]);
 }
 
diff --git a/cl/callgraph.cc b/cl/callgraph.cc
index 64e1fa740..0b7f442cc 100644
--- a/cl/callgraph.cc
+++ b/cl/callgraph.cc
@@ -35,7 +35,8 @@ namespace CallGraph {
 typedef const struct cl_operand &TOp;
 typedef const Insn *TInsn;
 
-Node* allocNodeIfNeeded(Graph &cg, Fnc *const fnc) {
+Node* allocNodeIfNeeded(Graph &cg, Fnc *const fnc)
+{
     Node *&node = fnc->cgNode;
     if (node)
         return node;
@@ -46,7 +47,8 @@ Node* allocNodeIfNeeded(Graph &cg, Fnc *const fnc) {
     return node;
 }
 
-void handleCallback(Graph &cg, Node *node, const TInsn insn, TOp op) {
+void handleCallback(Graph &cg, Node *node, const TInsn insn, TOp op)
+{
     int uid;
     if (!fncUidFromOperand(&uid, &op))
         // not a function
@@ -70,7 +72,8 @@ void handleCallback(Graph &cg, Node *node, const TInsn insn, TOp op) {
     cg.hasCallback = true;
 }
 
-void handleCall(Graph &cg, Node *node, const TInsn insn) {
+void handleCall(Graph &cg, Node *node, const TInsn insn)
+{
     // if there is a call, it is no longer a leaf node
     cg.leaves.erase(node);
 
@@ -92,7 +95,8 @@ void handleCall(Graph &cg, Node *node, const TInsn insn) {
     cg.roots.erase(targetNode);
 }
 
-void handleFnc(Fnc *const fnc) {
+void handleFnc(Fnc *const fnc)
+{
     Graph &cg = fnc->stor->callGraph;
     Node *const node = allocNodeIfNeeded(cg, fnc);
 
@@ -114,7 +118,8 @@ void handleFnc(Fnc *const fnc) {
     }
 }
 
-void buildCallGraph(const Storage &stor) {
+void buildCallGraph(const Storage &stor)
+{
     StopWatch watch;
 
     BOOST_FOREACH(Fnc *fnc, stor.fncs)
diff --git a/cl/cl_chain.cc b/cl/cl_chain.cc
index 3b978d751..db8be307a 100644
--- a/cl/cl_chain.cc
+++ b/cl/cl_chain.cc
@@ -103,11 +103,13 @@ class ClChain: public ICodeListener {
     } \
 } while (0)
 
-ClChain::~ClChain() {
+ClChain::~ClChain()
+{
     CL_CHAIN_FOREACH(destroy);
 }
 
-void ClChain::append(cl_code_listener *item) {
+void ClChain::append(cl_code_listener *item)
+{
     list_.push_back(item);
} diff --git a/cl/cl_dotgen.cc b/cl/cl_dotgen.cc index aee842f3a..652cbe890 100644 --- a/cl/cl_dotgen.cc +++ b/cl/cl_dotgen.cc @@ -228,11 +228,13 @@ void ClDotGenerator::createDotFile(std::ofstream &str, std::string fileName, CL_ERROR("unable to create file '" << fileName << "'"); } -void ClDotGenerator::closeSub(std::ofstream &str) { +void ClDotGenerator::closeSub(std::ofstream &str) +{ str << "}" << std::endl; } -void ClDotGenerator::closeDot(std::ofstream &str) { +void ClDotGenerator::closeDot(std::ofstream &str) +{ ClDotGenerator::closeSub(str); if (!str) @@ -253,21 +255,25 @@ ClDotGenerator::ClDotGenerator(const char *glDotFile): } } -ClDotGenerator::~ClDotGenerator() { +ClDotGenerator::~ClDotGenerator() +{ if (hasGlDotFile_) this->closeDot(glOut_); } -void ClDotGenerator::acknowledge() { +void ClDotGenerator::acknowledge() +{ // we haven't been waiting for acknowledge anyway, sorry... } -void ClDotGenerator::gobbleEdge(std::string dst, EdgeType type) { +void ClDotGenerator::gobbleEdge(std::string dst, EdgeType type) +{ perBbEdgeMap_[dst] = type; perFncEdgeMap_[dst] = type; } -void ClDotGenerator::emitEdge(std::string dst, EdgeType type) { +void ClDotGenerator::emitEdge(std::string dst, EdgeType type) +{ switch (type) { case ET_LC_CALL: case ET_LC_CALL_INDIR: @@ -291,7 +297,8 @@ void ClDotGenerator::emitEdge(std::string dst, EdgeType type) { << " [color=" << EtColors[type] << "];" << std::endl; } -void ClDotGenerator::emitBb() { +void ClDotGenerator::emitBb() +{ // colorize current BB node perFileOut_ << "\t" << SL_QUOTE_BB(bb_) << " [color=" << NtColors[nodeType_] @@ -321,7 +328,8 @@ void ClDotGenerator::emitCallSet(std::ofstream &str, TCallSet &cs, } } -void ClDotGenerator::emitPendingCalls() { +void ClDotGenerator::emitPendingCalls() +{ TCallMultiMap::iterator i; for (i = perFncCalls_.begin(); i != perFncCalls_.end(); ++i) { const string &dst = i->first; @@ -351,7 +359,8 @@ void ClDotGenerator::emitPendingCalls() { perFncEdgeMap_.clear(); } -void ClDotGenerator::emitFncEntry(const char *label) { +void ClDotGenerator::emitFncEntry(const char *label) +{ FILE_FNC_STREAM(SL_SUBGRAPH(fnc_ << "." 
<< label, fnc_ << "() at " << loc_.file << ":" << loc_.line) << "\tcolor=blue;" << std::endl @@ -368,7 +377,8 @@ void ClDotGenerator::emitFncEntry(const char *label) { perFileOut_ << "\tURL=" << SL_QUOTE_URL(fnc_) << ";" << std::endl; } -void ClDotGenerator::emitInsnJmp(const char *label) { +void ClDotGenerator::emitInsnJmp(const char *label) +{ perFncOut_ << "\t" << SL_QUOTE_BB(bb_ << SL_BB_POS_SUFFIX) << " [shape=box, color=black, fontcolor=black," << " style=bold, label=goto];" << std::endl; @@ -396,7 +406,8 @@ void ClDotGenerator::emitInsnCond(const char *then_label, << " [color=green];" << std::endl; } -void ClDotGenerator::emitOpIfNeeded() { +void ClDotGenerator::emitOpIfNeeded() +{ switch (lastInsn_) { case CL_INSN_UNOP: case CL_INSN_BINOP: @@ -417,7 +428,8 @@ void ClDotGenerator::emitOpIfNeeded() { << std::endl; } -void ClDotGenerator::emitInsnCall() { +void ClDotGenerator::emitInsnCall() +{ perFncOut_ << "\t" << SL_QUOTE_BB(bb_ << SL_BB_POS_SUFFIX) << " [shape=box, color=blue, fontcolor=blue, style=dashed," << " label=call];" << std::endl; @@ -429,7 +441,8 @@ void ClDotGenerator::emitInsnCall() { << std::endl; } -void ClDotGenerator::checkForFncRef(const struct cl_operand *op) { +void ClDotGenerator::checkForFncRef(const struct cl_operand *op) +{ if (CL_OPERAND_CST != op->code) return; @@ -447,7 +460,8 @@ void ClDotGenerator::checkForFncRef(const struct cl_operand *op) { perBbCalls_[name].insert(str.str()); } -void ClDotGenerator::file_open(const char *file_name) { +void ClDotGenerator::file_open(const char *file_name) +{ CL_LOC_SET_FILE(loc_, file_name); ClDotGenerator::createDotFile(perFileOut_, file_name, true); perFileOut_ << SL_GRAPH(file_name); @@ -483,7 +497,8 @@ void ClDotGenerator::fnc_open(const struct cl_operand *fnc) << std::endl; } -void ClDotGenerator::fnc_arg_decl(int, const struct cl_operand *) { +void ClDotGenerator::fnc_arg_decl(int, const struct cl_operand *) +{ } void ClDotGenerator::fnc_close() @@ -500,7 +515,8 @@ void ClDotGenerator::fnc_close() bb_.clear(); } -void ClDotGenerator::bb_open(const char *bb_name) { +void ClDotGenerator::bb_open(const char *bb_name) +{ if (!bb_.empty()) // emit last BB this->emitBb(); @@ -516,7 +532,8 @@ void ClDotGenerator::bb_open(const char *bb_name) { << "\tURL=\"\";" << std::endl; } -void ClDotGenerator::insn(const struct cl_insn *cli) { +void ClDotGenerator::insn(const struct cl_insn *cli) +{ switch (cli->code) { case CL_INSN_NOP: case CL_INSN_LABEL: @@ -626,7 +643,8 @@ void ClDotGenerator::insn_call_open(const struct cl_loc *loc, CL_INSN_JMP; } -void ClDotGenerator::insn_call_arg(int, const struct cl_operand *arg_src) { +void ClDotGenerator::insn_call_arg(int, const struct cl_operand *arg_src) +{ this->checkForFncRef(arg_src); } @@ -655,7 +673,8 @@ void ClDotGenerator::insn_switch_case(const struct cl_loc *, << " [color=yellow];" << std::endl; } -void ClDotGenerator::insn_switch_close() { +void ClDotGenerator::insn_switch_close() +{ lastInsn_ = /* FIXME: we have no CL_INSN_SWITCH in enum cl_insn_e */ CL_INSN_JMP; @@ -663,6 +682,7 @@ void ClDotGenerator::insn_switch_close() { // ///////////////////////////////////////////////////////////////////////////// // public interface, see cl_dotgen.hh for more details -ICodeListener* createClDotGenerator(const char *args) { +ICodeListener* createClDotGenerator(const char *args) +{ return new ClDotGenerator(args); } diff --git a/cl/cl_easy.cc b/cl/cl_easy.cc index c0f19a840..7af3feb28 100644 --- a/cl/cl_easy.cc +++ b/cl/cl_easy.cc @@ -80,6 +80,7 @@ class ClEasy: public 
ClStorageBuilder { // ///////////////////////////////////////////////////////////////////////////// // interface, see cl_easy.hh for details -ICodeListener* createClEasy(const char *configString) { +ICodeListener* createClEasy(const char *configString) +{ return new ClEasy(configString); } diff --git a/cl/cl_factory.cc b/cl/cl_factory.cc index 92a49d401..d565cfc56 100644 --- a/cl/cl_factory.cc +++ b/cl/cl_factory.cc @@ -224,7 +224,8 @@ namespace { } } -ClfChainFactory::ClfChainFactory() { +ClfChainFactory::ClfChainFactory() +{ map_["unify_labels_gl"] = createClfUniLabelGl; map_["unify_labels_static"] = createClfUniLabelStatic; map_["unify_labels_fnc"] = createClfUniLabelFnc; @@ -302,11 +303,13 @@ ClFactory::ClFactory(): d->map["typedot"] = &createClTypeDotGenerator; } -ClFactory::~ClFactory() { +ClFactory::~ClFactory() +{ delete d; } -ICodeListener* ClFactory::create(const char *config_string) { +ICodeListener* ClFactory::create(const char *config_string) +{ CL_FACTORY_DEBUG("ClFactory: config_string: " << config_string); TStringMap args; diff --git a/cl/cl_locator.cc b/cl/cl_locator.cc index 4c623e5a9..2c5fbf048 100644 --- a/cl/cl_locator.cc +++ b/cl/cl_locator.cc @@ -98,13 +98,15 @@ ClLocator::ClLocator(): { } -void ClLocator::printLocation(const struct cl_loc *loc) { +void ClLocator::printLocation(const struct cl_loc *loc) +{ CL_DEBUG_MSG(cl_loc_fallback(loc, &lastLoc_), "linearized code follows..."); } // ///////////////////////////////////////////////////////////////////////////// // public interface, see cl_locator.hh for more details -ICodeListener* createClLocator(const char *) { +ICodeListener* createClLocator(const char *) +{ return new ClLocator; } diff --git a/cl/cl_pp.cc b/cl/cl_pp.cc index ad8f034c8..288df8b52 100644 --- a/cl/cl_pp.cc +++ b/cl/cl_pp.cc @@ -138,7 +138,8 @@ ClPrettyPrint::ClPrettyPrint(const char *fileName, bool showTypes): CL_ERROR("unable to create file '" << fileName << "'"); } -ClPrettyPrint::~ClPrettyPrint() { +ClPrettyPrint::~ClPrettyPrint() +{ if (fname_) fstr_.close(); } @@ -243,7 +244,8 @@ namespace { } } -void ClPrettyPrint::printIntegralCst(const struct cl_operand *op) { +void ClPrettyPrint::printIntegralCst(const struct cl_operand *op) +{ const struct cl_type *type = op->type; const int value = op->data.cst.data.cst_int.value; @@ -303,7 +305,8 @@ void ClPrettyPrint::printIntegralCst(const struct cl_operand *op) { } } -void ClPrettyPrint::printCst(const struct cl_operand *op) { +void ClPrettyPrint::printCst(const struct cl_operand *op) +{ enum cl_type_e code = op->data.cst.code; switch (code) { case CL_TYPE_INT: @@ -357,7 +360,8 @@ namespace { } } -void ClPrettyPrint::printBareType(const struct cl_type *clt, bool expandFnc) { +void ClPrettyPrint::printBareType(const struct cl_type *clt, bool expandFnc) +{ string str; for (; clt; clt = clt->items[0].type) { enum cl_type_e code = clt->code; @@ -452,7 +456,8 @@ void ClPrettyPrint::printBareType(const struct cl_type *clt, bool expandFnc) { } } -void ClPrettyPrint::printVarType(const struct cl_operand *op) { +void ClPrettyPrint::printVarType(const struct cl_operand *op) +{ if (op->code == CL_OPERAND_VOID) CL_TRAP; @@ -480,7 +485,8 @@ namespace { } } -void ClPrettyPrint::printNestedVar(const struct cl_operand *op) { +void ClPrettyPrint::printNestedVar(const struct cl_operand *op) +{ switch (op->code) { case CL_OPERAND_VAR: if (!op->data.var->name) { @@ -520,7 +526,8 @@ namespace { } } -void ClPrettyPrint::printOffsetAccessor(const int off) { +void ClPrettyPrint::printOffsetAccessor(const int off) +{ out_ 
<< ssd::Color(C_LIGHT_RED) << "<"; if (0 <= off) @@ -529,7 +536,8 @@ void ClPrettyPrint::printOffsetAccessor(const int off) { out_ << off << ">" << ssd::Color(C_NO_COLOR); } -void ClPrettyPrint::printRecordAccessor(const struct cl_accessor **ac) { +void ClPrettyPrint::printRecordAccessor(const struct cl_accessor **ac) +{ std::string tag; int offset = 0; readItemAccessInfo(*ac, &tag, &offset); @@ -544,7 +552,8 @@ void ClPrettyPrint::printRecordAccessor(const struct cl_accessor **ac) { out_ << SSD_INLINE_COLOR(C_CYAN, "[+" << offset << "]") << tag; } -void ClPrettyPrint::printOperandVar(const struct cl_operand *op) { +void ClPrettyPrint::printOperandVar(const struct cl_operand *op) +{ const struct cl_accessor *ac = op->accessor; this->printVarType(op); @@ -606,7 +615,8 @@ void ClPrettyPrint::printOperandVar(const struct cl_operand *op) { } } -void ClPrettyPrint::printOperand(const struct cl_operand *op) { +void ClPrettyPrint::printOperand(const struct cl_operand *op) +{ if (!op) { CL_DEBUG_MSG(&loc_, "no operand given to " << __FUNCTION__); return; @@ -629,7 +639,8 @@ void ClPrettyPrint::printOperand(const struct cl_operand *op) { } } -void ClPrettyPrint::printAssignmentLhs(const struct cl_operand *lhs) { +void ClPrettyPrint::printAssignmentLhs(const struct cl_operand *lhs) +{ if (!lhs || lhs->code == CL_OPERAND_VOID) { CL_DEBUG_MSG(&loc_, "no lhs given to " << __FUNCTION__); return; @@ -641,13 +652,15 @@ void ClPrettyPrint::printAssignmentLhs(const struct cl_operand *lhs) { << " "; } -void ClPrettyPrint::printInsnNop(const struct cl_insn *) { +void ClPrettyPrint::printInsnNop(const struct cl_insn *) +{ out_ << "\t\t" << SSD_INLINE_COLOR(C_LIGHT_RED, "nop") << std::endl; } -void ClPrettyPrint::printInsnJmp(const struct cl_insn *cli) { +void ClPrettyPrint::printInsnJmp(const struct cl_insn *cli) +{ if (printingArgDecls_) { printingArgDecls_ = false; out_ << SSD_INLINE_COLOR(C_LIGHT_RED, ")") << ":" @@ -661,7 +674,8 @@ void ClPrettyPrint::printInsnJmp(const struct cl_insn *cli) { << std::endl; } -void ClPrettyPrint::printInsnCond(const struct cl_insn *cli) { +void ClPrettyPrint::printInsnCond(const struct cl_insn *cli) +{ const struct cl_operand *src = cli->data.insn_cond.src; const char *label_true = cli->data.insn_cond.then_label; const char *label_false = cli->data.insn_cond.else_label; @@ -689,7 +703,8 @@ void ClPrettyPrint::printInsnCond(const struct cl_insn *cli) { << std::endl; } -void ClPrettyPrint::printInsnRet(const struct cl_insn *cli) { +void ClPrettyPrint::printInsnRet(const struct cl_insn *cli) +{ const struct cl_operand *src = cli->data.insn_ret.src; out_ << "\t\t" @@ -703,13 +718,15 @@ void ClPrettyPrint::printInsnRet(const struct cl_insn *cli) { out_ << std::endl; } -void ClPrettyPrint::printInsnAbort(const struct cl_insn *) { +void ClPrettyPrint::printInsnAbort(const struct cl_insn *) +{ out_ << "\t\t" << SSD_INLINE_COLOR(C_LIGHT_RED, "abort") << std::endl; } -void ClPrettyPrint::printInsnUnop(const struct cl_insn *cli) { +void ClPrettyPrint::printInsnUnop(const struct cl_insn *cli) +{ const enum cl_unop_e code = cli->data.insn_unop.code; const struct cl_operand *dst = cli->data.insn_unop.dst; const struct cl_operand *src = cli->data.insn_unop.src; @@ -748,7 +765,8 @@ void ClPrettyPrint::printInsnUnop(const struct cl_insn *cli) { out_ << std::endl; } -void ClPrettyPrint::printInsnBinop(const struct cl_insn *cli) { +void ClPrettyPrint::printInsnBinop(const struct cl_insn *cli) +{ const enum cl_binop_e code = cli->data.insn_binop.code; const struct cl_operand *dst = 
cli->data.insn_binop.dst; const struct cl_operand *src1 = cli->data.insn_binop.src1; @@ -865,7 +883,8 @@ void ClPrettyPrint::printInsnBinop(const struct cl_insn *cli) { out_ << SSD_INLINE_COLOR(C_LIGHT_RED, ")") << std::endl; } -void ClPrettyPrint::printInsnLabel(const struct cl_insn *cli) { +void ClPrettyPrint::printInsnLabel(const struct cl_insn *cli) +{ const char *name = cli->data.insn_label.name; if (!name) return; @@ -1009,7 +1028,8 @@ void ClPrettyPrint::insn_switch_close() // ///////////////////////////////////////////////////////////////////////////// // public interface, see cl_pp.hh for more details -ICodeListener* createClPrettyPrint(const char *args, bool showTypes) { +ICodeListener* createClPrettyPrint(const char *args, bool showTypes) +{ // check whether a file name is given return (args && *args) ? new ClPrettyPrint(/* file name */ args, showTypes) diff --git a/cl/cl_storage.cc b/cl/cl_storage.cc index 68c01751f..82899f136 100644 --- a/cl/cl_storage.cc +++ b/cl/cl_storage.cc @@ -347,12 +347,14 @@ ClStorageBuilder::ClStorageBuilder(): { } -ClStorageBuilder::~ClStorageBuilder() { +ClStorageBuilder::~ClStorageBuilder() +{ releaseStorage(d->stor); delete d; } -void ClStorageBuilder::acknowledge() { +void ClStorageBuilder::acknowledge() +{ this->run(d->stor); } @@ -377,7 +379,8 @@ void ClStorageBuilder::Private::digInitials(const TOp *op) } } -EVar varCodeByScope(const enum cl_scope_e scope, const bool isArgDecl) { +EVar varCodeByScope(const enum cl_scope_e scope, const bool isArgDecl) +{ switch (scope) { case CL_SCOPE_GLOBAL: case CL_SCOPE_STATIC: @@ -394,7 +397,8 @@ EVar varCodeByScope(const enum cl_scope_e scope, const bool isArgDecl) { } } -bool ClStorageBuilder::Private::digOperandVar(const TOp *op, bool isArgDecl) { +bool ClStorageBuilder::Private::digOperandVar(const TOp *op, bool isArgDecl) +{ const int id = varIdFromOperand(op); // mark as used in the current function @@ -431,7 +435,8 @@ bool ClStorageBuilder::Private::digOperandVar(const TOp *op, bool isArgDecl) { return true; } -void ClStorageBuilder::Private::digOperandCst(const struct cl_operand *op) { +void ClStorageBuilder::Private::digOperandCst(const struct cl_operand *op) +{ const struct cl_cst &cst = op->data.cst; if (CL_TYPE_FNC != cst.code) // we are interested only in fncs for now @@ -468,7 +473,8 @@ void ClStorageBuilder::Private::digOperandCst(const struct cl_operand *op) { nameMap[name] = uid; } -void ClStorageBuilder::Private::digOperand(const TOp *op) { +void ClStorageBuilder::Private::digOperand(const TOp *op) +{ if (!op || CL_OPERAND_VOID == op->code) return; @@ -518,7 +524,8 @@ void ClStorageBuilder::Private::digOperand(const TOp *op) { this->digInitials(op); } -void ClStorageBuilder::Private::openInsn(Insn *newInsn) { +void ClStorageBuilder::Private::openInsn(Insn *newInsn) +{ // set pointer to the owning instance of Storage newInsn->stor = &this->stor; @@ -530,7 +537,8 @@ void ClStorageBuilder::Private::openInsn(Insn *newInsn) { insn = newInsn; } -void ClStorageBuilder::Private::closeInsn() { +void ClStorageBuilder::Private::closeInsn() +{ TOperandList &operands = insn->operands; BOOST_FOREACH(const struct cl_operand &op, operands) { this->digOperand(&op); @@ -548,20 +556,23 @@ void ClStorageBuilder::Private::closeInsn() { insn = 0; } -void ClStorageBuilder::file_open(const char *fileName) { +void ClStorageBuilder::file_open(const char *fileName) +{ if (!fileName) CL_TRAP; d->file = fileName; } -void ClStorageBuilder::file_close() { +void ClStorageBuilder::file_close() +{ // let it honestly crash 
if callback sequence is incorrect since this should // have already been caught by ClfCbSeqChk cl filter d->file = 0; } -void ClStorageBuilder::fnc_open(const struct cl_operand *op) { +void ClStorageBuilder::fnc_open(const struct cl_operand *op) +{ if (CL_OPERAND_CST != op->code) CL_TRAP; @@ -590,7 +601,8 @@ void ClStorageBuilder::fnc_open(const struct cl_operand *op) { d->bb = 0; } -void ClStorageBuilder::fnc_arg_decl(int pos, const struct cl_operand *op) { +void ClStorageBuilder::fnc_arg_decl(int pos, const struct cl_operand *op) +{ if (CL_OPERAND_VAR != op->code) CL_TRAP; @@ -603,18 +615,21 @@ void ClStorageBuilder::fnc_arg_decl(int pos, const struct cl_operand *op) { (void) pos; } -void ClStorageBuilder::fnc_close() { +void ClStorageBuilder::fnc_close() +{ // let it honestly crash if callback sequence is incorrect since this should // have already been caught by ClfCbSeqChk cl filter d->fnc = 0; } -void ClStorageBuilder::bb_open(const char *bb_name) { +void ClStorageBuilder::bb_open(const char *bb_name) +{ ControlFlow &cfg = d->fnc->cfg; d->bb = cfg[bb_name]; } -void ClStorageBuilder::insn(const struct cl_insn *cli) { +void ClStorageBuilder::insn(const struct cl_insn *cli) +{ if (!d->bb) // FIXME: this simply ignores 'jump to entry' insn return; @@ -649,14 +664,16 @@ void ClStorageBuilder::insn_call_open( d->openInsn(insn); } -void ClStorageBuilder::insn_call_arg(int, const struct cl_operand *arg_src) { +void ClStorageBuilder::insn_call_arg(int, const struct cl_operand *arg_src) +{ TOperandList &operands = d->insn->operands; unsigned idx = operands.size(); operands.resize(idx + 1); storeOperand(operands[idx], arg_src); } -void ClStorageBuilder::insn_call_close() { +void ClStorageBuilder::insn_call_close() +{ d->closeInsn(); // switch back preventing for next instructions @@ -737,6 +754,7 @@ void ClStorageBuilder::insn_switch_case( } } -void ClStorageBuilder::insn_switch_close() { +void ClStorageBuilder::insn_switch_close() +{ d->closeInsn(); } diff --git a/cl/cl_typedot.cc b/cl/cl_typedot.cc index 5e66e3ac6..0836cf64d 100644 --- a/cl/cl_typedot.cc +++ b/cl/cl_typedot.cc @@ -203,7 +203,8 @@ ClTypeDotGenerator::ClTypeDotGenerator(const char *glDotFile) << "\tlabelloc=t;" << std::endl; } -ClTypeDotGenerator::~ClTypeDotGenerator() { +ClTypeDotGenerator::~ClTypeDotGenerator() +{ glOut_ << "}" << std::endl; if (!glOut_) { CL_WARN("error detected while closing a file"); @@ -211,7 +212,8 @@ ClTypeDotGenerator::~ClTypeDotGenerator() { glOut_.close(); } -void ClTypeDotGenerator::acknowledge() { +void ClTypeDotGenerator::acknowledge() +{ // we haven't been waiting for acknowledge anyway, sorry... 
} @@ -228,7 +230,8 @@ namespace { } // FIXME: copy pasted from ClPrettyPrint::printVarType -void ClTypeDotGenerator::printType(const struct cl_type *clt) { +void ClTypeDotGenerator::printType(const struct cl_type *clt) +{ string str; for (; clt; clt = clt->items[0].type) { enum cl_type_e code = clt->code; @@ -312,7 +315,8 @@ void ClTypeDotGenerator::gobbleEdge(cl_type_uid_t src, cl_type_uid_t dst, pendingEdges_.push_back(Edge(src, dst, code, strLabel)); } -void ClTypeDotGenerator::emitPendingEdges() { +void ClTypeDotGenerator::emitPendingEdges() +{ TEdgeList::iterator i; for (i = pendingEdges_.begin(); i != pendingEdges_.end(); ++i) { const Edge &e = *i; @@ -372,7 +376,8 @@ void ClTypeDotGenerator::digOneType(const struct cl_type *type, TStack &st) } } -void ClTypeDotGenerator::handleType(const struct cl_type *clt) { +void ClTypeDotGenerator::handleType(const struct cl_type *clt) +{ TStack st; st.push(clt); @@ -391,7 +396,8 @@ void ClTypeDotGenerator::handleType(const struct cl_type *clt) { this->emitPendingEdges(); } -void ClTypeDotGenerator::handleOperand(const struct cl_operand *op) { +void ClTypeDotGenerator::handleOperand(const struct cl_operand *op) +{ if (!op || op->code == CL_OPERAND_VOID) return; @@ -404,6 +410,7 @@ void ClTypeDotGenerator::handleOperand(const struct cl_operand *op) { // ///////////////////////////////////////////////////////////////////////////// // public interface, see cl_typedot.hh for more details -ICodeListener* createClTypeDotGenerator(const char *args) { +ICodeListener* createClTypeDotGenerator(const char *args) +{ return new ClTypeDotGenerator(args); } diff --git a/cl/cldebug.cc b/cl/cldebug.cc index f99f30654..5e9db0930 100644 --- a/cl/cldebug.cc +++ b/cl/cldebug.cc @@ -26,7 +26,8 @@ #include -void cltToStreamCore(std::ostream &out, const struct cl_type *clt) { +void cltToStreamCore(std::ostream &out, const struct cl_type *clt) +{ out << "*((const struct cl_type *)" << static_cast(clt) << ")"; @@ -99,7 +100,8 @@ class DumpCltVisitor { } }; -void cltToStream(std::ostream &out, const struct cl_type *clt, unsigned depth) { +void cltToStream(std::ostream &out, const struct cl_type *clt, unsigned depth) +{ if (!depth) { cltToStreamCore(out, clt); return; @@ -119,7 +121,8 @@ void cltToStream(std::ostream &out, const struct cl_type *clt, unsigned depth) { traverseTypeIc(clt, visitor); } -void acToStream(std::ostream &out, const struct cl_accessor *ac, bool oneline) { +void acToStream(std::ostream &out, const struct cl_accessor *ac, bool oneline) +{ if (!ac) { out << "(empty)"; if (!oneline) @@ -172,7 +175,8 @@ void acToStream(std::ostream &out, const struct cl_accessor *ac, bool oneline) { namespace { -void operandToStreamCstInt(std::ostream &str, const struct cl_operand &op) { +void operandToStreamCstInt(std::ostream &str, const struct cl_operand &op) +{ const struct cl_cst &cst = op.data.cst; const int val = cst.data.cst_int.value; @@ -206,7 +210,8 @@ void operandToStreamCstInt(std::ostream &str, const struct cl_operand &op) { } } -void operandToStreamCst(std::ostream &str, const struct cl_operand &op) { +void operandToStreamCst(std::ostream &str, const struct cl_operand &op) +{ const struct cl_cst &cst = op.data.cst; const enum cl_type_e code = cst.code; switch (code) { @@ -246,7 +251,8 @@ void operandToStreamCst(std::ostream &str, const struct cl_operand &op) { } } -const char* fieldName(const struct cl_accessor *ac) { +const char* fieldName(const struct cl_accessor *ac) +{ CL_BREAK_IF(!ac || ac->code != CL_ACCESSOR_ITEM); const struct cl_type *clt = 
ac->type; @@ -261,7 +267,8 @@ const char* fieldName(const struct cl_accessor *ac) { : ""; } -void arrayIdxToStream(std::ostream &str, const struct cl_operand *idx) { +void arrayIdxToStream(std::ostream &str, const struct cl_operand *idx) +{ if (CL_OPERAND_CST != idx->code) { str << "[...]"; return; @@ -277,7 +284,8 @@ void arrayIdxToStream(std::ostream &str, const struct cl_operand *idx) { str << "[" << cst.data.cst_int.value << "]"; } -void operandToStreamAcs(std::ostream &str, const struct cl_accessor *ac) { +void operandToStreamAcs(std::ostream &str, const struct cl_accessor *ac) +{ if (!ac) return; @@ -320,7 +328,8 @@ void operandToStreamAcs(std::ostream &str, const struct cl_accessor *ac) { } } -void operandToStreamVar(std::ostream &str, const struct cl_operand &op) { +void operandToStreamVar(std::ostream &str, const struct cl_operand &op) +{ const struct cl_accessor *ac = op.accessor; // FIXME: copy/pasted from cl_pp.cc @@ -353,7 +362,8 @@ void operandToStreamVar(std::ostream &str, const struct cl_operand &op) { } // namespace -void operandToStream(std::ostream &str, const struct cl_operand &op) { +void operandToStream(std::ostream &str, const struct cl_operand &op) +{ const enum cl_operand_e code = op.code; switch (code) { case CL_OPERAND_VOID: @@ -442,7 +452,8 @@ void binOpToStream(std::ostream &str, int subCode, str << ")"; } -void callToStream(std::ostream &str, const CodeStorage::TOperandList &opList) { +void callToStream(std::ostream &str, const CodeStorage::TOperandList &opList) +{ const struct cl_operand &dst = opList[/* dst */ 0]; if (CL_OPERAND_VOID != dst.code) { operandToStream(str, dst); @@ -460,7 +471,8 @@ void callToStream(std::ostream &str, const CodeStorage::TOperandList &opList) { str << ")"; } -void retToStream(std::ostream &str, const struct cl_operand &src) { +void retToStream(std::ostream &str, const struct cl_operand &src) +{ str << "return"; if (CL_OPERAND_VOID == src.code) @@ -472,7 +484,8 @@ void retToStream(std::ostream &str, const struct cl_operand &src) { } // namespace -void insnToStream(std::ostream &str, const CodeStorage::Insn &insn) { +void insnToStream(std::ostream &str, const CodeStorage::Insn &insn) +{ const CodeStorage::TOperandList &opList = insn.operands; const CodeStorage::TTargetList &tList = insn.targets; @@ -533,34 +546,42 @@ void insnToStream(std::ostream &str, const CodeStorage::Insn &insn) { using std::cout; -void cl_dump(const struct cl_type *clt) { +void cl_dump(const struct cl_type *clt) +{ cltToStream(cout, clt, /* depth */ 3U); } -void cl_dump(const struct cl_type *clt, unsigned depth) { +void cl_dump(const struct cl_type *clt, unsigned depth) +{ cltToStream(cout, clt, depth); } -void cl_dump(const struct cl_accessor *ac) { +void cl_dump(const struct cl_accessor *ac) +{ acToStream(cout, ac, /* oneline */ false); } -void cl_dump(const struct cl_accessor &ac) { +void cl_dump(const struct cl_accessor &ac) +{ cl_dump(&ac); } -void cl_dump(const struct cl_operand &op) { +void cl_dump(const struct cl_operand &op) +{ cout << op << "\n"; } -void cl_dump(const struct cl_operand *op) { +void cl_dump(const struct cl_operand *op) +{ cl_dump(*op); } -void cl_dump(const struct CodeStorage::Insn &insn) { +void cl_dump(const struct CodeStorage::Insn &insn) +{ cout << insn << "\n"; } -void cl_dump(const struct CodeStorage::Insn *insn) { +void cl_dump(const struct CodeStorage::Insn *insn) +{ cl_dump(*insn); } diff --git a/cl/clf_intchk.cc b/cl/clf_intchk.cc index 1e92e279b..d8a1c3437 100644 --- a/cl/clf_intchk.cc +++ b/cl/clf_intchk.cc @@ -394,7 
+394,8 @@ ClfCbSeqChk::ClfCbSeqChk(ICodeListener *slave): { } -const char* ClfCbSeqChk::toString(EState state) { +const char* ClfCbSeqChk::toString(EState state) +{ #define CASE_TO_STRING(state) case state: return #state; switch (state) { CASE_TO_STRING(S_INIT) @@ -412,16 +413,19 @@ const char* ClfCbSeqChk::toString(EState state) { } } -void ClfCbSeqChk::emitUnexpected(const char *what) { +void ClfCbSeqChk::emitUnexpected(const char *what) +{ CL_ERROR_MSG(&loc_, "unexpected callback in state " << toString(state_) << " (" << what << ")"); } -void ClfCbSeqChk::emitUnexpected(EState state) { +void ClfCbSeqChk::emitUnexpected(EState state) +{ this->emitUnexpected(toString(state)); } -void ClfCbSeqChk::setState(EState newState) { +void ClfCbSeqChk::setState(EState newState) +{ switch (state_) { case S_INIT: switch (newState) { @@ -488,17 +492,20 @@ void ClfCbSeqChk::setState(EState newState) { state_ = newState; } -void ClfCbSeqChk::chkArgDecl() { +void ClfCbSeqChk::chkArgDecl() +{ if (S_FNC_DECL != state_) this->emitUnexpected("fnc_arg_decl"); } -void ClfCbSeqChk::chkInsnNop() { +void ClfCbSeqChk::chkInsnNop() +{ if (S_BLOCK_LEVEL != state_) this->emitUnexpected("CL_INSN_NOP"); } -void ClfCbSeqChk::chkInsnJmp() { +void ClfCbSeqChk::chkInsnJmp() +{ switch (state_) { case S_FNC_DECL: case S_BLOCK_LEVEL: @@ -511,60 +518,70 @@ void ClfCbSeqChk::chkInsnJmp() { state_ = S_FNC_BODY; } -void ClfCbSeqChk::chkInsnCond() { +void ClfCbSeqChk::chkInsnCond() +{ if (S_BLOCK_LEVEL != state_) this->emitUnexpected("CL_INSN_COND"); state_ = S_FNC_BODY; } -void ClfCbSeqChk::chkInsnRet() { +void ClfCbSeqChk::chkInsnRet() +{ if (S_BLOCK_LEVEL != state_) this->emitUnexpected("CL_INSN_RET"); state_ = S_FNC_BODY; } -void ClfCbSeqChk::chkInsnAbort() { +void ClfCbSeqChk::chkInsnAbort() +{ if (S_BLOCK_LEVEL != state_) this->emitUnexpected("CL_INSN_ABORT"); state_ = S_FNC_BODY; } -void ClfCbSeqChk::chkInsnUnop() { +void ClfCbSeqChk::chkInsnUnop() +{ if (S_BLOCK_LEVEL != state_) this->emitUnexpected("CL_INSN_UNOP"); } -void ClfCbSeqChk::chkInsnBinop() { +void ClfCbSeqChk::chkInsnBinop() +{ if (S_BLOCK_LEVEL != state_) this->emitUnexpected("CL_INSN_BINOP"); } -void ClfCbSeqChk::chkInsnLabel() { +void ClfCbSeqChk::chkInsnLabel() +{ if (S_BLOCK_LEVEL != state_) this->emitUnexpected("CL_INSN_LABEL"); } -void ClfCbSeqChk::chkInsnCallArg() { +void ClfCbSeqChk::chkInsnCallArg() +{ if (S_INSN_CALL != state_) this->emitUnexpected("insn_call_arg"); } -void ClfCbSeqChk::setCallClose() { +void ClfCbSeqChk::setCallClose() +{ if (S_INSN_CALL != state_) this->emitUnexpected("insn_call_close"); state_ = S_BLOCK_LEVEL; } -void ClfCbSeqChk::chkInsnSwitchCase() { +void ClfCbSeqChk::chkInsnSwitchCase() +{ if (S_INSN_SWITCH != state_) this->emitUnexpected("insn_switch_case"); } -void ClfCbSeqChk::setSwitchClose() { +void ClfCbSeqChk::setSwitchClose() +{ if (S_INSN_SWITCH != state_) this->emitUnexpected("insn_switch_close"); @@ -580,11 +597,13 @@ ClfLabelChk::ClfLabelChk(ICodeListener *slave): { } -void ClfLabelChk::reset() { +void ClfLabelChk::reset() +{ map_.clear(); } -void ClfLabelChk::defineLabel(const char *label) { +void ClfLabelChk::defineLabel(const char *label) +{ LabelState &ls = map_[label]; if (ls.defined) { CL_ERROR_MSG(&loc_, "redefinition of label '" << label << "'"); @@ -595,14 +614,16 @@ void ClfLabelChk::defineLabel(const char *label) { ls.loc = loc_; } -void ClfLabelChk::reqLabel(const char *label) { +void ClfLabelChk::reqLabel(const char *label) +{ LabelState &ls = map_[label]; ls.reachable = true; if (!ls.loc.file) ls.loc 
= loc_; } -void ClfLabelChk::emitWarnings() { +void ClfLabelChk::emitWarnings() +{ TMap::iterator i; for (i = map_.begin(); i != map_.end(); ++i) { const std::string label = i->first; @@ -632,7 +653,8 @@ namespace { // ///////////////////////////////////////////////////////////////////////////// // public interface, see clf_intchk.hh for more details -ICodeListener* createClfIntegrityChk(ICodeListener *slave) { +ICodeListener* createClfIntegrityChk(ICodeListener *slave) +{ return usageChk( new ClfLabelChk( new ClfCbSeqChk(slave))); diff --git a/cl/clf_opchk.cc b/cl/clf_opchk.cc index 2730a29ce..c13036109 100644 --- a/cl/clf_opchk.cc +++ b/cl/clf_opchk.cc @@ -26,7 +26,8 @@ ClfOpCheckerBase::ClfOpCheckerBase(ICodeListener *slave): { } -void ClfOpCheckerBase::handleArrayIdx(const struct cl_operand *op) { +void ClfOpCheckerBase::handleArrayIdx(const struct cl_operand *op) +{ if (CL_OPERAND_VOID == op->code) return; @@ -41,7 +42,8 @@ void ClfOpCheckerBase::handleArrayIdx(const struct cl_operand *op) { } } -void ClfOpCheckerBase::handleSrc(const struct cl_operand *op) { +void ClfOpCheckerBase::handleSrc(const struct cl_operand *op) +{ if (CL_OPERAND_VOID == op->code) return; @@ -60,7 +62,8 @@ void ClfOpCheckerBase::handleSrc(const struct cl_operand *op) { this->handleArrayIdx(op); } -void ClfOpCheckerBase::handleDstSrc(const struct cl_operand *op) { +void ClfOpCheckerBase::handleDstSrc(const struct cl_operand *op) +{ if (CL_OPERAND_VOID == op->code) return; diff --git a/cl/clf_unilabel.cc b/cl/clf_unilabel.cc index 4ad04f4c7..5914c5407 100644 --- a/cl/clf_unilabel.cc +++ b/cl/clf_unilabel.cc @@ -126,13 +126,15 @@ ClfUniLabel::ClfUniLabel(ICodeListener *slave, cl_scope_e scope): } } -std::string ClfUniLabel::resolveLabel(const char *label) { +std::string ClfUniLabel::resolveLabel(const char *label) +{ std::ostringstream str; str << "L" << this->labelLookup(label); return str.str(); } -int ClfUniLabel::labelLookup(const char *label) { +int ClfUniLabel::labelLookup(const char *label) +{ std::string str(label); TMap::iterator i = map_.find(str); @@ -143,13 +145,15 @@ int ClfUniLabel::labelLookup(const char *label) { return last_; } -void ClfUniLabel::reset() { +void ClfUniLabel::reset() +{ map_.clear(); last_ = 0; } // ///////////////////////////////////////////////////////////////////////////// // public interface, see clf_unilabel.hh for more details -ICodeListener* createClfUniLabel(ICodeListener *slave, cl_scope_e scope) { +ICodeListener* createClfUniLabel(ICodeListener *slave, cl_scope_e scope) +{ return new ClfUniLabel(slave, scope); } diff --git a/cl/clf_unswitch.cc b/cl/clf_unswitch.cc index 05daf3a63..1f2bbb20e 100644 --- a/cl/clf_unswitch.cc +++ b/cl/clf_unswitch.cc @@ -33,7 +33,8 @@ #define NULLIFY(what) \ memset(&(what), 0, sizeof (what)) -int getCaseVal(const struct cl_operand *op) { +int getCaseVal(const struct cl_operand *op) +{ CL_BREAK_IF(!op || !op->type); enum cl_type_e code = op->type->code; @@ -116,7 +117,8 @@ class ClfUnfoldSwitch: public ClFilterBase { using std::string; -ClfUnfoldSwitch::~ClfUnfoldSwitch() { +ClfUnfoldSwitch::~ClfUnfoldSwitch() +{ BOOST_FOREACH(struct cl_var *clv, ptrs_) { delete clv; } @@ -124,7 +126,8 @@ ClfUnfoldSwitch::~ClfUnfoldSwitch() { // FIXME: duplicated code from clf_uniregs.cc // TODO: implement shared module providing this -void ClfUnfoldSwitch::cloneSwitchSrc(const struct cl_operand *op) { +void ClfUnfoldSwitch::cloneSwitchSrc(const struct cl_operand *op) +{ CL_BREAK_IF(!op); src_ = *op; @@ -142,7 +145,8 @@ void 
ClfUnfoldSwitch::cloneSwitchSrc(const struct cl_operand *op) { // FIXME: duplicated code from clf_uniregs.cc // TODO: implement shared module providing this -void ClfUnfoldSwitch::freeClonedSwitchSrc() { +void ClfUnfoldSwitch::freeClonedSwitchSrc() +{ struct cl_accessor *ac = src_.accessor; while (ac) { struct cl_accessor *next = ac->next; @@ -154,7 +158,8 @@ void ClfUnfoldSwitch::freeClonedSwitchSrc() { } } -struct cl_var* ClfUnfoldSwitch::acquireClVar() { +struct cl_var* ClfUnfoldSwitch::acquireClVar() +{ struct cl_var *clv = new struct cl_var; memset(clv, 0, sizeof *clv); clv->uid = /* XXX */ 0x400000 + switchCnt_; @@ -217,7 +222,8 @@ void ClfUnfoldSwitch::emitCase(int cst, struct cl_type *type, const char *label) ClFilterBase::bb_open(aux_label); } -void ClfUnfoldSwitch::emitDefault() { +void ClfUnfoldSwitch::emitDefault() +{ if (defLabel_.empty()) CL_TRAP; @@ -232,7 +238,8 @@ void ClfUnfoldSwitch::emitDefault() { // ///////////////////////////////////////////////////////////////////////////// // public interface, see clf_unswitch.hh for more details -ICodeListener* createClfUnfoldSwitch(ICodeListener *slave) { +ICodeListener* createClfUnfoldSwitch(ICodeListener *slave) +{ return createClfUniLabel(new ClfUnfoldSwitch( createClfUniLabel(slave, CL_SCOPE_GLOBAL)), CL_SCOPE_GLOBAL); diff --git a/cl/clutil.cc b/cl/clutil.cc index f35f6fa5a..36fb3da34 100644 --- a/cl/clutil.cc +++ b/cl/clutil.cc @@ -29,7 +29,8 @@ #include #include -bool operator==(const struct cl_type &a, const struct cl_type &b) { +bool operator==(const struct cl_type &a, const struct cl_type &b) +{ // go through the given types recursively and match UIDs etc. typedef std::pair TItem; std::stack todo; @@ -100,7 +101,8 @@ bool operator==(const struct cl_type &a, const struct cl_type &b) { return true; } -const struct cl_type* targetTypeOfPtr(const struct cl_type *clt) { +const struct cl_type* targetTypeOfPtr(const struct cl_type *clt) +{ if (!clt || clt->code != CL_TYPE_PTR) return /* not a pointer */ 0; @@ -111,7 +113,8 @@ const struct cl_type* targetTypeOfPtr(const struct cl_type *clt) { return clt; } -const struct cl_type* targetTypeOfArray(const struct cl_type *clt) { +const struct cl_type* targetTypeOfArray(const struct cl_type *clt) +{ CL_BREAK_IF(!clt || clt->code != CL_TYPE_ARRAY || clt->item_cnt != 1); clt = clt->items[/* target */ 0].type; @@ -119,7 +122,8 @@ const struct cl_type* targetTypeOfArray(const struct cl_type *clt) { return clt; } -bool seekRefAccessor(const struct cl_accessor *ac) { +bool seekRefAccessor(const struct cl_accessor *ac) +{ for(; ac; ac = ac->next) { if (CL_ACCESSOR_REF != ac->code) continue; @@ -133,7 +137,8 @@ bool seekRefAccessor(const struct cl_accessor *ac) { return false; } -int intCstFromOperand(const struct cl_operand *op) { +int intCstFromOperand(const struct cl_operand *op) +{ CL_BREAK_IF(CL_OPERAND_CST != op->code); const struct cl_cst &cst = op->data.cst; @@ -142,7 +147,8 @@ int intCstFromOperand(const struct cl_operand *op) { return cst.data.cst_int.value; } -int varIdFromOperand(const struct cl_operand *op, const char **pName) { +int varIdFromOperand(const struct cl_operand *op, const char **pName) +{ CL_BREAK_IF(CL_OPERAND_VAR != op->code); if (pName) *pName = op->data.var->name; @@ -150,7 +156,8 @@ int varIdFromOperand(const struct cl_operand *op, const char **pName) { return op->data.var->uid; } -bool fncNameFromCst(const char **pName, const struct cl_operand *op) { +bool fncNameFromCst(const char **pName, const struct cl_operand *op) +{ if (CL_OPERAND_CST != op->code) 
return false; @@ -167,7 +174,8 @@ bool fncNameFromCst(const char **pName, const struct cl_operand *op) { return !!fncName; } -bool fncUidFromOperand(int *pUid, const struct cl_operand *op) { +bool fncUidFromOperand(int *pUid, const struct cl_operand *op) +{ if (CL_OPERAND_CST != op->code) return false; @@ -198,7 +206,8 @@ std::string varToString( return str.str(); } -int offsetByIdxChain(const struct cl_type *clt, const TFieldIdxChain &ic) { +int offsetByIdxChain(const struct cl_type *clt, const TFieldIdxChain &ic) +{ int off = 0; BOOST_FOREACH(const int idx, ic) { diff --git a/cl/code_listener.cc b/cl/code_listener.cc index 9f6edf9dc..9e6a06f5a 100644 --- a/cl/code_listener.cc +++ b/cl/code_listener.cc @@ -165,7 +165,8 @@ void cl_global_cleanup(void) free((char *)app_name); } -bool cl_is_term_insn(enum cl_insn_e code) { +bool cl_is_term_insn(enum cl_insn_e code) +{ switch (code) { case CL_INSN_JMP: case CL_INSN_COND: diff --git a/cl/killer.cc b/cl/killer.cc index eb1d0c33f..b0a57df2d 100644 --- a/cl/killer.cc +++ b/cl/killer.cc @@ -78,7 +78,8 @@ struct Data { } }; -void scanOperand(BlockData &bData, const cl_operand &op, bool dst) { +void scanOperand(BlockData &bData, const cl_operand &op, bool dst) +{ VK_DEBUG(4, "scanOperand: " << op << ((dst) ? " [dst]" : " [src]")); bool fieldOfComp = false; @@ -134,7 +135,8 @@ void scanOperand(BlockData &bData, const cl_operand &op, bool dst) { VK_DEBUG(3, "gen(" << name << ")"); } -void scanInsn(BlockData &bData, const Insn &insn) { +void scanInsn(BlockData &bData, const Insn &insn) +{ VK_DEBUG_MSG(3, &insn.loc, "scanInsn: " << insn); const TOperandList opList = insn.operands; @@ -170,7 +172,8 @@ void scanInsn(BlockData &bData, const Insn &insn) { } } -void updateBlock(Data &data, TBlock bb) { +void updateBlock(Data &data, TBlock bb) +{ VK_DEBUG_MSG(2, &bb->front()->loc, "updateBlock: " << bb->name()); BlockData &bData = data.blocks[bb]; bool anyChange = false; @@ -199,7 +202,8 @@ void updateBlock(Data &data, TBlock bb) { data.todo.insert(bbDst); } -void computeFixPoint(Data &data) { +void computeFixPoint(Data &data) +{ // fixed-point computation unsigned cntSteps = 1; TBlockSet &todo = data.todo; @@ -312,7 +316,8 @@ void commitInsn( } } -void commitBlock(Data &data, TBlock bb) { +void commitBlock(Data &data, TBlock bb) +{ const TTargetList &targets = bb->targets(); const unsigned cntTargets = targets.size(); const bool multipleTargets = (1 < cntTargets); @@ -381,7 +386,8 @@ void commitBlock(Data &data, TBlock bb) { } } -void analyzeFnc(Fnc &fnc) { +void analyzeFnc(Fnc &fnc) +{ // shared state info Data data(*fnc.stor); @@ -415,7 +421,8 @@ void analyzeFnc(Fnc &fnc) { } // namespace VarKiller -void killLocalVariables(Storage &stor) { +void killLocalVariables(Storage &stor) +{ StopWatch watch; // analyze all _defined_ functions diff --git a/cl/loopscan.cc b/cl/loopscan.cc index 38efc1ae1..79dfb8b43 100644 --- a/cl/loopscan.cc +++ b/cl/loopscan.cc @@ -67,7 +67,8 @@ typedef std::stack TDfsStack; typedef std::pair TCfgEdge; typedef std::set TEdgeSet; -void analyzeFnc(Fnc &fnc) { +void analyzeFnc(Fnc &fnc) +{ const TLoc loc = &fnc.def.data.cst.data.cst_fnc.loc; LS_DEBUG_MSG(2, loc, ">>> entering " << nameOf(fnc) << "()"); @@ -142,7 +143,8 @@ void analyzeFnc(Fnc &fnc) { } // namespace LoopScan -void findLoopClosingEdges(Storage &stor) { +void findLoopClosingEdges(Storage &stor) +{ StopWatch watch; // go through all _defined_ functions diff --git a/cl/ssd.cc b/cl/ssd.cc index 3f2b2a409..8ab6a7a1e 100644 --- a/cl/ssd.cc +++ b/cl/ssd.cc @@ -31,33 +31,39 @@ 
namespace ssd { // ColorConsole implementation bool ColorConsole::enabled_ = false; -void ColorConsole::enable(bool value) { +void ColorConsole::enable(bool value) +{ enabled_ = value; } -bool ColorConsole::isEnabled() { +bool ColorConsole::isEnabled() +{ return enabled_; } -void ColorConsole::enableForTerm() { +void ColorConsole::enableForTerm() +{ #if HAVE_ISATTY enabled_ = isatty(STDOUT_FILENO) && isatty(STDERR_FILENO); #endif } -void ColorConsole::enableForTerm(int fd) { +void ColorConsole::enableForTerm(int fd) +{ #if HAVE_ISATTY enabled_ = isatty(fd); #endif } -void ColorConsole::enableIfCoutIsTerm() { +void ColorConsole::enableIfCoutIsTerm() +{ #if HAVE_ISATTY enabled_ = isatty(STDOUT_FILENO); #endif } -void ColorConsole::enableIfCerrIsTerm() { +void ColorConsole::enableIfCerrIsTerm() +{ #if HAVE_ISATTY enabled_ = isatty(STDERR_FILENO); #endif @@ -65,7 +71,8 @@ void ColorConsole::enableIfCerrIsTerm() { // ///////////////////////////////////////////////////////////////////////////// // Color implementation -std::ostream& operator<< (std::ostream &stream, const Color &color) { +std::ostream& operator<< (std::ostream &stream, const Color &color) +{ static const char ESC = '\033'; if (!ColorConsole::isEnabled()) return stream; @@ -101,11 +108,13 @@ Colorize::Colorize(std::ostream &stream, EColor color): stream_ << Color(color); } -Colorize::~Colorize() { +Colorize::~Colorize() +{ stream_ << Color(C_NO_COLOR); } -std::ostream& Colorize::stream() { +std::ostream& Colorize::stream() +{ return stream_; } diff --git a/cl/stopwatch.cc b/cl/stopwatch.cc index 0081d38d5..117ccbbd4 100644 --- a/cl/stopwatch.cc +++ b/cl/stopwatch.cc @@ -31,21 +31,25 @@ StopWatch::StopWatch(): this->reset(); } -StopWatch::~StopWatch() { +StopWatch::~StopWatch() +{ delete d; } -void StopWatch::reset() { +void StopWatch::reset() +{ d->start = clock(); } -float /* sec */ StopWatch::elapsed() const { +float /* sec */ StopWatch::elapsed() const +{ static const float RATIO = CLOCKS_PER_SEC; const float diff = clock() - d->start; return diff/RATIO; } -std::ostream& operator<<(std::ostream &str, const StopWatch &watch) { +std::ostream& operator<<(std::ostream &str, const StopWatch &watch) +{ using namespace std; const std::ios_base::fmtflags oldFlags = str.flags(); diff --git a/cl/storage.cc b/cl/storage.cc index 3ec716a36..fceed24d8 100644 --- a/cl/storage.cc +++ b/cl/storage.cc @@ -95,7 +95,8 @@ Var::Var(): { } -Var::~Var() { +Var::~Var() +{ } Var::Var(EVar code_, const struct cl_operand *op): @@ -137,7 +138,8 @@ Var::Var(EVar code_, const struct cl_operand *op): CL_BREAK_IF("attempt to create invalid CodeStorage::Var object"); } -bool isOnStack(const Var &var) { +bool isOnStack(const Var &var) +{ const EVar code = var.code; switch (code) { case VAR_FNC_ARG: @@ -166,7 +168,8 @@ VarDb::VarDb(): { } -VarDb::~VarDb() { +VarDb::~VarDb() +{ BOOST_FOREACH(const Var &var, vars_) BOOST_FOREACH(const Insn *insn, var.initials) destroyInsn(const_cast(insn)); @@ -174,11 +177,13 @@ VarDb::~VarDb() { delete d; } -Var& VarDb::operator[](int uid) { +Var& VarDb::operator[](int uid) +{ return dbLookup(d->db, vars_, uid); } -const Var& VarDb::operator[](int uid) const { +const Var& VarDb::operator[](int uid) const +{ return dbConstLookup(d->db, vars_, uid); } @@ -209,17 +214,20 @@ TypeDb::TypeDb(): { } -TypeDb::~TypeDb() { +TypeDb::~TypeDb() +{ delete d; } -void TypeDb::Private::updatePtrSizeof(int size, int *pField) { +void TypeDb::Private::updatePtrSizeof(int size, int *pField) +{ CL_BREAK_IF(size <= 0); CL_BREAK_IF(-1 != *pField && 
*pField != size); *pField = size; } -void TypeDb::Private::digPtrSizeof(const struct cl_type *clt) { +void TypeDb::Private::digPtrSizeof(const struct cl_type *clt) +{ if (CL_TYPE_PTR != clt->code) return; @@ -237,7 +245,8 @@ void TypeDb::Private::digPtrSizeof(const struct cl_type *clt) { this->genericDataPtr = clt; } -bool TypeDb::insert(const struct cl_type *clt) { +bool TypeDb::insert(const struct cl_type *clt) +{ if (!clt) { CL_DEBUG("TypeDb::insert() got a NULL pointer"); return false; @@ -258,19 +267,23 @@ bool TypeDb::insert(const struct cl_type *clt) { return true; } -int TypeDb::codePtrSizeof() const { +int TypeDb::codePtrSizeof() const +{ return d->codePtrSizeof; } -int TypeDb::dataPtrSizeof() const { +int TypeDb::dataPtrSizeof() const +{ return d->dataPtrSizeof; } -const struct cl_type* TypeDb::genericDataPtr() const { +const struct cl_type* TypeDb::genericDataPtr() const +{ return d->genericDataPtr; } -void readTypeTree(TypeDb &db, const struct cl_type *clt) { +void readTypeTree(TypeDb &db, const struct cl_type *clt) +{ if (!clt) { #if 0 CL_DEBUG("readTypeTree() got a NULL pointer"); @@ -293,7 +306,8 @@ void readTypeTree(TypeDb &db, const struct cl_type *clt) { } } -const struct cl_type* TypeDb::operator[](int uid) const { +const struct cl_type* TypeDb::operator[](int uid) const +{ typedef Private::TMap TDb; TDb &db = d->db; TDb::iterator iter = db.find(uid); @@ -312,7 +326,8 @@ const struct cl_type* TypeDb::operator[](int uid) const { // ///////////////////////////////////////////////////////////////////////////// // Block implementation -void Block::append(Insn *insn) { +void Block::append(Insn *insn) +{ #ifndef NDEBUG if (!insns_.empty()) { // check insn sequence @@ -324,28 +339,33 @@ void Block::append(Insn *insn) { insns_.push_back(insn); } -void Block::appendPredecessor(Block *pred) { +void Block::appendPredecessor(Block *pred) +{ inbound_.push_back(pred); } -const Insn* Block::front() const { +const Insn* Block::front() const +{ CL_BREAK_IF(insns_.empty()); return insns_.front(); } -const Insn* Block::back() const { +const Insn* Block::back() const +{ CL_BREAK_IF(insns_.empty()); return insns_.back(); } -const TTargetList& Block::targets() const { +const TTargetList& Block::targets() const +{ const Insn *last = this->back(); CL_BREAK_IF(!cl_is_term_insn(last->code)); return last->targets; } -bool Block::isLoopEntry() const { +bool Block::isLoopEntry() const +{ BOOST_FOREACH(const Block *ref, inbound_) { const Insn *term = ref->back(); const TTargetList &tList = ref->targets(); @@ -378,23 +398,27 @@ ControlFlow::ControlFlow(const ControlFlow &ref): { } -ControlFlow::~ControlFlow() { +ControlFlow::~ControlFlow() +{ delete d; } -ControlFlow& ControlFlow::operator=(const ControlFlow &ref) { +ControlFlow& ControlFlow::operator=(const ControlFlow &ref) +{ bbs_ = ref.bbs_; delete d; d = new Private(*ref.d); return *this; } -const Block* ControlFlow::entry() const { +const Block* ControlFlow::entry() const +{ CL_BREAK_IF(bbs_.empty()); return bbs_[0]; } -Block*& ControlFlow::operator[](const char *name) { +Block*& ControlFlow::operator[](const char *name) +{ Block* &ref = dbLookup(d->db, bbs_, name, 0); if (!ref) // the object will be NOT destroyed by ControlFlow @@ -403,14 +427,16 @@ Block*& ControlFlow::operator[](const char *name) { return ref; } -const Block* ControlFlow::operator[](const char *name) const { +const Block* ControlFlow::operator[](const char *name) const +{ return dbConstLookup(d->db, bbs_, name); } // 
///////////////////////////////////////////////////////////////////////////// // Fnc implementation -inline const struct cl_cst& cstFromFnc(const Fnc &fnc) { +inline const struct cl_cst& cstFromFnc(const Fnc &fnc) +{ const struct cl_operand &op = fnc.def; CL_BREAK_IF(CL_OPERAND_CST != op.code); @@ -421,22 +447,26 @@ inline const struct cl_cst& cstFromFnc(const Fnc &fnc) { return cst; } -const char* nameOf(const Fnc &fnc) { +const char* nameOf(const Fnc &fnc) +{ const struct cl_cst &cst = cstFromFnc(fnc); return cst.data.cst_fnc.name; } -const struct cl_loc* locationOf(const Fnc &fnc) { +const struct cl_loc* locationOf(const Fnc &fnc) +{ const struct cl_cst &cst = cstFromFnc(fnc); return &cst.data.cst_fnc.loc; } -int uidOf(const Fnc &fnc) { +int uidOf(const Fnc &fnc) +{ const struct cl_cst &cst = cstFromFnc(fnc); return cst.data.cst_fnc.uid; } -bool isDefined(const Fnc &fnc) { +bool isDefined(const Fnc &fnc) +{ return CL_OPERAND_CST == fnc.def.code && !cstFromFnc(fnc).data.cst_fnc.is_extern; } @@ -459,18 +489,21 @@ FncDb::FncDb(const FncDb &ref): { } -FncDb::~FncDb() { +FncDb::~FncDb() +{ delete d; } -FncDb& FncDb::operator=(const FncDb &ref) { +FncDb& FncDb::operator=(const FncDb &ref) +{ fncs_ = ref.fncs_; delete d; d = new Private(*ref.d); return *this; } -Fnc*& FncDb::operator[](int uid) { +Fnc*& FncDb::operator[](int uid) +{ Fnc* &ref = dbLookup(d->db, fncs_, uid, 0); if (!ref) // the object will be NOT destroyed by FncDb @@ -479,7 +512,8 @@ Fnc*& FncDb::operator[](int uid) { return ref; } -const Fnc* FncDb::operator[](int uid) const { +const Fnc* FncDb::operator[](int uid) const +{ return dbConstLookup(d->db, fncs_, uid); } diff --git a/cl/util.hh b/cl/util.hh index df2e9694e..9a19f4502 100644 --- a/cl/util.hh +++ b/cl/util.hh @@ -30,7 +30,8 @@ #define FIXW(w) std::fixed << std::setfill('0') << std::setw(w) template -void swapValues(T &a, T &b) { +void swapValues(T &a, T &b) +{ const T tmp = a; a = b; b = tmp; @@ -38,23 +39,27 @@ void swapValues(T &a, T &b) { // ensure (a <= b) template -void sortValues(T &a, T &b) { +void sortValues(T &a, T &b) +{ if (b < a) swapValues(a, b); } template -bool hasKey(const TCont &cont, const typename TCont::key_type &key) { +bool hasKey(const TCont &cont, const typename TCont::key_type &key) +{ return cont.end() != cont.find(key); } template -bool hasKey(const TCont *cont, const typename TCont::key_type &key) { +bool hasKey(const TCont *cont, const typename TCont::key_type &key) +{ return hasKey(*cont, key); } template -bool insertOnce(TCont &cont, const typename TCont::key_type &key) { +bool insertOnce(TCont &cont, const typename TCont::key_type &key) +{ return cont.insert(key)./* inserted */second; } diff --git a/fwnull/cl_fwnull.cc b/fwnull/cl_fwnull.cc index 2dea64261..df1966262 100644 --- a/fwnull/cl_fwnull.cc +++ b/fwnull/cl_fwnull.cc @@ -141,7 +141,8 @@ void handleDerefs(Data::TState &state, const CodeStorage::Insn *insn) } /// returns true for VS_NOT_NULL and VS_NOT_NULL_DEDUCED -inline bool anyNotNull(const EVarState code) { +inline bool anyNotNull(const EVarState code) +{ switch (code) { case VS_NOT_NULL: case VS_NOT_NULL_DEDUCED: @@ -157,7 +158,8 @@ inline bool anyNotNull(const EVarState code) { * @param dst destination state (used in read-write mode) * @param src source state (used in read-only mode) */ -bool mergeValues(VarState &dst, const VarState &src) { +bool mergeValues(VarState &dst, const VarState &src) +{ if (VS_UNDEF == src.code || VS_MIGHT_BE_NULL == dst.code) // nothing to propagate actually return false; @@ -199,7 +201,8 @@ 
bool mergeValues(VarState &dst, const VarState &src) { * @param state state valid per current instruction * @param insn instruction you want to process */ -void handleInsnUnop(Data::TState &state, const CodeStorage::Insn *insn) { +void handleInsnUnop(Data::TState &state, const CodeStorage::Insn *insn) +{ handleDerefs(state, insn); const struct cl_operand &dst = insn->operands[0]; @@ -325,7 +328,8 @@ bool handleInsnCmpNull(Data::TState &state, * @param state state valid per current instruction * @param insn instruction you want to process */ -void handleInsnBinop(Data::TState &state, const CodeStorage::Insn *insn) { +void handleInsnBinop(Data::TState &state, const CodeStorage::Insn *insn) +{ const CodeStorage::TOperandList &opList = insn->operands; #ifndef NDEBUG @@ -392,7 +396,8 @@ void handleInsnBinop(Data::TState &state, const CodeStorage::Insn *insn) { * @param state state valid per current instruction * @param insn instruction you want to process */ -void handleInsnCall(Data::TState &state, const CodeStorage::Insn *insn) { +void handleInsnCall(Data::TState &state, const CodeStorage::Insn *insn) +{ const struct cl_operand &dst = insn->operands[0]; if (dst.accessor) // we're interested only in direct manipulation of variables here @@ -437,7 +442,8 @@ void treatRefAsSideEffect(Data::TState &state, * @param state state valid per current instruction * @param insn instruction you want to process */ -void handleInsnNonterm(Data::TState &state, const CodeStorage::Insn *insn) { +void handleInsnNonterm(Data::TState &state, const CodeStorage::Insn *insn) +{ treatRefAsSideEffect(state, insn->operands); const enum cl_insn_e code = insn->code; @@ -496,7 +502,8 @@ void updateState(Data &data, * @param uid CodeStorage uid of the branch-by variable * @param val true in 'then' branch, false in 'else' branch */ -void replaceInBranch(Data::TState &state, int uid, bool val) { +void replaceInBranch(Data::TState &state, int uid, bool val) +{ VarState &vs = state[uid]; bool isNull; @@ -620,7 +627,8 @@ void handleInsnTerm(Data &data, } } -void handleBlock(Data &data, Data::TBlock bb) { +void handleBlock(Data &data, Data::TBlock bb) +{ // go through the sequence of instructions of the current basic block Data::TState next = data.stateMap[bb]; BOOST_FOREACH(const CodeStorage::Insn *insn, *bb) { @@ -634,7 +642,8 @@ void handleBlock(Data &data, Data::TBlock bb) { } } -void handleFnc(const CodeStorage::Fnc &fnc) { +void handleFnc(const CodeStorage::Fnc &fnc) +{ using namespace CodeStorage; Data data; @@ -661,7 +670,8 @@ void handleFnc(const CodeStorage::Fnc &fnc) { // ///////////////////////////////////////////////////////////////////////////// // see easy.hh for details -void clEasyRun(const CodeStorage::Storage &stor, const char *) { +void clEasyRun(const CodeStorage::Storage &stor, const char *) +{ using namespace CodeStorage; BOOST_FOREACH(const Fnc *pFnc, stor.fncs) { diff --git a/include/cl/cl_msg.hh b/include/cl/cl_msg.hh index eedfc33be..c18d9c629 100644 --- a/include/cl/cl_msg.hh +++ b/include/cl/cl_msg.hh @@ -141,7 +141,8 @@ CL_DEBUG_MSG(loc, what); \ } while (0) -inline std::ostream& operator<<(std::ostream &str, const struct cl_loc &loc) { +inline std::ostream& operator<<(std::ostream &str, const struct cl_loc &loc) +{ if (!&loc || !loc.file) { str << ": "; return str; diff --git a/include/cl/clutil.hh b/include/cl/clutil.hh index 0fb9d1117..0a60418c7 100644 --- a/include/cl/clutil.hh +++ b/include/cl/clutil.hh @@ -41,7 +41,8 @@ namespace CodeStorage { bool operator==(const struct cl_type &cltA, const 
struct cl_type &cltB); /// compare given two pieces of static type-info semantically -inline bool operator!=(const struct cl_type &cltA, const struct cl_type &cltB) { +inline bool operator!=(const struct cl_type &cltA, const struct cl_type &cltB) +{ return !(cltA == cltB); } @@ -51,7 +52,8 @@ const struct cl_type* targetTypeOfPtr(const struct cl_type *clt); /// return type of the @b target object that the array type can point to const struct cl_type* targetTypeOfArray(const struct cl_type *clt); -inline bool isComposite(const struct cl_type *clt, bool includingArray = true) { +inline bool isComposite(const struct cl_type *clt, bool includingArray = true) +{ if (!clt) return false; @@ -68,7 +70,8 @@ inline bool isComposite(const struct cl_type *clt, bool includingArray = true) { } } -inline bool isDataPtr(const struct cl_type *clt) { +inline bool isDataPtr(const struct cl_type *clt) +{ if (!clt || clt->code != CL_TYPE_PTR) return false; @@ -76,7 +79,8 @@ inline bool isDataPtr(const struct cl_type *clt) { return (CL_TYPE_FNC != clt->code); } -inline bool isCodePtr(const struct cl_type *clt) { +inline bool isCodePtr(const struct cl_type *clt) +{ if (!clt || clt->code != CL_TYPE_PTR) return false; @@ -85,7 +89,8 @@ inline bool isCodePtr(const struct cl_type *clt) { } /// return true if the given operand is a local variable -inline bool isLcVar(const cl_operand &op) { +inline bool isLcVar(const cl_operand &op) +{ if (CL_OPERAND_VAR != op.code) // not a variable return false; From 971c8a80ad2d927a6b61f9e89b59602e8ed33c0b Mon Sep 17 00:00:00 2001 From: Kamil Dudka Date: Wed, 22 Aug 2012 23:20:30 +0200 Subject: [PATCH 2/4] sl: always write { on new line when opening fnc body Suggested by Jakub Filak. --- sl/cl_symexec.cc | 12 +- sl/intrange.cc | 57 ++++++--- sl/intrange.hh | 33 +++-- sl/memdebug.cc | 33 +++-- sl/plotenum.cc | 3 +- sl/prototype.cc | 18 ++- sl/sigcatch.cc | 15 ++- sl/symabstract.cc | 33 +++-- sl/symbin.cc | 12 +- sl/symbt.cc | 42 ++++--- sl/symcall.cc | 51 +++++--- sl/symcmp.hh | 3 +- sl/symcut.cc | 18 ++- sl/symdiscover.cc | 6 +- sl/symdiscover.hh | 3 +- sl/symdump.cc | 15 ++- sl/syments.hh | 21 ++-- sl/symexec.cc | 75 ++++++++---- sl/symgc.cc | 27 +++-- sl/symheap.cc | 300 ++++++++++++++++++++++++++++++---------------- sl/symheap.hh | 24 ++-- sl/symjoin.cc | 51 +++++--- sl/symjoin.hh | 3 +- sl/sympath.cc | 9 +- sl/symplot.cc | 81 ++++++++----- sl/symproc.cc | 105 ++++++++++------ sl/symproc.hh | 3 +- sl/symseg.cc | 18 ++- sl/symseg.hh | 33 +++-- sl/symstate.cc | 69 +++++++---- sl/symtrace.cc | 105 ++++++++++------ sl/symutil.cc | 24 ++-- sl/symutil.hh | 18 ++- 33 files changed, 880 insertions(+), 440 deletions(-) diff --git a/sl/cl_symexec.cc b/sl/cl_symexec.cc index c8254cf53..fc326f3f9 100644 --- a/sl/cl_symexec.cc +++ b/sl/cl_symexec.cc @@ -41,7 +41,8 @@ extern "C" { int plugin_is_GPL_compatible; } // FIXME: the implementation is amusing -void parseConfigString(SymExecParams &sep, std::string cnf) { +void parseConfigString(SymExecParams &sep, std::string cnf) +{ using std::string; if (cnf.empty()) return; @@ -86,7 +87,8 @@ void parseConfigString(SymExecParams &sep, std::string cnf) { CL_WARN("unhandled config string: \"" << cnf << "\""); } -void digGlJunk(SymHeap &sh) { +void digGlJunk(SymHeap &sh) +{ using namespace CodeStorage; TStorRef stor = sh.stor(); SymBackTrace bt(stor); @@ -160,7 +162,8 @@ void execVirtualRoots(const CodeStorage::Storage &stor, const SymExecParams &ep) } } -void launchSymExec(const CodeStorage::Storage &stor, const SymExecParams &ep) { +void 
launchSymExec(const CodeStorage::Storage &stor, const SymExecParams &ep) +{ using namespace CodeStorage; // look for main() by name @@ -189,7 +192,8 @@ void launchSymExec(const CodeStorage::Storage &stor, const SymExecParams &ep) { // ///////////////////////////////////////////////////////////////////////////// // see easy.hh for details -void clEasyRun(const CodeStorage::Storage &stor, const char *configString) { +void clEasyRun(const CodeStorage::Storage &stor, const char *configString) +{ // read parameters of symbolic execution SymExecParams ep; parseConfigString(ep, configString); diff --git a/sl/intrange.cc b/sl/intrange.cc index 433cd935e..ea3135e8e 100644 --- a/sl/intrange.cc +++ b/sl/intrange.cc @@ -47,7 +47,8 @@ const Range FullRange = { (IntMin != (n) && (n) < RZ_MIN) || \ (IntMax != (n) && RZ_MAX < (n))) -void chkRange(const Range &rng) { +void chkRange(const Range &rng) +{ // check red zone CL_BREAK_IF(RZ_CORRUPTION(rng.lo)); CL_BREAK_IF(RZ_CORRUPTION(rng.hi)); @@ -66,7 +67,8 @@ void chkRange(const Range &rng) { } // TODO: replace this implementation by something useful (it can loop badly) -TInt greatestCommonDivisor(TInt a, TInt b) { +TInt greatestCommonDivisor(TInt a, TInt b) +{ CL_BREAK_IF(a < RZ_MIN || RZ_MAX < a); CL_BREAK_IF(b < RZ_MIN || RZ_MAX < b); @@ -90,7 +92,8 @@ TInt greatestCommonDivisor(TInt a, TInt b) { return a; } -Range join(const Range &rng1, const Range &rng2) { +Range join(const Range &rng1, const Range &rng2) +{ Range result; result.lo = std::min(rng1.lo, rng2.lo); result.hi = std::max(rng1.hi, rng2.hi); @@ -102,7 +105,8 @@ Range join(const Range &rng1, const Range &rng2) { return result; } -bool isRangeByNum(bool *pIsRange1, const Range &rng1, const Range &rng2) { +bool isRangeByNum(bool *pIsRange1, const Range &rng1, const Range &rng2) +{ const bool isRange1 = !isSingular(rng1); const bool isRange2 = !isSingular(rng2); if (isRange1 == isRange2) @@ -112,7 +116,8 @@ bool isRangeByNum(bool *pIsRange1, const Range &rng1, const Range &rng2) { return true; } -bool isCovered(const Range &small, const Range &big) { +bool isCovered(const Range &small, const Range &big) +{ chkRange(small); chkRange(big); @@ -122,17 +127,20 @@ bool isCovered(const Range &small, const Range &big) { greatestCommonDivisor(small.alignment, big.alignment)); } -bool isSingular(const Range &range) { +bool isSingular(const Range &range) +{ chkRange(range); return (range.lo == range.hi); } -bool isAligned(const Range &range) { +bool isAligned(const Range &range) +{ chkRange(range); return (Int1 < range.alignment); } -TUInt widthOf(const Range &range) { +TUInt widthOf(const Range &range) +{ chkRange(range); if (range == FullRange) @@ -142,7 +150,8 @@ TUInt widthOf(const Range &range) { return /* closed interval */ 1UL + range.hi - range.lo; } -TInt invertInt(const TInt num) { +TInt invertInt(const TInt num) +{ CL_BREAK_IF(RZ_CORRUPTION(num)); if (IntMin == num) @@ -160,7 +169,8 @@ enum EIntBinOp { IBO_RSHIFT }; -inline void intBinOp(TInt *pDst, const TInt other, const EIntBinOp code) { +inline void intBinOp(TInt *pDst, const TInt other, const EIntBinOp code) +{ switch (code) { case IBO_ADD: (*pDst) += other; @@ -181,7 +191,8 @@ inline void intBinOp(TInt *pDst, const TInt other, const EIntBinOp code) { } // the real arithmetic actually works only for "small" numbers this way -inline void rngBinOp(Range &rng, const Range &other, const EIntBinOp code) { +inline void rngBinOp(Range &rng, const Range &other, const EIntBinOp code) +{ chkRange(rng); chkRange(other); @@ -200,7 +211,8 @@ inline void 
rngBinOp(Range &rng, const Range &other, const EIntBinOp code) { } } -TInt alignmentOf(const Range &rng) { +TInt alignmentOf(const Range &rng) +{ chkRange(rng); if (!isSingular(rng)) @@ -216,7 +228,8 @@ TInt alignmentOf(const Range &rng) { return num; } -Range& operator+=(Range &rng, const Range &other) { +Range& operator+=(Range &rng, const Range &other) +{ // this needs to be done before rng is modified const TInt al1 = alignmentOf(rng); const TInt al2 = alignmentOf(other); @@ -233,21 +246,24 @@ Range& operator+=(Range &rng, const Range &other) { return rng; } -Range& operator<<=(Range &rng, const TUInt n) { +Range& operator<<=(Range &rng, const TUInt n) +{ rngBinOp(rng, rngFromNum(n), IBO_LSHIFT); rng.alignment = Int1; chkRange(rng); return rng; } -Range& operator>>=(Range &rng, const TUInt n) { +Range& operator>>=(Range &rng, const TUInt n) +{ rngBinOp(rng, rngFromNum(n), IBO_RSHIFT); rng.alignment = Int1; chkRange(rng); return rng; } -Range& operator*=(Range &rng, const Range &other) { +Range& operator*=(Range &rng, const Range &other) +{ // this needs to be done before rng is modified TInt coef = Int1; bool isRange1; @@ -276,7 +292,8 @@ Range& operator*=(Range &rng, const Range &other) { return rng; } -bool isZeroIntersection(TInt alignment, TInt mask) { +bool isZeroIntersection(TInt alignment, TInt mask) +{ CL_BREAK_IF(alignment < Int1); if (mask) @@ -294,7 +311,8 @@ bool isZeroIntersection(TInt alignment, TInt mask) { return false; } -TInt maskToAlignment(TInt mask) { +TInt maskToAlignment(TInt mask) +{ if (!mask) { CL_BREAK_IF("invalid call of maskToAlignment()"); return Int1; @@ -309,7 +327,8 @@ TInt maskToAlignment(TInt mask) { return alignment; } -Range& operator&=(Range &rng, TInt mask) { +Range& operator&=(Range &rng, TInt mask) +{ if (isZeroIntersection(rng.alignment, mask)) // the whole range was masked, we are back to zero return (rng = rngFromNum(Int0)); diff --git a/sl/intrange.hh b/sl/intrange.hh index 5409303d7..ca26efd44 100644 --- a/sl/intrange.hh +++ b/sl/intrange.hh @@ -41,7 +41,8 @@ struct Range { // NOTE: there is no constructor because we put Range to unions }; -inline Range rngFromNum(TInt num) { +inline Range rngFromNum(TInt num) +{ Range rng; rng.lo = num; @@ -56,13 +57,15 @@ extern const Range FullRange; /// this does nothing unless running a debug build void chkRange(const Range &rng); -inline bool operator==(const Range &a, const Range &b) { +inline bool operator==(const Range &a, const Range &b) +{ return (a.lo == b.lo) && (a.hi == b.hi) && (a.alignment == b.alignment); } -inline bool operator!=(const Range &a, const Range &b) { +inline bool operator!=(const Range &a, const Range &b) +{ return !operator==(a, b); } @@ -70,7 +73,8 @@ inline bool operator!=(const Range &a, const Range &b) { TInt invertInt(const TInt); /// invert polarity of the range -inline Range operator-(Range rng) { +inline Range operator-(Range rng) +{ const TInt hi = invertInt(rng.lo); rng.lo = invertInt(rng.hi); rng.hi = hi; @@ -91,37 +95,44 @@ Range& operator<<=(Range &rng, const TUInt); Range& operator>>=(Range &rng, const TUInt); /// subtract another range, but preserve boundary values if already reached -inline Range& operator-=(Range &rng, const Range &other) { +inline Range& operator-=(Range &rng, const Range &other) +{ rng += (-other); return rng; } -inline Range operator+(Range rng, const Range &other) { +inline Range operator+(Range rng, const Range &other) +{ rng += other; return rng; } -inline Range operator*(Range rng, const Range &other) { +inline Range operator*(Range 
rng, const Range &other) +{ rng *= other; return rng; } -inline Range operator-(Range rng, const Range &other) { +inline Range operator-(Range rng, const Range &other) +{ rng -= other; return rng; } -inline Range operator&(Range rng, const TInt mask) { +inline Range operator&(Range rng, const TInt mask) +{ rng &= mask; return rng; } -inline Range operator<<(Range rng, const TUInt n) { +inline Range operator<<(Range rng, const TUInt n) +{ rng <<= n; return rng; } -inline Range operator>>(Range rng, const TUInt n) { +inline Range operator>>(Range rng, const TUInt n) +{ rng >>= n; return rng; } diff --git a/sl/memdebug.cc b/sl/memdebug.cc index 81e080220..afb334cba 100644 --- a/sl/memdebug.cc +++ b/sl/memdebug.cc @@ -30,7 +30,8 @@ static bool overflowDetected; static ssize_t peak; -bool rawMemUsage(ssize_t *pDst) { +bool rawMemUsage(ssize_t *pDst) +{ if (::overflowDetected) return false; @@ -53,7 +54,8 @@ bool rawMemUsage(ssize_t *pDst) { static ssize_t memDrift; -bool initMemDrift() { +bool initMemDrift() +{ if (rawMemUsage(&::memDrift)) return true; @@ -62,7 +64,8 @@ bool initMemDrift() { return false; } -bool currentMemUsage(ssize_t *pDst) { +bool currentMemUsage(ssize_t *pDst) +{ if (!rawMemUsage(pDst)) // failed to get current memory usage return false; @@ -86,7 +89,8 @@ struct AmountFormatter { } }; -std::ostream& operator<<(std::ostream &str, const AmountFormatter &fmt) { +std::ostream& operator<<(std::ostream &str, const AmountFormatter &fmt) +{ const std::ios_base::fmtflags oldFlags = str.flags(); const int oldPrecision = str.precision(); @@ -99,7 +103,8 @@ std::ostream& operator<<(std::ostream &str, const AmountFormatter &fmt) { } #include -bool printMemUsage(const char *fnc) { +bool printMemUsage(const char *fnc) +{ ssize_t cb; if (!currentMemUsage(&cb)) // instead of printing misleading numbers, we rather print nothing @@ -114,7 +119,8 @@ bool printMemUsage(const char *fnc) { return true; } -bool printPeakMemUsage() { +bool printPeakMemUsage() +{ if (::overflowDetected) return false; @@ -130,23 +136,28 @@ bool printPeakMemUsage() { #else // DEBUG_MEM_USAGE -bool rawMemUsage(ssize_t *) { +bool rawMemUsage(ssize_t *) +{ return false; } -bool initMemDrift() { +bool initMemDrift() +{ return false; } -bool currentMemUsage(ssize_t *) { +bool currentMemUsage(ssize_t *) +{ return false; } -bool printMemUsage(const char *) { +bool printMemUsage(const char *) +{ return false; } -bool printPeakMemUsage() { +bool printPeakMemUsage() +{ return false; } diff --git a/sl/plotenum.cc b/sl/plotenum.cc index e54e8d5f5..b4a9d68dd 100644 --- a/sl/plotenum.cc +++ b/sl/plotenum.cc @@ -28,7 +28,8 @@ // implementation of PlotEnumerator PlotEnumerator *PlotEnumerator::inst_ = 0; -std::string PlotEnumerator::decorate(std::string name) { +std::string PlotEnumerator::decorate(std::string name) +{ // obtain a unique ID for the given name const int id = map_[name] ++; #if SYMPLOT_STOP_AFTER_N_STATES diff --git a/sl/prototype.cc b/sl/prototype.cc index 5dce4646a..380683e15 100644 --- a/sl/prototype.cc +++ b/sl/prototype.cc @@ -65,7 +65,8 @@ class ProtoCollector { bool operator()(const ObjHandle &obj); }; -bool ProtoCollector::operator()(const ObjHandle &obj) { +bool ProtoCollector::operator()(const ObjHandle &obj) +{ if (hasKey(ignoreList_, obj)) return /* continue */ true; @@ -115,7 +116,8 @@ bool collectPrototypesOf( return traverseLivePtrs(sh, root, collector); } -void objChangeProtoLevel(SymHeap &sh, TValId root, const TProtoLevel diff) { +void objChangeProtoLevel(SymHeap &sh, TValId root, const TProtoLevel 
diff) +{ CL_BREAK_IF(sh.valOffset(root)); const TProtoLevel level = sh.valTargetProtoLevel(root); @@ -131,22 +133,26 @@ void objChangeProtoLevel(SymHeap &sh, TValId root, const TProtoLevel diff) { sh.valTargetSetProtoLevel(peer, level + diff); } -void objIncrementProtoLevel(SymHeap &sh, TValId root) { +void objIncrementProtoLevel(SymHeap &sh, TValId root) +{ objChangeProtoLevel(sh, root, 1); } -void objDecrementProtoLevel(SymHeap &sh, TValId root) { +void objDecrementProtoLevel(SymHeap &sh, TValId root) +{ objChangeProtoLevel(sh, root, -1); } -void decrementProtoLevel(SymHeap &sh, const TValId at) { +void decrementProtoLevel(SymHeap &sh, const TValId at) +{ TValList protoList; collectPrototypesOf(protoList, sh, at, /* skipDlsPeers */ true); BOOST_FOREACH(const TValId proto, protoList) objDecrementProtoLevel(sh, proto); } -bool protoCheckConsistency(const SymHeap &sh) { +bool protoCheckConsistency(const SymHeap &sh) +{ TValList addrs; sh.gatherRootObjects(addrs); BOOST_FOREACH(const TValId root, addrs) { diff --git a/sl/sigcatch.cc b/sl/sigcatch.cc index fea06957f..1743ff65b 100644 --- a/sl/sigcatch.cc +++ b/sl/sigcatch.cc @@ -35,11 +35,13 @@ static volatile sig_atomic_t sig_flags[_NSIG]; typedef std::map TBackup; static TBackup backup; -static void generic_signal_handler(int signum) { +static void generic_signal_handler(int signum) +{ sig_flags[signum] = static_cast(true); } -bool SignalCatcher::install(int signum) { +bool SignalCatcher::install(int signum) +{ if (hasKey(backup, signum)) return false; @@ -51,7 +53,8 @@ bool SignalCatcher::install(int signum) { return true; } -bool SignalCatcher::cleanup() { +bool SignalCatcher::cleanup() +{ bool ok = true; // uninstall signal handler @@ -72,7 +75,8 @@ bool SignalCatcher::cleanup() { return true; } -bool SignalCatcher::caught(int signum) { +bool SignalCatcher::caught(int signum) +{ if (!sig_flags[signum]) return false; @@ -80,7 +84,8 @@ bool SignalCatcher::caught(int signum) { return true; } -bool SignalCatcher::caught(int *pSignum) { +bool SignalCatcher::caught(int *pSignum) +{ BOOST_FOREACH(TBackup::const_reference item, ::backup) { const int signum = item.first; if (!caught(signum)) diff --git a/sl/symabstract.cc b/sl/symabstract.cc index 8b9b6e9c1..3e9e5082f 100644 --- a/sl/symabstract.cc +++ b/sl/symabstract.cc @@ -44,7 +44,8 @@ LOCAL_DEBUG_PLOTTER(symabstract, DEBUG_SYMABSTRACT) -void debugSymAbstract(const bool enable) { +void debugSymAbstract(const bool enable) +{ if (enable == __ldp_enabled_symabstract) return; @@ -85,7 +86,8 @@ struct UnknownValuesDuplicator { }; // when concretizing an object, we need to duplicate all _unknown_ values -void duplicateUnknownValues(SymHeap &sh, TValId at) { +void duplicateUnknownValues(SymHeap &sh, TValId at) +{ UnknownValuesDuplicator visitor; buildIgnoreList(visitor.ignoreList, sh, at); @@ -132,7 +134,8 @@ void detachClonedPrototype( } } -TValId protoClone(SymHeap &sh, const TValId proto) { +TValId protoClone(SymHeap &sh, const TValId proto) +{ const TValId clone = segClone(sh, proto); objDecrementProtoLevel(sh, clone); @@ -207,7 +210,8 @@ struct ValueSynchronizer { } }; -void dlSegSyncPeerData(SymHeap &sh, const TValId dls) { +void dlSegSyncPeerData(SymHeap &sh, const TValId dls) +{ const TValId peer = dlSegPeer(sh, dls); ValueSynchronizer visitor(sh); buildIgnoreList(visitor.ignoreList, sh, dls); @@ -224,7 +228,8 @@ void dlSegSyncPeerData(SymHeap &sh, const TValId dls) { } // FIXME: the semantics of this function is quite contra-intuitive -TValId segDeepCopy(SymHeap &sh, TValId seg) { +TValId 
segDeepCopy(SymHeap &sh, TValId seg) +{ // collect the list of prototypes TValList protoList; collectPrototypesOf(protoList, sh, seg, /* skipDlsPeers */ true); @@ -241,7 +246,8 @@ TValId segDeepCopy(SymHeap &sh, TValId seg) { return dup; } -void enlargeMayExist(SymHeap &sh, const TValId at) { +void enlargeMayExist(SymHeap &sh, const TValId at) +{ const EObjKind kind = sh.valTargetKind(at); if (!isMayExistObj(kind)) return; @@ -282,7 +288,8 @@ void slSegAbstractionStep( sh.segSetMinLength(nextAt, len); } -void dlSegCreate(SymHeap &sh, TValId a1, TValId a2, BindingOff off) { +void dlSegCreate(SymHeap &sh, TValId a1, TValId a2, BindingOff off) +{ // compute resulting segment's length const TMinLen len = objMinLength(sh, a1) + objMinLength(sh, a2); @@ -303,7 +310,8 @@ void dlSegCreate(SymHeap &sh, TValId a1, TValId a2, BindingOff off) { sh.segSetMinLength(a1, len); } -void dlSegGobble(SymHeap &sh, TValId dls, TValId var, bool backward) { +void dlSegGobble(SymHeap &sh, TValId dls, TValId var, bool backward) +{ CL_BREAK_IF(OK_DLS != sh.valTargetKind(dls)); // compute the resulting minimal length @@ -337,7 +345,8 @@ void dlSegGobble(SymHeap &sh, TValId dls, TValId var, bool backward) { dlSegSyncPeerData(sh, dls); } -void dlSegMerge(SymHeap &sh, TValId seg1, TValId seg2) { +void dlSegMerge(SymHeap &sh, TValId seg1, TValId seg2) +{ // compute the resulting minimal length const TMinLen len = sh.segMinLength(seg1) + sh.segMinLength(seg2); @@ -504,7 +513,8 @@ bool applyAbstraction( return true; } -void dlSegReplaceByConcrete(SymHeap &sh, TValId seg, TValId peer) { +void dlSegReplaceByConcrete(SymHeap &sh, TValId seg, TValId peer) +{ LDP_INIT(symabstract, "dlSegReplaceByConcrete"); LDP_PLOT(symabstract, sh); CL_BREAK_IF(!dlSegCheckConsistency(sh)); @@ -607,7 +617,8 @@ void spliceOutSegmentIfNeeded( LDP_PLOT(symabstract, sh); } -void abstractIfNeeded(SymHeap &sh) { +void abstractIfNeeded(SymHeap &sh) +{ #if SE_DISABLE_SLS && SE_DISABLE_DLS return; #endif diff --git a/sl/symbin.cc b/sl/symbin.cc index 3ed508510..63527c032 100644 --- a/sl/symbin.cc +++ b/sl/symbin.cc @@ -84,7 +84,8 @@ bool readPlotName( return true; } -void emitPrototypeError(const struct cl_loc *lw, const char *name) { +void emitPrototypeError(const struct cl_loc *lw, const char *name) +{ CL_WARN_MSG(lw, "incorrectly called " << name << "() not recognized as built-in"); } @@ -144,7 +145,8 @@ void printUserMessage(SymProc &proc, const struct cl_operand &opMsg) CL_NOTE_MSG(loc, "user message: " << msg); } -bool validateStringOp(SymProc &proc, TOp op, TSizeRange *pSize = 0) { +bool validateStringOp(SymProc &proc, TOp op, TSizeRange *pSize = 0) +{ SymHeap &sh = proc.sh(); const struct cl_loc *loc = proc.lw(); @@ -886,7 +888,8 @@ class BuiltInTable { BuiltInTable *BuiltInTable::inst_; /// register built-ins -BuiltInTable::BuiltInTable() { +BuiltInTable::BuiltInTable() +{ // C run-time tbl_["abort"] = handleAbort; tbl_["calloc"] = handleCalloc; @@ -958,7 +961,8 @@ bool BuiltInTable::handleBuiltIn( return hdl(dst, core, insn, name); } -const TOpIdxList& BuiltInTable::lookForDerefs(const char *name) const { +const TOpIdxList& BuiltInTable::lookForDerefs(const char *name) const +{ TDerefMap::const_iterator it = der_.find(name); if (der_.end() == it) // no fnc name matched as built-in diff --git a/sl/symbt.cc b/sl/symbt.cc index 63064fd64..8cd52977d 100644 --- a/sl/symbt.cc +++ b/sl/symbt.cc @@ -75,7 +75,8 @@ struct SymBackTrace::Private { void popFnc(); }; -const CodeStorage::Fnc* SymBackTrace::Private::fncOnTop() const { +const CodeStorage::Fnc* 
SymBackTrace::Private::fncOnTop() const +{ if (this->btStack.empty()) // empty stack, so there is no top return 0; @@ -89,7 +90,8 @@ const CodeStorage::Fnc* SymBackTrace::Private::fncOnTop() const { return fnc; } -const CodeStorage::Fnc* SymBackTrace::Private::fncById(int id) const { +const CodeStorage::Fnc* SymBackTrace::Private::fncById(int id) const +{ const CodeStorage::Fnc *fnc = this->stor.fncs[id]; // check fnc ID validity @@ -114,7 +116,8 @@ void SymBackTrace::Private::pushFnc( ++ref; } -void SymBackTrace::Private::popFnc() { +void SymBackTrace::Private::popFnc() +{ const CodeStorage::Fnc *fnc = this->fncOnTop(); this->btStack.pop_front(); @@ -142,15 +145,18 @@ SymBackTrace::SymBackTrace(const SymBackTrace &ref): { } -SymBackTrace::~SymBackTrace() { +SymBackTrace::~SymBackTrace() +{ delete d; } -const CodeStorage::Storage& SymBackTrace::stor() const { +const CodeStorage::Storage& SymBackTrace::stor() const +{ return d->stor; } -bool SymBackTrace::printBackTrace(bool forcePtrace) const { +bool SymBackTrace::printBackTrace(bool forcePtrace) const +{ using namespace CodeStorage; Private::TStackPP ppStack(d->ppStack); @@ -186,7 +192,8 @@ void SymBackTrace::pushCall( d->pushFnc(fnc, loc); } -const CodeStorage::Fnc* SymBackTrace::popCall() { +const CodeStorage::Fnc* SymBackTrace::popCall() +{ const CodeStorage::Fnc *fnc = d->fncOnTop(); // check bt integrity @@ -196,16 +203,19 @@ const CodeStorage::Fnc* SymBackTrace::popCall() { return fnc; } -unsigned SymBackTrace::size() const { +unsigned SymBackTrace::size() const +{ return d->btStack.size(); } -int SymBackTrace::countOccurrencesOfFnc(int fncId) const { +int SymBackTrace::countOccurrencesOfFnc(int fncId) const +{ const CodeStorage::Fnc *fnc = d->fncById(fncId); return d->nestMap[fnc]; } -int SymBackTrace::countOccurrencesOfTopFnc() const { +int SymBackTrace::countOccurrencesOfTopFnc() const +{ const CodeStorage::Fnc *fnc = d->fncOnTop(); if (!fnc) // empty stack --> no occurrence @@ -214,21 +224,25 @@ int SymBackTrace::countOccurrencesOfTopFnc() const { return d->nestMap[fnc]; } -const CodeStorage::Fnc* SymBackTrace::topFnc() const { +const CodeStorage::Fnc* SymBackTrace::topFnc() const +{ return d->fncOnTop(); } -const struct cl_loc* SymBackTrace::topCallLoc() const { +const struct cl_loc* SymBackTrace::topCallLoc() const +{ CL_BREAK_IF(d->btStack.empty()); const Private::BtStackItem &top = d->btStack.front(); return top.loc; } -void SymBackTrace::pushPathTracer(const IPathTracer *pp) { +void SymBackTrace::pushPathTracer(const IPathTracer *pp) +{ d->ppStack.push(pp); } -void SymBackTrace::popPathTracer(const IPathTracer *pp) { +void SymBackTrace::popPathTracer(const IPathTracer *pp) +{ CL_BREAK_IF(d->ppStack.empty()); CL_BREAK_IF(d->ppStack.top() != pp); (void) pp; diff --git a/sl/symcall.cc b/sl/symcall.cc index 18d17114e..52c7aee10 100644 --- a/sl/symcall.cc +++ b/sl/symcall.cc @@ -115,7 +115,8 @@ class PerFncCache { } }; -int PerFncCache::lookupCore(const SymHeap &sh) { +int PerFncCache::lookupCore(const SymHeap &sh) +{ #if 1 < SE_ENABLE_CALL_CACHE #if SE_STATE_ON_THE_FLY_ORDERING #error "SE_STATE_ON_THE_FLY_ORDERING is incompatible with join-based call cache" @@ -248,15 +249,18 @@ SymCallCtx::SymCallCtx(SymCallCache::Private *cd): { } -SymCallCtx::~SymCallCtx() { +SymCallCtx::~SymCallCtx() +{ delete d; } -bool SymCallCtx::needExec() const { +bool SymCallCtx::needExec() const +{ return !d->computed; } -bool SymCallCtx::inUse() const { +bool SymCallCtx::inUse() const +{ if (!d->flushed) return true; @@ -264,15 +268,18 @@ bool 
SymCallCtx::inUse() const { return false; } -const SymHeap& SymCallCtx::entry() const { +const SymHeap& SymCallCtx::entry() const +{ return d->entry; } -SymState& SymCallCtx::rawResults() { +SymState& SymCallCtx::rawResults() +{ return d->rawResults; } -void SymCallCtx::Private::assignReturnValue(SymHeap &sh) { +void SymCallCtx::Private::assignReturnValue(SymHeap &sh) +{ const cl_operand &op = *this->dst; if (CL_OPERAND_VOID == op.code) // we're done for a function returning void @@ -300,7 +307,8 @@ void SymCallCtx::Private::assignReturnValue(SymHeap &sh) { proc.objSetValue(objDst, val); } -void SymCallCtx::Private::destroyStackFrame(SymHeap &sh) { +void SymCallCtx::Private::destroyStackFrame(SymHeap &sh) +{ SymProc proc(sh, &this->cd->bt); // We need to look for junk since there can be a function returning an @@ -334,7 +342,8 @@ void SymCallCtx::Private::destroyStackFrame(SymHeap &sh) { } } -bool isGlVar(EValueTarget code) { +bool isGlVar(EValueTarget code) +{ return (VT_STATIC == code); } @@ -392,7 +401,8 @@ void joinHeapsWithCare( LDP_PLOT(symcall, sh); } -void SymCallCtx::flushCallResults(SymState &dst) { +void SymCallCtx::flushCallResults(SymState &dst) +{ using namespace Trace; // are we really ready for this? @@ -451,7 +461,8 @@ void SymCallCtx::flushCallResults(SymState &dst) { d->cd->bt.popCall(); } -void SymCallCtx::invalidate() { +void SymCallCtx::invalidate() +{ #if SE_ENABLE_CALL_CACHE # if SE_CALL_CACHE_MISS_THR typedef SymCallCache::Private::TCache TCache; @@ -489,15 +500,18 @@ SymCallCache::SymCallCache(TStorRef stor, bool ptrace): { } -SymCallCache::~SymCallCache() { +SymCallCache::~SymCallCache() +{ delete d; } -SymBackTrace& SymCallCache::bt() { +SymBackTrace& SymCallCache::bt() +{ return d->bt; } -void pullGlVar(SymHeap &result, SymHeap origin, const CVar &cv) { +void pullGlVar(SymHeap &result, SymHeap origin, const CVar &cv) +{ // do not try to combine things, it causes problems CL_BREAK_IF(!areEqual(result, SymHeap(origin.stor(), origin.traceNode()))); @@ -513,7 +527,8 @@ void pullGlVar(SymHeap &result, SymHeap origin, const CVar &cv) { result.swap(origin); } -void pushGlVar(SymHeap &dst, const SymHeap &glSubHeap, const CVar &cv) { +void pushGlVar(SymHeap &dst, const SymHeap &glSubHeap, const CVar &cv) +{ // make sure the gl var is alive in 'glSubHeap' CL_BREAK_IF(!isVarAlive(const_cast(glSubHeap), cv)); @@ -531,7 +546,8 @@ void pushGlVar(SymHeap &dst, const SymHeap &glSubHeap, const CVar &cv) { joinHeapsByCVars(&dst, &glSubHeap); } -void SymCallCache::Private::importGlVar(SymHeap &entry, const CVar &cv) { +void SymCallCache::Private::importGlVar(SymHeap &entry, const CVar &cv) +{ const int cnt = this->ctxStack.size(); if (!cnt) { // empty ctx stack --> no heap to import the var from @@ -690,7 +706,8 @@ void setCallArgs( srcProc.killInsn(insn); } -SymCallCtx* SymCallCache::Private::getCallCtx(const SymHeap &entry, TFncRef fnc) { +SymCallCtx* SymCallCache::Private::getCallCtx(const SymHeap &entry, TFncRef fnc) +{ // cache lookup const int uid = uidOf(fnc); PerFncCache &pfc = this->cache[uid]; diff --git a/sl/symcmp.hh b/sl/symcmp.hh index a0fb4731b..00665f275 100644 --- a/sl/symcmp.hh +++ b/sl/symcmp.hh @@ -37,7 +37,8 @@ bool areEqual( const SymHeap &sh1, const SymHeap &sh2); -inline bool checkNonPosValues(int a, int b) { +inline bool checkNonPosValues(int a, int b) +{ if (0 < a && 0 < b) // we'll need to properly compare positive values return true; diff --git a/sl/symcut.cc b/sl/symcut.cc index 93190cbfb..2c16f3a57 100644 --- a/sl/symcut.cc +++ b/sl/symcut.cc @@ 
-123,7 +123,8 @@ void digSubObjs(DeepCopyData &dc, TValId addrSrc, TValId addrDst) traverseLiveObjsGeneric<2>(heaps, roots, objVisitor); } -TValId /* rootDstAt */ addObjectIfNeeded(DeepCopyData &dc, TValId rootSrcAt) { +TValId /* rootDstAt */ addObjectIfNeeded(DeepCopyData &dc, TValId rootSrcAt) +{ if (VAL_NULL == rootSrcAt) return VAL_NULL; @@ -193,7 +194,8 @@ TValId /* rootDstAt */ addObjectIfNeeded(DeepCopyData &dc, TValId rootSrcAt) { return rootDstAt; } -TValId handleValueCore(DeepCopyData &dc, TValId srcAt) { +TValId handleValueCore(DeepCopyData &dc, TValId srcAt) +{ TValMap &valMap = dc.valMap; TValMap::iterator iterValSrc = valMap.find(srcAt); if (valMap.end() != iterValSrc) @@ -220,7 +222,8 @@ TValId handleValueCore(DeepCopyData &dc, TValId srcAt) { return dstAt; } -TValId handleCustomValue(DeepCopyData &dc, const TValId valSrc) { +TValId handleCustomValue(DeepCopyData &dc, const TValId valSrc) +{ // custom value, e.g. fnc pointer const CustomValue custom = dc.src.valUnwrapCustom(valSrc); const TValId valDst = dc.dst.valWrapCustom(custom); @@ -228,7 +231,8 @@ TValId handleCustomValue(DeepCopyData &dc, const TValId valSrc) { return valDst; } -void trackUses(DeepCopyData &dc, TValId valSrc) { +void trackUses(DeepCopyData &dc, TValId valSrc) +{ if (!dc.digBackward) // optimization return; @@ -258,7 +262,8 @@ void trackUses(DeepCopyData &dc, TValId valSrc) { } } -TValId handleValue(DeepCopyData &dc, TValId valSrc) { +TValId handleValue(DeepCopyData &dc, TValId valSrc) +{ SymHeap &src = dc.src; SymHeap &dst = dc.dst; @@ -290,7 +295,8 @@ TValId handleValue(DeepCopyData &dc, TValId valSrc) { return valDst; } -void deepCopy(DeepCopyData &dc) { +void deepCopy(DeepCopyData &dc) +{ SymHeap &src = dc.src; SymHeap &dst = dc.dst; diff --git a/sl/symdiscover.cc b/sl/symdiscover.cc index daef5954a..892ef1b50 100644 --- a/sl/symdiscover.cc +++ b/sl/symdiscover.cc @@ -41,7 +41,8 @@ #define SE_PROTO_COST_ASYM 1 #define SE_PROTO_COST_THREEWAY 2 -int minLengthByCost(int cost) { +int minLengthByCost(int cost) +{ // abstraction length thresholds are now configurable in config.h static const int thrTable[] = { (SE_COST0_LEN_THR), @@ -293,7 +294,8 @@ TValId jumpToNextObj( return next; } -bool isPointedByVar(SymHeap &sh, const TValId root) { +bool isPointedByVar(SymHeap &sh, const TValId root) +{ ObjList refs; sh.pointedBy(refs, root); BOOST_FOREACH(const ObjHandle obj, refs) { diff --git a/sl/symdiscover.hh b/sl/symdiscover.hh index f09cc6aa4..58c74a58f 100644 --- a/sl/symdiscover.hh +++ b/sl/symdiscover.hh @@ -29,7 +29,8 @@ #include "symheap.hh" /// return true if the given binding is a DLS binding -inline bool isDlsBinding(const BindingOff &off) { +inline bool isDlsBinding(const BindingOff &off) +{ return (off.next != off.prev); } diff --git a/sl/symdump.cc b/sl/symdump.cc index a87b9078e..f924abf38 100644 --- a/sl/symdump.cc +++ b/sl/symdump.cc @@ -34,7 +34,8 @@ using std::cout; SymHeapCore *symdump_ref_heap; -void dump_plot_core(const SymHeapCore *core, const char *name) { +void dump_plot_core(const SymHeapCore *core, const char *name) +{ if (!core) { cout << "dump_plot: error: got a NULL pointer\n"; return; @@ -54,18 +55,22 @@ void dump_plot_core(const SymHeapCore *core, const char *name) { cout << "dump_plot: warning: call of SymPlot::plot() has failed\n"; } -void dump_plot(const SymHeapCore *sh) { +void dump_plot(const SymHeapCore *sh) +{ dump_plot_core(sh, "dump_plot"); } -void dump_plot(const SymHeapCore &sh, const char *name) { +void dump_plot(const SymHeapCore &sh, const char *name) +{ 
dump_plot_core(&sh, name); } -void dump_plot(const SymHeapCore &sh) { +void dump_plot(const SymHeapCore &sh) +{ dump_plot(&sh); } -void dump_trace(Trace::Node *endPoint) { +void dump_trace(Trace::Node *endPoint) +{ Trace::plotTrace(endPoint, "dump_trace"); } diff --git a/sl/syments.hh b/sl/syments.hh index b22bd276c..20ca77cda 100644 --- a/sl/syments.hh +++ b/sl/syments.hh @@ -211,7 +211,8 @@ class EntStore { // implementation of EntStore template template -TId EntStore::assignId(TBaseEnt *ptr) { +TId EntStore::assignId(TBaseEnt *ptr) +{ CL_BREAK_IF(ptr->refCnt.isShared()); #if SH_REUSE_FREE_IDS @@ -230,7 +231,8 @@ TId EntStore::assignId(TBaseEnt *ptr) { template template -void EntStore::assignId(const TId id, TBaseEnt *ptr) { +void EntStore::assignId(const TId id, TBaseEnt *ptr) +{ CL_BREAK_IF(ptr->refCnt.isShared()); // make sure we have enough space allocated @@ -247,7 +249,8 @@ void EntStore::assignId(const TId id, TBaseEnt *ptr) { template template -void EntStore::releaseEnt(const TId id) { +void EntStore::releaseEnt(const TId id) +{ #if SH_REUSE_FREE_IDS freeIds_.push(id); #endif @@ -256,7 +259,8 @@ void EntStore::releaseEnt(const TId id) { template template -bool EntStore::isValidEnt(const TId id) const { +bool EntStore::isValidEnt(const TId id) const +{ if (this->outOfRange(id)) return false; @@ -273,7 +277,8 @@ EntStore::EntStore(const EntStore &ref): } template -EntStore::~EntStore() { +EntStore::~EntStore() +{ BOOST_FOREACH(TBaseEnt *ent, ents_) if (ent) RefCntLib::leave(ent); @@ -306,7 +311,8 @@ inline TBaseEnt* EntStore::getEntRW(const TId id) template template -inline void EntStore::getEntRO(const TEnt **pEnt, const TId id) { +inline void EntStore::getEntRO(const TEnt **pEnt, const TId id) +{ const TBaseEnt *ptr = this->getEntRO(id); const TEnt *ent = DCAST(ptr); @@ -319,7 +325,8 @@ inline void EntStore::getEntRO(const TEnt **pEnt, const TId id) { template template -inline void EntStore::getEntRW(TEnt **pEnt, const TId id) { +inline void EntStore::getEntRW(TEnt **pEnt, const TId id) +{ TBaseEnt *ptr = this->getEntRW(id); TEnt *ent = DCAST(ptr); diff --git a/sl/symexec.cc b/sl/symexec.cc index a6d2fdab5..68837e673 100644 --- a/sl/symexec.cc +++ b/sl/symexec.cc @@ -46,7 +46,8 @@ LOCAL_DEBUG_PLOTTER(nondetCond, DEBUG_SE_NONDET_COND) -bool installSignalHandlers(void) { +bool installSignalHandlers(void) +{ // will be processed in SymExecEngine::processPendingSignals() eventually return SignalCatcher::install(SIGINT) && SignalCatcher::install(SIGUSR1) @@ -230,7 +231,8 @@ void SymExecEngine::initEngine(const SymHeap &init) sched_.schedule(entry); } -void SymExecEngine::execJump() { +void SymExecEngine::execJump() +{ const CodeStorage::Insn *insn = block_->operator[](insnIdx_); const CodeStorage::TTargetList &tlist = insn->targets; @@ -242,7 +244,8 @@ void SymExecEngine::execJump() { this->updateState(sh, tlist[/* target */ 0]); } -void SymExecEngine::execReturn() { +void SymExecEngine::execReturn() +{ const CodeStorage::Insn *insn = block_->operator[](insnIdx_); const CodeStorage::TOperandList &opList = insn->operands; CL_BREAK_IF(1 != opList.size()); @@ -318,7 +321,8 @@ void SymExecEngine::updateState(SymHeap &sh, const CodeStorage::Block *ofBlock) } } -bool isAnyAbstractOf(const SymHeapCore &sh, const TValId v1, const TValId v2) { +bool isAnyAbstractOf(const SymHeapCore &sh, const TValId v1, const TValId v2) +{ return isAbstract(sh.valTarget(v1)) || isAbstract(sh.valTarget(v2)); } @@ -364,7 +368,8 @@ void SymExecEngine::updateStateInBranch( } } -bool isTrackableValue(const SymHeap 
&sh, const TValId val) { +bool isTrackableValue(const SymHeap &sh, const TValId val) +{ const EValueTarget code = sh.valTarget(val); if (isPossibleToDeref(code)) return true; @@ -426,7 +431,8 @@ bool SymExecEngine::bypassNonPointers( return true; } -void SymExecEngine::execCondInsn() { +void SymExecEngine::execCondInsn() +{ // we should get a CL_INSN_BINOP instruction and a CL_INSN_COND instruction const CodeStorage::Insn *insnCmp = block_->operator[](insnIdx_ - 1); const CodeStorage::Insn *insnCnd = block_->operator[](insnIdx_); @@ -519,7 +525,8 @@ void SymExecEngine::execCondInsn() { this->updateStateInBranch(sh, false, *insnCmp, *insnCnd, v1, v2); } -void SymExecEngine::execTermInsn() { +void SymExecEngine::execTermInsn() +{ const CodeStorage::Insn *insn = block_->operator[](insnIdx_); const enum cl_insn_e code = insn->code; @@ -546,7 +553,8 @@ void SymExecEngine::execTermInsn() { } } -bool /* handled */ SymExecEngine::execNontermInsn() { +bool /* handled */ SymExecEngine::execNontermInsn() +{ const CodeStorage::Insn *insn = block_->operator[](insnIdx_); // set some properties of the execution @@ -579,7 +587,8 @@ bool /* handled */ SymExecEngine::execNontermInsn() { return /* insn handled */ true; } -bool /* complete */ SymExecEngine::execInsn() { +bool /* complete */ SymExecEngine::execInsn() +{ const CodeStorage::Insn *insn = block_->operator[](insnIdx_); // true for terminal instruction @@ -658,7 +667,8 @@ bool /* complete */ SymExecEngine::execInsn() { return true; } -bool /* complete */ SymExecEngine::execBlock() { +bool /* complete */ SymExecEngine::execBlock() +{ const std::string &name = block_->name(); if (insnIdx_ || heapIdx_) { @@ -714,7 +724,8 @@ bool /* complete */ SymExecEngine::execBlock() { return true; } -void SymExecEngine::joinCallResults() { +void SymExecEngine::joinCallResults() +{ #if SE_ABSTRACT_ON_CALL_DONE SymStateWithJoin all; #else @@ -739,7 +750,8 @@ void SymExecEngine::joinCallResults() { all.swap(nextLocalState_); } -bool /* complete */ SymExecEngine::run() { +bool /* complete */ SymExecEngine::run() +{ const CodeStorage::Fnc fnc = *bt_.topFnc(); if (waiting_) { @@ -797,7 +809,8 @@ bool /* complete */ SymExecEngine::run() { return true; } -void SymExecEngine::printStatsHelper(const BlockScheduler::TBlock bb) const { +void SymExecEngine::printStatsHelper(const BlockScheduler::TBlock bb) const +{ const std::string &name = bb->name(); // query total count of heaps @@ -819,7 +832,8 @@ void SymExecEngine::printStatsHelper(const BlockScheduler::TBlock bb) const { ", " << waiting << " heap(s) pending"); } -void SymExecEngine::printStats() const { +void SymExecEngine::printStats() const +{ // per function statistics const BlockScheduler::TBlockSet &bset = sched_.todo(); CL_NOTE_MSG(lw_, @@ -851,7 +865,8 @@ void SymExecEngine::printStats() const { #endif } -void SymExecEngine::dumpStateMap() { +void SymExecEngine::dumpStateMap() +{ const BlockScheduler::TBlockList bbs(sched_.done()); BOOST_FOREACH(const BlockScheduler::TBlock block, bbs) { const std::string name = block->name(); @@ -862,12 +877,14 @@ void SymExecEngine::dumpStateMap() { } } -const SymHeap& SymExecEngine::callEntry() const { +const SymHeap& SymExecEngine::callEntry() const +{ CL_BREAK_IF(heapIdx_ < 1); return localState_[heapIdx_ - /* already incremented for next wheel */ 1]; } -const CodeStorage::Insn& SymExecEngine::callInsn() const { +const CodeStorage::Insn& SymExecEngine::callInsn() const +{ const CodeStorage::Insn *insn = block_->operator[](insnIdx_); // check for possible protocol error @@ 
-876,19 +893,23 @@ const CodeStorage::Insn& SymExecEngine::callInsn() const { return *insn; } -SymState& SymExecEngine::callResults() { +SymState& SymExecEngine::callResults() +{ return callResults_; } -bool SymExecEngine::endReached() const { +bool SymExecEngine::endReached() const +{ return endReached_; } -void SymExecEngine::forceEndReached() { +void SymExecEngine::forceEndReached() +{ endReached_ = true; } -void SymExecEngine::processPendingSignals() { +void SymExecEngine::processPendingSignals() +{ int signum; if (!SignalCatcher::caught(&signum)) return; @@ -907,7 +928,8 @@ void SymExecEngine::processPendingSignals() { } } -void SymExecEngine::pruneOrigin() { +void SymExecEngine::pruneOrigin() +{ #if SE_STATE_PRUNING_MODE if (block_->isLoopEntry()) #endif @@ -969,7 +991,8 @@ void SymExecEngine::pruneOrigin() { // ///////////////////////////////////////////////////////////////////////////// // SymExec implementation -SymExec::~SymExec() { +SymExec::~SymExec() +{ // NOTE this is actually the right direction (from top of the backtrace) BOOST_FOREACH(const ExecStackItem &item, execStack_) { @@ -1054,7 +1077,8 @@ const CodeStorage::Fnc* SymExec::resolveCallInsn( return 0; } -void SymExec::enterCall(SymCallCtx *ctx, SymState &results) { +void SymExec::enterCall(SymCallCtx *ctx, SymState &results) +{ // create engine SymExecEngine *eng = new SymExecEngine( ctx->rawResults(), @@ -1154,7 +1178,8 @@ void SymExec::execFnc( } } -void SymExec::printStats() const { +void SymExec::printStats() const +{ // TODO: print SymCallCache stats here as soon as we have implemented some BOOST_FOREACH(const ExecStackItem &item, execStack_) { diff --git a/sl/symgc.cc b/sl/symgc.cc index ebb3ec74c..39fe91503 100644 --- a/sl/symgc.cc +++ b/sl/symgc.cc @@ -32,7 +32,8 @@ #include -void gatherReferredRoots(TValList &dst, SymHeap &sh, TValId at) { +void gatherReferredRoots(TValList &dst, SymHeap &sh, TValId at) +{ CL_BREAK_IF(sh.valOffset(at)); ObjList ptrs; @@ -47,7 +48,8 @@ void gatherReferredRoots(TValList &dst, SymHeap &sh, TValId at) { } } -bool isJunk(SymHeap &sh, TValId root) { +bool isJunk(SymHeap &sh, TValId root) +{ WorkList wl(root); while (wl.next(root)) { @@ -69,7 +71,8 @@ bool isJunk(SymHeap &sh, TValId root) { return true; } -bool gcCore(SymHeap &sh, TValId root, TValList *leakList, bool sharedOnly) { +bool gcCore(SymHeap &sh, TValId root, TValList *leakList, bool sharedOnly) +{ CL_BREAK_IF(sh.valOffset(root)); bool detected = false; @@ -113,11 +116,13 @@ bool gcCore(SymHeap &sh, TValId root, TValList *leakList, bool sharedOnly) { return detected; } -bool collectJunk(SymHeap &sh, TValId root, TValList *leakList) { +bool collectJunk(SymHeap &sh, TValId root, TValList *leakList) +{ return gcCore(sh, root, leakList, /* sharedOnly */ false); } -bool collectSharedJunk(SymHeap &sh, TValId root, TValList *leakList) { +bool collectSharedJunk(SymHeap &sh, TValId root, TValList *leakList) +{ return gcCore(sh, root, leakList, /* sharedOnly */ true); } @@ -150,7 +155,8 @@ bool destroyRootAndCollectJunk( // implementation of LeakMonitor static bool debuggingGarbageCollector = static_cast(DEBUG_SYMGC); -void debugGarbageCollector(const bool enable) { +void debugGarbageCollector(const bool enable) +{ if (enable == ::debuggingGarbageCollector) return; @@ -158,12 +164,14 @@ void debugGarbageCollector(const bool enable) { ::debuggingGarbageCollector = enable; } -void LeakMonitor::enter() { +void LeakMonitor::enter() +{ if (::debuggingGarbageCollector) snap_ = sh_; } -void LeakMonitor::leave() { +void LeakMonitor::leave() 
+{ if (leakList_.empty()) return; @@ -172,7 +180,8 @@ void LeakMonitor::leave() { /* digForward */ false); } -bool /* leaking */ LeakMonitor::importLeakList(TValList *leakList) { +bool /* leaking */ LeakMonitor::importLeakList(TValList *leakList) +{ CL_BREAK_IF(!leakList_.empty()); leakList_ = *leakList; diff --git a/sl/symheap.cc b/sl/symheap.cc index 074d3a9fc..1df3b8452 100644 --- a/sl/symheap.cc +++ b/sl/symheap.cc @@ -61,7 +61,8 @@ assignInvalidIfNotFound( static bool bypassSelfChecks; -void enableProtectedMode(bool enable) { +void enableProtectedMode(bool enable) +{ ::bypassSelfChecks = !enable; } @@ -172,7 +173,8 @@ class CVarMap { // ///////////////////////////////////////////////////////////////////////////// // implementation of CustomValue -CustomValue::~CustomValue() { +CustomValue::~CustomValue() +{ if (CV_STRING != code_) return; @@ -188,7 +190,8 @@ CustomValue::CustomValue(const CustomValue &ref): data_.str = new std::string(*ref.data_.str); } -CustomValue& CustomValue::operator=(const CustomValue &ref) { +CustomValue& CustomValue::operator=(const CustomValue &ref) +{ if (&ref == this) return *this; @@ -206,34 +209,40 @@ CustomValue& CustomValue::operator=(const CustomValue &ref) { return *this; } -int CustomValue::uid() const { +int CustomValue::uid() const +{ CL_BREAK_IF(CV_FNC != code_); return data_.uid; } -IR::Range& CustomValue::rng() { +IR::Range& CustomValue::rng() +{ CL_BREAK_IF(CV_INT_RANGE != code_); return data_.rng; } -double CustomValue::fpn() const { +double CustomValue::fpn() const +{ CL_BREAK_IF(CV_REAL != code_); return data_.fpn; } -const std::string& CustomValue::str() const { +const std::string& CustomValue::str() const +{ CL_BREAK_IF(CV_STRING != code_); CL_BREAK_IF(!data_.str); return *data_.str; } /// eliminates the warning 'comparing floating point with == or != is unsafe' -inline bool areEqual(const double a, const double b) { +inline bool areEqual(const double a, const double b) +{ return (a <= b) && (b <= a); } -bool operator==(const CustomValue &a, const CustomValue &b) { +bool operator==(const CustomValue &a, const CustomValue &b) +{ const ECustomValue code = a.code_; if (b.code_ != code) return false; @@ -305,7 +314,8 @@ inline void arenaLookForExactMatch( } // create a right-open interval -inline TMemChunk createChunk(const TOffset off, const TObjType clt) { +inline TMemChunk createChunk(const TOffset off, const TObjType clt) +{ CL_BREAK_IF(!clt || clt->code == CL_TYPE_VOID); return TMemChunk(off, off + clt->size); } @@ -320,7 +330,8 @@ enum EBlockKind { typedef std::map TLiveObjs; -inline EBlockKind bkFromClt(const TObjType clt) { +inline EBlockKind bkFromClt(const TObjType clt) +{ if (isComposite(clt, /* includingArray */ false)) return BK_COMPOSITE; @@ -638,18 +649,21 @@ struct SymHeapCore::Private { Private& operator=(const Private &); }; -inline TValId SymHeapCore::Private::assignId(BaseValue *valData) { +inline TValId SymHeapCore::Private::assignId(BaseValue *valData) +{ const TValId val = this->ents.assignId(valData); valData->valRoot = val; valData->anchor = val; return val; } -inline TObjId SymHeapCore::Private::assignId(BlockEntity *hbData) { +inline TObjId SymHeapCore::Private::assignId(BlockEntity *hbData) +{ return this->ents.assignId(hbData); } -bool /* wasPtr */ SymHeapCore::Private::releaseValueOf(TObjId obj, TValId val) { +bool /* wasPtr */ SymHeapCore::Private::releaseValueOf(TObjId obj, TValId val) +{ if (val <= 0) // we do not track uses of special values return /* wasPtr */ false; @@ -685,7 +699,8 @@ bool /* wasPtr */ 
SymHeapCore::Private::releaseValueOf(TObjId obj, TValId val) { return /* wasPtr */ true; } -void SymHeapCore::Private::registerValueOf(TObjId obj, TValId val) { +void SymHeapCore::Private::registerValueOf(TObjId obj, TValId val) +{ if (val <= 0) return; @@ -706,7 +721,8 @@ void SymHeapCore::Private::registerValueOf(TObjId obj, TValId val) { } // runs only in debug build -bool SymHeapCore::Private::chkValueDeps(const TValId val) { +bool SymHeapCore::Private::chkValueDeps(const TValId val) +{ const BaseValue *valData; this->ents.getEntRO(&valData, val); if (VT_CUSTOM != valData->code) @@ -930,12 +946,14 @@ bool isCoveredByBlock( return (end1 <= end2); } -inline bool isChar(const TObjType clt) { +inline bool isChar(const TObjType clt) +{ return (CL_TYPE_INT == clt->code) && (1 == clt->size); } -inline bool isString(const TObjType clt) { +inline bool isString(const TObjType clt) +{ return (CL_TYPE_ARRAY == clt->code) && isChar(targetTypeOfArray(clt)); } @@ -1189,7 +1207,8 @@ TObjId SymHeapCore::Private::objCreate( return obj; } -void SymHeapCore::Private::objDestroy(TObjId obj, bool removeVal, bool detach) { +void SymHeapCore::Private::objDestroy(TObjId obj, bool removeVal, bool detach) +{ BlockEntity *blData; this->ents.getEntRW(&blData, obj); @@ -1261,7 +1280,8 @@ TValId SymHeapCore::Private::valCreate( return val; } -TValId SymHeapCore::Private::valDup(TValId val) { +TValId SymHeapCore::Private::valDup(TValId val) +{ if (val <= 0) // do not clone special values return val; @@ -1280,7 +1300,8 @@ TValId SymHeapCore::Private::valDup(TValId val) { } // FIXME: copy/pasted in symutil.hh -bool SymHeapCore::Private::valsEqual(TValId v1, TValId v2) { +bool SymHeapCore::Private::valsEqual(TValId v1, TValId v2) +{ if (v1 == v2) // matches trivially return true; @@ -1411,7 +1432,8 @@ SymHeapCore::Private::Private(const SymHeapCore::Private &ref): RefCntLib::enter(this->neqDb); } -SymHeapCore::Private::~Private() { +SymHeapCore::Private::~Private() +{ RefCntLib::leave(this->liveRoots); RefCntLib::leave(this->cVarMap); RefCntLib::leave(this->cValueMap); @@ -1419,7 +1441,8 @@ SymHeapCore::Private::~Private() { RefCntLib::leave(this->neqDb); } -TValId SymHeapCore::Private::objInit(TObjId obj) { +TValId SymHeapCore::Private::objInit(TObjId obj) +{ HeapObject *objData; this->ents.getEntRW(&objData, obj); CL_BREAK_IF(!objData->extRefCnt); @@ -1473,7 +1496,8 @@ TValId SymHeapCore::Private::objInit(TObjId obj) { return val; } -TValId SymHeapCore::valueOf(TObjId obj) { +TValId SymHeapCore::valueOf(TObjId obj) +{ // handle special cases first switch (obj) { case OBJ_UNKNOWN: @@ -1518,7 +1542,8 @@ TValId SymHeapCore::valueOf(TObjId obj) { return d->objInit(obj); } -void SymHeapCore::usedBy(ObjList &dst, TValId val, bool liveOnly) const { +void SymHeapCore::usedBy(ObjList &dst, TValId val, bool liveOnly) const +{ if (VAL_NULL == val) // we do not track uses of special values return; @@ -1550,7 +1575,8 @@ void SymHeapCore::usedBy(ObjList &dst, TValId val, bool liveOnly) const { } } -unsigned SymHeapCore::usedByCount(TValId val) const { +unsigned SymHeapCore::usedByCount(TValId val) const +{ if (VAL_NULL == val) return 0; @@ -1559,7 +1585,8 @@ unsigned SymHeapCore::usedByCount(TValId val) const { return valData->usedBy.size(); } -void SymHeapCore::pointedBy(ObjList &dst, TValId root) const { +void SymHeapCore::pointedBy(ObjList &dst, TValId root) const +{ const RootValue *rootData; d->ents.getEntRO(&rootData, root); CL_BREAK_IF(rootData->offRoot); @@ -1570,17 +1597,20 @@ void SymHeapCore::pointedBy(ObjList &dst, 
TValId root) const { dst.push_back(ObjHandle(*const_cast(this), obj)); } -unsigned SymHeapCore::pointedByCount(TValId root) const { +unsigned SymHeapCore::pointedByCount(TValId root) const +{ const RootValue *rootData; d->ents.getEntRO(&rootData, root); return rootData->usedByGl.size(); } -unsigned SymHeapCore::lastId() const { +unsigned SymHeapCore::lastId() const +{ return d->ents.lastId(); } -TValId SymHeapCore::valClone(TValId val) { +TValId SymHeapCore::valClone(TValId val) +{ const BaseValue *valData; d->ents.getEntRO(&valData, val); @@ -1663,7 +1693,8 @@ TObjId SymHeapCore::Private::copySingleLiveBlock( return dst; } -TValId SymHeapCore::Private::dupRoot(TValId rootAt) { +TValId SymHeapCore::Private::dupRoot(TValId rootAt) +{ CL_DEBUG("SymHeapCore::Private::dupRoot() is taking place..."); const RootValue *rootDataSrc; this->ents.getEntRO(&rootDataSrc, rootAt); @@ -1693,7 +1724,8 @@ TValId SymHeapCore::Private::dupRoot(TValId rootAt) { return imageAt; } -void SymHeapCore::gatherLivePointers(ObjList &dst, TValId root) const { +void SymHeapCore::gatherLivePointers(ObjList &dst, TValId root) const +{ const RootValue *rootData; d->ents.getEntRO(&rootData, root); @@ -1707,7 +1739,8 @@ void SymHeapCore::gatherLivePointers(ObjList &dst, TValId root) const { } } -void SymHeapCore::gatherUniformBlocks(TUniBlockMap &dst, TValId root) const { +void SymHeapCore::gatherUniformBlocks(TUniBlockMap &dst, TValId root) const +{ const RootValue *rootData; d->ents.getEntRO(&rootData, root); BOOST_FOREACH(TLiveObjs::const_reference item, rootData->liveObjs) { @@ -1728,7 +1761,8 @@ void SymHeapCore::gatherUniformBlocks(TUniBlockMap &dst, TValId root) const { } } -void SymHeapCore::gatherLiveObjects(ObjList &dst, TValId root) const { +void SymHeapCore::gatherLiveObjects(ObjList &dst, TValId root) const +{ const RootValue *rootData; d->ents.getEntRO(&rootData, root); @@ -1849,12 +1883,14 @@ SymHeapCore::SymHeapCore(const SymHeapCore &ref): CL_BREAK_IF(!&stor_); } -SymHeapCore::~SymHeapCore() { +SymHeapCore::~SymHeapCore() +{ delete d; } // cppcheck-suppress operatorEqToSelf -SymHeapCore& SymHeapCore::operator=(const SymHeapCore &ref) { +SymHeapCore& SymHeapCore::operator=(const SymHeapCore &ref) +{ CL_BREAK_IF(&ref == this); CL_BREAK_IF(&stor_ != &ref.stor_); @@ -1863,20 +1899,24 @@ SymHeapCore& SymHeapCore::operator=(const SymHeapCore &ref) { return *this; } -void SymHeapCore::swap(SymHeapCore &ref) { +void SymHeapCore::swap(SymHeapCore &ref) +{ CL_BREAK_IF(&stor_ != &ref.stor_); swapValues(this->d, ref.d); } -Trace::Node* SymHeapCore::traceNode() const { +Trace::Node* SymHeapCore::traceNode() const +{ return d->traceHandle.node(); } -void SymHeapCore::traceUpdate(Trace::Node *node) { +void SymHeapCore::traceUpdate(Trace::Node *node) +{ d->traceHandle.reset(node); } -void SymHeapCore::objSetValue(TObjId obj, TValId val, TValSet *killedPtrs) { +void SymHeapCore::objSetValue(TObjId obj, TValId val, TValSet *killedPtrs) +{ // we allow to set values of atomic types only const HeapObject *objData; d->ents.getEntRO(&objData, obj); @@ -2106,7 +2146,8 @@ bool SymHeapCore::Private::findZeroAtOff( return true; } -TObjType SymHeapCore::objType(TObjId obj) const { +TObjType SymHeapCore::objType(TObjId obj) const +{ if (obj < 0) return 0; @@ -2115,7 +2156,8 @@ TObjType SymHeapCore::objType(TObjId obj) const { return objData->clt; } -TValId SymHeapCore::Private::shiftCustomValue(TValId ref, TOffset shift) { +TValId SymHeapCore::Private::shiftCustomValue(TValId ref, TOffset shift) +{ 
CL_BREAK_IF(!this->chkValueDeps(ref)); const InternalCustomValue *customDataRef; @@ -2142,7 +2184,8 @@ TValId SymHeapCore::Private::shiftCustomValue(TValId ref, TOffset shift) { return val; } -TValId SymHeapCore::Private::wrapIntVal(const IR::TInt num) { +TValId SymHeapCore::Private::wrapIntVal(const IR::TInt num) +{ if (IR::Int0 == num) return VAL_NULL; @@ -2165,7 +2208,8 @@ TValId SymHeapCore::Private::wrapIntVal(const IR::TInt num) { return valInt; } -void SymHeapCore::Private::replaceRngByInt(const InternalCustomValue *valData) { +void SymHeapCore::Private::replaceRngByInt(const InternalCustomValue *valData) +{ CL_DEBUG("replaceRngByInt() is taking place..."); // we already expect a scalar at this point @@ -2181,7 +2225,8 @@ void SymHeapCore::Private::replaceRngByInt(const InternalCustomValue *valData) { this->setValueOf(obj, replaceBy); } -void SymHeapCore::Private::trimCustomValue(TValId val, const IR::Range &win) { +void SymHeapCore::Private::trimCustomValue(TValId val, const IR::Range &win) +{ CL_BREAK_IF(!this->chkValueDeps(val)); const InternalCustomValue *valData; @@ -2229,7 +2274,8 @@ void SymHeapCore::Private::trimCustomValue(TValId val, const IR::Range &win) { CL_BREAK_IF(!this->chkValueDeps(val)); } -TValId SymHeapCore::valByOffset(TValId at, TOffset off) { +TValId SymHeapCore::valByOffset(TValId at, TOffset off) +{ if (!off || at < 0) return at; @@ -2280,7 +2326,8 @@ TValId SymHeapCore::valByOffset(TValId at, TOffset off) { return val; } -TValId SymHeapCore::valByRange(TValId at, IR::Range range) { +TValId SymHeapCore::valByRange(TValId at, IR::Range range) +{ if (isSingular(range)) { CL_DEBUG("valByRange() got a singular range, passing to valByOffset()"); return this->valByOffset(at, range.lo); @@ -2319,7 +2366,8 @@ TValId SymHeapCore::valByRange(TValId at, IR::Range range) { return val; } -TValId SymHeapCore::valShift(TValId valToShift, TValId shiftBy) { +TValId SymHeapCore::valShift(TValId valToShift, TValId shiftBy) +{ if (valToShift < 0) // do not shift special values return valToShift; @@ -2384,7 +2432,8 @@ TValId SymHeapCore::valShift(TValId valToShift, TValId shiftBy) { return valResult; } -void SymHeapCore::valRestrictRange(TValId val, IR::Range win) { +void SymHeapCore::valRestrictRange(TValId val, IR::Range win) +{ const BaseValue *valData; d->ents.getEntRO(&valData, val); @@ -2448,7 +2497,8 @@ void SymHeapCore::valRestrictRange(TValId val, IR::Range win) { } } -void SymHeapCore::Private::bindValues(TValId v1, TValId v2, TValId valSum) { +void SymHeapCore::Private::bindValues(TValId v1, TValId v2, TValId valSum) +{ const BaseValue *valData1, *valData2; this->ents.getEntRO(&valData1, v1); this->ents.getEntRO(&valData2, v2); @@ -2467,7 +2517,8 @@ void SymHeapCore::Private::bindValues(TValId v1, TValId v2, TValId valSum) { this->coinDb->add(anchor1, anchor2, valSum); } -TValId SymHeapCore::diffPointers(const TValId v1, const TValId v2) { +TValId SymHeapCore::diffPointers(const TValId v1, const TValId v2) +{ const TValId root1 = this->valRoot(v1); const TValId root2 = this->valRoot(v2); if (root1 != root2) @@ -2490,7 +2541,8 @@ TValId SymHeapCore::diffPointers(const TValId v1, const TValId v2) { return valDiff; } -EValueOrigin SymHeapCore::valOrigin(TValId val) const { +EValueOrigin SymHeapCore::valOrigin(TValId val) const +{ switch (val) { case VAL_INVALID: return VO_INVALID; @@ -2508,7 +2560,8 @@ EValueOrigin SymHeapCore::valOrigin(TValId val) const { return valData->origin; } -EValueTarget SymHeapCore::valTarget(TValId val) const { +EValueTarget 
SymHeapCore::valTarget(TValId val) const +{ if (val <= 0) return VT_INVALID; @@ -2528,7 +2581,8 @@ EValueTarget SymHeapCore::valTarget(TValId val) const { return code; } -bool isUninitialized(EValueOrigin code) { +bool isUninitialized(EValueOrigin code) +{ switch (code) { case VO_HEAP: case VO_STACK: @@ -2539,11 +2593,13 @@ bool isUninitialized(EValueOrigin code) { } } -bool isAbstract(EValueTarget code) { +bool isAbstract(EValueTarget code) +{ return (VT_ABSTRACT == code); } -bool isKnownObject(EValueTarget code) { +bool isKnownObject(EValueTarget code) +{ switch (code) { case VT_STATIC: case VT_ON_HEAP: @@ -2555,7 +2611,8 @@ bool isKnownObject(EValueTarget code) { } } -bool isGone(EValueTarget code) { +bool isGone(EValueTarget code) +{ switch (code) { case VT_DELETED: case VT_LOST: @@ -2566,7 +2623,8 @@ bool isGone(EValueTarget code) { } } -bool isOnHeap(EValueTarget code) { +bool isOnHeap(EValueTarget code) +{ switch (code) { case VT_ON_HEAP: case VT_ABSTRACT: @@ -2577,7 +2635,8 @@ bool isOnHeap(EValueTarget code) { } } -bool isProgramVar(EValueTarget code) { +bool isProgramVar(EValueTarget code) +{ switch (code) { case VT_STATIC: case VT_ON_STACK: @@ -2588,17 +2647,20 @@ bool isProgramVar(EValueTarget code) { } } -bool isPossibleToDeref(EValueTarget code) { +bool isPossibleToDeref(EValueTarget code) +{ return isOnHeap(code) || isProgramVar(code); } -bool isAnyDataArea(EValueTarget code) { +bool isAnyDataArea(EValueTarget code) +{ return isPossibleToDeref(code) || (VT_RANGE == code); } -TValId SymHeapCore::valRoot(TValId val) const { +TValId SymHeapCore::valRoot(TValId val) const +{ if (val <= 0) return val; @@ -2607,7 +2669,8 @@ TValId SymHeapCore::valRoot(TValId val) const { return valData->valRoot; } -TOffset SymHeapCore::valOffset(TValId val) const { +TOffset SymHeapCore::valOffset(TValId val) const +{ if (val <= 0) return 0; @@ -2629,7 +2692,8 @@ TOffset SymHeapCore::valOffset(TValId val) const { } } -IR::Range SymHeapCore::valOffsetRange(TValId val) const { +IR::Range SymHeapCore::valOffsetRange(TValId val) const +{ const BaseValue *valData; d->ents.getEntRO(&valData, val); @@ -2664,7 +2728,8 @@ IR::Range SymHeapCore::valOffsetRange(TValId val) const { return range; } -void SymHeapCore::valReplace(TValId val, TValId replaceBy) { +void SymHeapCore::valReplace(TValId val, TValId replaceBy) +{ const BaseValue *valData; d->ents.getEntRO(&valData, val); @@ -2686,7 +2751,8 @@ void SymHeapCore::valReplace(TValId val, TValId replaceBy) { } } -void SymHeapCore::addNeq(TValId v1, TValId v2) { +void SymHeapCore::addNeq(TValId v1, TValId v2) +{ RefCntLib::requireExclusivity(d->neqDb); const EValueTarget code1 = this->valTarget(v1); @@ -2700,14 +2766,16 @@ void SymHeapCore::addNeq(TValId v1, TValId v2) { d->neqDb->add(v1, v2); } -void SymHeapCore::delNeq(TValId v1, TValId v2) { +void SymHeapCore::delNeq(TValId v1, TValId v2) +{ CL_BREAK_IF(!this->chkNeq(v1, v2)); RefCntLib::requireExclusivity(d->neqDb); d->neqDb->del(v1, v2); } -void SymHeapCore::gatherRelatedValues(TValList &dst, TValId val) const { +void SymHeapCore::gatherRelatedValues(TValList &dst, TValId val) const +{ d->neqDb->gatherRelatedValues(dst, val); d->coinDb->gatherRelatedValues(dst, val); } @@ -2808,7 +2876,8 @@ bool SymHeapCore::matchPreds(const SymHeapCore &ref, const TValMap &valMap) return true; } -TValId SymHeapCore::placedAt(TObjId obj) { +TValId SymHeapCore::placedAt(TObjId obj) +{ if (obj < 0) return VAL_INVALID; @@ -2821,7 +2890,8 @@ TValId SymHeapCore::placedAt(TObjId obj) { return this->valByOffset(root, 
objData->off); } -TObjId SymHeapCore::ptrAt(TValId at) { +TObjId SymHeapCore::ptrAt(TValId at) +{ if (at <= 0) return OBJ_INVALID; @@ -2884,7 +2954,8 @@ TObjId SymHeapCore::ptrAt(TValId at) { } // TODO: simplify the code -TObjId SymHeapCore::objAt(TValId at, TObjType clt) { +TObjId SymHeapCore::objAt(TValId at, TObjType clt) +{ if (at <= 0) return OBJ_INVALID; @@ -2980,14 +3051,16 @@ TObjId SymHeapCore::objAt(TValId at, TObjType clt) { return d->objCreate(root, off, clt); } -void SymHeapCore::objEnter(TObjId obj) { +void SymHeapCore::objEnter(TObjId obj) +{ HeapObject *objData; d->ents.getEntRW(&objData, obj); CL_BREAK_IF(objData->extRefCnt < 0); ++(objData->extRefCnt); } -void SymHeapCore::objLeave(TObjId obj) { +void SymHeapCore::objLeave(TObjId obj) +{ HeapObject *objData; d->ents.getEntRW(&objData, obj); CL_BREAK_IF(objData->extRefCnt < 1); @@ -3017,13 +3090,15 @@ void SymHeapCore::objLeave(TObjId obj) { // TODO: pack the representation if possible } -CVar SymHeapCore::cVarByRoot(TValId valRoot) const { +CVar SymHeapCore::cVarByRoot(TValId valRoot) const +{ const RootValue *rootData; d->ents.getEntRO(&rootData, valRoot); return rootData->cVar; } -TValId SymHeapCore::addrOfVar(CVar cv, bool createIfNeeded) { +TValId SymHeapCore::addrOfVar(CVar cv, bool createIfNeeded) +{ TValId addr = d->cVarMap->find(cv); if (0 < addr) return addr; @@ -3068,7 +3143,8 @@ TValId SymHeapCore::addrOfVar(CVar cv, bool createIfNeeded) { return addr; } -static bool dummyFilter(EValueTarget) { +static bool dummyFilter(EValueTarget) +{ return true; } @@ -3084,7 +3160,8 @@ void SymHeapCore::gatherRootObjects(TValList &dst, bool (*filter)(EValueTarget)) dst.push_back(at); } -TObjId SymHeapCore::valGetComposite(TValId val) const { +TObjId SymHeapCore::valGetComposite(TValId val) const +{ const BaseValue *valData; d->ents.getEntRO(&valData, val); CL_BREAK_IF(VT_COMPOSITE != valData->code); @@ -3093,7 +3170,8 @@ TObjId SymHeapCore::valGetComposite(TValId val) const { return compData->compObj; } -TValId SymHeapCore::heapAlloc(const TSizeRange &size) { +TValId SymHeapCore::heapAlloc(const TSizeRange &size) +{ CL_BREAK_IF(size.lo <= IR::Int0); // assign an address @@ -3111,7 +3189,8 @@ TValId SymHeapCore::heapAlloc(const TSizeRange &size) { return addr; } -void SymHeapCore::valDestroyTarget(TValId val) { +void SymHeapCore::valDestroyTarget(TValId val) +{ if (VAL_NULL == val) { CL_BREAK_IF("SymHeapCore::valDestroyTarget() got VAL_NULL"); return; @@ -3127,7 +3206,8 @@ void SymHeapCore::valDestroyTarget(TValId val) { d->destroyRoot(val); } -TSizeRange SymHeapCore::valSizeOfTarget(TValId val) const { +TSizeRange SymHeapCore::valSizeOfTarget(TValId val) const +{ const BaseValue *valData; d->ents.getEntRO(&valData, val); if (!isPossibleToDeref(valData->code)) @@ -3152,7 +3232,8 @@ TSizeRange SymHeapCore::valSizeOfTarget(TValId val) const { return size; } -TSizeRange SymHeapCore::valSizeOfString(TValId addr) const { +TSizeRange SymHeapCore::valSizeOfString(TValId addr) const +{ const BaseValue *valData; d->ents.getEntRO(&valData, addr); @@ -3190,7 +3271,8 @@ TSizeRange SymHeapCore::valSizeOfString(TValId addr) const { return rng; } -void SymHeapCore::valSetLastKnownTypeOfTarget(TValId root, TObjType clt) { +void SymHeapCore::valSetLastKnownTypeOfTarget(TValId root, TObjType clt) +{ RootValue *rootData; d->ents.getEntRW(&rootData, root); @@ -3207,14 +3289,16 @@ void SymHeapCore::valSetLastKnownTypeOfTarget(TValId root, TObjType clt) { rootData->lastKnownClt = clt; } -TObjType SymHeapCore::valLastKnownTypeOfTarget(TValId root) 
const { +TObjType SymHeapCore::valLastKnownTypeOfTarget(TValId root) const +{ CL_BREAK_IF(this->valOffset(root)); const RootValue *rootData; d->ents.getEntRO(&rootData, root); return rootData->lastKnownClt; } -void SymHeapCore::Private::destroyRoot(TValId root) { +void SymHeapCore::Private::destroyRoot(TValId root) +{ RootValue *rootData; this->ents.getEntRW(&rootData, root); @@ -3273,7 +3357,8 @@ void SymHeapCore::Private::destroyRoot(TValId root) { rootData->arena.clear(); } -TValId SymHeapCore::valCreate(EValueTarget code, EValueOrigin origin) { +TValId SymHeapCore::valCreate(EValueTarget code, EValueOrigin origin) +{ switch (code) { case VT_UNKNOWN: // this is the most common case @@ -3293,7 +3378,8 @@ TValId SymHeapCore::valCreate(EValueTarget code, EValueOrigin origin) { return d->valCreate(code, origin); } -TValId SymHeapCore::valWrapCustom(CustomValue cVal) { +TValId SymHeapCore::valWrapCustom(CustomValue cVal) +{ const ECustomValue code = cVal.code(); if (CV_INT_RANGE == code) { @@ -3347,7 +3433,8 @@ const CustomValue& SymHeapCore::valUnwrapCustom(TValId val) const return cv; } -TProtoLevel SymHeapCore::valTargetProtoLevel(TValId val) const { +TProtoLevel SymHeapCore::valTargetProtoLevel(TValId val) const +{ if (val <= 0) // not a prototype for sure return 0; @@ -3365,7 +3452,8 @@ TProtoLevel SymHeapCore::valTargetProtoLevel(TValId val) const { return rootData->protoLevel; } -void SymHeapCore::valTargetSetProtoLevel(TValId root, TProtoLevel level) { +void SymHeapCore::valTargetSetProtoLevel(TValId root, TProtoLevel level) +{ CL_BREAK_IF(!isPossibleToDeref(this->valTarget(root))); CL_BREAK_IF(this->valOffset(root)); CL_BREAK_IF(level < 0); @@ -3375,7 +3463,8 @@ void SymHeapCore::valTargetSetProtoLevel(TValId root, TProtoLevel level) { rootData->protoLevel = level; } -bool SymHeapCore::chkNeq(TValId v1, TValId v2) const { +bool SymHeapCore::chkNeq(TValId v1, TValId v2) const +{ return d->neqDb->chk(v1, v2); } @@ -3419,11 +3508,13 @@ SymHeap::SymHeap(const SymHeap &ref): RefCntLib::enter(d); } -SymHeap::~SymHeap() { +SymHeap::~SymHeap() +{ RefCntLib::leave(d); } -SymHeap& SymHeap::operator=(const SymHeap &ref) { +SymHeap& SymHeap::operator=(const SymHeap &ref) +{ SymHeapCore::operator=(ref); RefCntLib::leave(d); @@ -3434,7 +3525,8 @@ SymHeap& SymHeap::operator=(const SymHeap &ref) { return *this; } -void SymHeap::swap(SymHeapCore &baseRef) { +void SymHeap::swap(SymHeapCore &baseRef) +{ // swap base SymHeapCore::swap(baseRef); @@ -3443,7 +3535,8 @@ void SymHeap::swap(SymHeapCore &baseRef) { swapValues(this->d, ref.d); } -TValId SymHeap::valClone(TValId val) { +TValId SymHeap::valClone(TValId val) +{ const TValId dup = SymHeapCore::valClone(val); if (dup <= 0 || VT_RANGE == this->valTarget(val)) return dup; @@ -3463,7 +3556,8 @@ TValId SymHeap::valClone(TValId val) { return dup; } -EObjKind SymHeap::valTargetKind(TValId val) const { +EObjKind SymHeap::valTargetKind(TValId val) const +{ if (val <= 0) return OK_CONCRETE; @@ -3475,11 +3569,13 @@ EObjKind SymHeap::valTargetKind(TValId val) const { return aData->kind; } -bool SymHeap::hasAbstractTarget(TValId val) const { +bool SymHeap::hasAbstractTarget(TValId val) const +{ return (OK_CONCRETE != this->valTargetKind(val)); } -const BindingOff& SymHeap::segBinding(TValId root) const { +const BindingOff& SymHeap::segBinding(TValId root) const +{ CL_BREAK_IF(this->valOffset(root)); CL_BREAK_IF(!this->hasAbstractTarget(root)); CL_BREAK_IF(!d->absRoots.isValidEnt(root)); @@ -3523,7 +3619,8 @@ void SymHeap::valTargetSetAbstract( 
d->absRoots.assignId(root, aData); } -void SymHeap::valTargetSetConcrete(TValId root) { +void SymHeap::valTargetSetConcrete(TValId root) +{ CL_DEBUG("SymHeap::valTargetSetConcrete() is taking place..."); CL_BREAK_IF(!isPossibleToDeref(this->valTarget(root))); CL_BREAK_IF(this->valOffset(root)); @@ -3536,7 +3633,8 @@ void SymHeap::valTargetSetConcrete(TValId root) { d->absRoots.releaseEnt(root); } -void SymHeap::valDestroyTarget(TValId root) { +void SymHeap::valDestroyTarget(TValId root) +{ SymHeapCore::valDestroyTarget(root); if (!d->absRoots.isValidEnt(root)) return; @@ -3550,7 +3648,8 @@ void SymHeap::valDestroyTarget(TValId root) { d->absRoots.releaseEnt(root); } -TMinLen SymHeap::segMinLength(TValId seg) const { +TMinLen SymHeap::segMinLength(TValId seg) const +{ CL_BREAK_IF(this->valOffset(seg)); CL_BREAK_IF(!d->absRoots.isValidEnt(seg)); @@ -3571,7 +3670,8 @@ TMinLen SymHeap::segMinLength(TValId seg) const { } } -void SymHeap::segSetMinLength(TValId seg, TMinLen len) { +void SymHeap::segSetMinLength(TValId seg, TMinLen len) +{ CL_BREAK_IF(this->valOffset(seg)); CL_BREAK_IF(!d->absRoots.isValidEnt(seg)); diff --git a/sl/symheap.hh b/sl/symheap.hh index 117009d72..83fcba33b 100644 --- a/sl/symheap.hh +++ b/sl/symheap.hh @@ -173,7 +173,8 @@ class CustomValue { bool operator==(const CustomValue &a, const CustomValue &b); -inline bool operator!=(const CustomValue &a, const CustomValue &b) { +inline bool operator!=(const CustomValue &a, const CustomValue &b) +{ return !operator==(a, b); } @@ -245,7 +246,8 @@ struct CVar { } }; -inline bool operator==(const CVar &a, const CVar &b) { +inline bool operator==(const CVar &a, const CVar &b) +{ if (a.uid != b.uid) return false; @@ -256,7 +258,8 @@ inline bool operator==(const CVar &a, const CVar &b) { return a.inst == b.inst; } -inline bool operator!=(const CVar &a, const CVar &b) { +inline bool operator!=(const CVar &a, const CVar &b) +{ return !operator==(a, b); } @@ -280,7 +283,8 @@ typedef std::map TUniBlockMap; * lexicographical comparison of CVar objects * @note we need it in order to place the objects into ordered containers */ -inline bool operator<(const CVar &a, const CVar &b) { +inline bool operator<(const CVar &a, const CVar &b) +{ if (a.uid < b.uid) return true; else if (b.uid < a.uid) @@ -645,7 +649,8 @@ class ObjHandle { }; /// this allows to insert ObjHandle instances into std::set -inline bool operator<(const ObjHandle &a, const ObjHandle &b) { +inline bool operator<(const ObjHandle &a, const ObjHandle &b) +{ if (a.sh() < b.sh()) return true; @@ -655,12 +660,14 @@ inline bool operator<(const ObjHandle &a, const ObjHandle &b) { return (a.objId() < b.objId()); } -inline bool operator==(const ObjHandle &a, const ObjHandle &b) { +inline bool operator==(const ObjHandle &a, const ObjHandle &b) +{ return (a.sh() == b.sh()) && (a.objId() == b.objId()); } -inline bool operator!=(const ObjHandle &a, const ObjHandle &b) { +inline bool operator!=(const ObjHandle &a, const ObjHandle &b) +{ return !operator==(a, b); } @@ -691,7 +698,8 @@ enum EObjKind { OK_SEE_THROUGH_2N ///< OK_SEE_THROUGH with two next pointers }; -inline bool isMayExistObj(const enum EObjKind kind) { +inline bool isMayExistObj(const enum EObjKind kind) +{ switch (kind) { case OK_OBJ_OR_NULL: case OK_SEE_THROUGH: diff --git a/sl/symjoin.cc b/sl/symjoin.cc index f6d6340e0..1aa409935 100644 --- a/sl/symjoin.cc +++ b/sl/symjoin.cc @@ -49,7 +49,8 @@ static bool debuggingSymJoin = static_cast(DEBUG_SYMJOIN); CL_DEBUG("SymJoin: " << __VA_ARGS__); \ } while (0) -void 
debugSymJoin(const bool enable) { +void debugSymJoin(const bool enable) +{ if (enable == ::debuggingSymJoin) return; @@ -122,7 +123,8 @@ struct SchedItem { }; // needed by std::set -inline bool operator<(const SchedItem &a, const SchedItem &b) { +inline bool operator<(const SchedItem &a, const SchedItem &b) +{ if (a.v1 < b.v1) return true; if (b.v1 < a.v1) @@ -223,7 +225,8 @@ struct SymJoinCtx { }; /// handy when debugging -void dump_ctx(const SymJoinCtx &ctx) { +void dump_ctx(const SymJoinCtx &ctx) +{ using std::cout; // plot heaps @@ -273,7 +276,8 @@ void dump_ctx(const SymJoinCtx &ctx) { } /// update ctx.status according to action -bool updateJoinStatus(SymJoinCtx &ctx, const EJoinStatus action) { +bool updateJoinStatus(SymJoinCtx &ctx, const EJoinStatus action) +{ if (JS_USE_ANY == action) return true; @@ -479,7 +483,8 @@ bool joinValuesByCode( const TValId v1, const TValId v2); -bool bumpNestingLevel(const ObjHandle &obj) { +bool bumpNestingLevel(const ObjHandle &obj) +{ if (!obj.isValid()) return false; @@ -612,7 +617,8 @@ struct ObjJoinVisitor { }; template -void dlSegBlackListPrevPtr(TDst &dst, SymHeap &sh, TValId root) { +void dlSegBlackListPrevPtr(TDst &dst, SymHeap &sh, TValId root) +{ const EObjKind kind = sh.valTargetKind(root); if (OK_DLS != kind) return; @@ -1311,7 +1317,8 @@ bool dlSegHandleShared( return true; } -bool joinReturnAddrs(SymJoinCtx &ctx) { +bool joinReturnAddrs(SymJoinCtx &ctx) +{ TObjType clt; const TObjType clt1 = ctx.sh1.valLastKnownTypeOfTarget(VAL_ADDR_OF_RET); const TObjType clt2 = ctx.sh2.valLastKnownTypeOfTarget(VAL_ADDR_OF_RET); @@ -2144,7 +2151,8 @@ bool mayExistFallback( return result; } -EValueOrigin joinOrigin(const EValueOrigin vo1, const EValueOrigin vo2) { +EValueOrigin joinOrigin(const EValueOrigin vo1, const EValueOrigin vo2) +{ if (vo1 == vo2) // use any return vo2; @@ -2252,7 +2260,8 @@ bool joinValuesByCode( return true; } -bool joinValuePair(SymJoinCtx &ctx, const SchedItem &item) { +bool joinValuePair(SymJoinCtx &ctx, const SchedItem &item) +{ const TValId v1 = item.v1; const TValId v2 = item.v2; if (checkValueMapping(ctx, v1, v2, /* allowUnknownMapping */ false)) @@ -2285,7 +2294,8 @@ bool joinValuePair(SymJoinCtx &ctx, const SchedItem &item) { || mayExistFallback(ctx, item, JS_USE_SH2); } -bool joinPendingValues(SymJoinCtx &ctx) { +bool joinPendingValues(SymJoinCtx &ctx) +{ SchedItem item; while (ctx.wl.next(item)) { SJ_DEBUG("--- " << SJ_VALP(item.v1, item.v2)); @@ -2336,7 +2346,8 @@ class JoinVarVisitor { } }; -bool joinCVars(SymJoinCtx &ctx, const JoinVarVisitor::EMode mode) { +bool joinCVars(SymJoinCtx &ctx, const JoinVarVisitor::EMode mode) +{ SymHeap *const heaps[] = { &ctx.dst, &ctx.sh1, @@ -2454,7 +2465,8 @@ bool setDstValuesCore( return true; } -bool setDstValues(SymJoinCtx &ctx, const TObjSet *blackList = 0) { +bool setDstValues(SymJoinCtx &ctx, const TObjSet *blackList = 0) +{ SymHeap &dst = ctx.dst; SymHeap &sh1 = ctx.sh1; SymHeap &sh2 = ctx.sh2; @@ -2560,7 +2572,8 @@ struct MayExistLevelUpdater { } }; -bool updateMayExistLevels(SymJoinCtx &ctx) { +bool updateMayExistLevels(SymJoinCtx &ctx) +{ TValList dstRoots; ctx.dst.gatherRootObjects(dstRoots, isOnHeap); BOOST_FOREACH(const TValId rootDst, dstRoots) { @@ -2591,7 +2604,8 @@ bool matchPreds( && sh2.matchPreds(sh1, vMap[/* rtl */ 1]); } -bool handleDstPreds(SymJoinCtx &ctx) { +bool handleDstPreds(SymJoinCtx &ctx) +{ // go through all segments and initialize minLength BOOST_FOREACH(SymJoinCtx::TSegLengths::const_reference ref, ctx.segLengths) { @@ -2633,7 +2647,8 @@ bool 
handleDstPreds(SymJoinCtx &ctx) { return true; } -bool validateStatus(const SymJoinCtx &ctx) { +bool validateStatus(const SymJoinCtx &ctx) +{ if (ctx.allowThreeWay || (JS_THREE_WAY != ctx.status)) return true; @@ -2742,7 +2757,8 @@ void mapGhostAddressSpace( } /// this runs only in debug build -bool dlSegCheckProtoConsistency(const SymJoinCtx &ctx) { +bool dlSegCheckProtoConsistency(const SymJoinCtx &ctx) +{ BOOST_FOREACH(const TValId proto, ctx.protoRoots) { if (OK_DLS != ctx.dst.valTargetKind(proto)) // we are interested only DLSs here @@ -2945,7 +2961,8 @@ void recoverPrototypes( } } -void restorePrototypeLengths(SymJoinCtx &ctx) { +void restorePrototypeLengths(SymJoinCtx &ctx) +{ CL_BREAK_IF(!ctx.joiningDataReadWrite()); SymHeap &sh = ctx.dst; diff --git a/sl/symjoin.hh b/sl/symjoin.hh index 8fb101c9a..071c37fc8 100644 --- a/sl/symjoin.hh +++ b/sl/symjoin.hh @@ -37,7 +37,8 @@ enum EJoinStatus { JS_THREE_WAY }; -inline std::ostream& operator<<(std::ostream &str, const EJoinStatus status) { +inline std::ostream& operator<<(std::ostream &str, const EJoinStatus status) +{ switch (status) { case JS_USE_ANY: return (str << "JS_USE_ANY" ); case JS_USE_SH1: return (str << "JS_USE_SH1" ); diff --git a/sl/sympath.cc b/sl/sympath.cc index c70d4c5f1..14db7dca9 100644 --- a/sl/sympath.cc +++ b/sl/sympath.cc @@ -58,7 +58,8 @@ const struct cl_loc* digBlockLocationCore(TBlock bb, bool backward) return 0; } -const struct cl_loc* digBlockLocation(TBlock bb, bool backward) { +const struct cl_loc* digBlockLocation(TBlock bb, bool backward) +{ const struct cl_loc *loc = digBlockLocationCore(bb, backward); if (loc) return loc; @@ -97,7 +98,8 @@ const struct cl_loc* digBlockLocation(TBlock bb, bool backward) { } /// print one item of the path trace -void printOneBlock(TBlock bb, int level, bool backward, bool loop = false) { +void printOneBlock(TBlock bb, int level, bool backward, bool loop = false) +{ using std::string; const struct cl_loc *lw = digBlockLocation(bb, backward); @@ -129,7 +131,8 @@ struct PStackItem { } }; -void PathTracer::printPaths() const { +void PathTracer::printPaths() const +{ if (!block_) // no idea where to start, giving up... return; diff --git a/sl/symplot.cc b/sl/symplot.cc index 9beb3b8cc..4bf825475 100644 --- a/sl/symplot.cc +++ b/sl/symplot.cc @@ -63,7 +63,8 @@ struct PlotData { } }; -void dlSegJumpToBegIfNeeded(const SymHeap &sh, TValId *pRoot) { +void dlSegJumpToBegIfNeeded(const SymHeap &sh, TValId *pRoot) +{ const TValId root = *pRoot; if (isDlSegPeer(sh, root)) *pRoot = dlSegPeer(sh, root); @@ -111,7 +112,8 @@ void digValues(PlotData &plot, const TValList &startingPoints, bool digForward) } } -inline const char* offPrefix(const TOffset off) { +inline const char* offPrefix(const TOffset off) +{ return (off < 0) ? 
"" : "+"; @@ -119,7 +121,8 @@ inline const char* offPrefix(const TOffset off) { #define SIGNED_OFF(off) offPrefix(off) << (off) -inline void appendLabelIf(std::ostream &str, const char *label) { +inline void appendLabelIf(std::ostream &str, const char *label) +{ if (!label) return; @@ -197,7 +200,8 @@ bool digIcByOffset( return true; } -void describeVar(PlotData &plot, const TValId rootAt) { +void describeVar(PlotData &plot, const TValId rootAt) +{ if (VAL_ADDR_OF_RET == rootAt) { plot.out << "OBJ_RETURN"; return; @@ -259,7 +263,8 @@ void describeFieldPlacement(PlotData &plot, const ObjHandle &obj, TObjType clt) } } -void describeObject(PlotData &plot, const ObjHandle &obj, const bool lonely) { +void describeObject(PlotData &plot, const ObjHandle &obj, const bool lonely) +{ SymHeap &sh = plot.sh; // check root @@ -296,7 +301,8 @@ void printRawRange( str << ", alignment = " << rng.alignment << suffix; } -void plotRootValue(PlotData &plot, const TValId val, const char *color) { +void plotRootValue(PlotData &plot, const TValId val, const char *color) +{ SymHeap &sh = plot.sh; const TSizeRange size = sh.valSizeOfTarget(val); const unsigned refCnt = sh.usedByCount(val); @@ -460,7 +466,8 @@ bool plotAtomicObj(PlotData &plot, const AtomicObject &ao, const bool lonely) return true; } -void plotUniformBlocks(PlotData &plot, const TValId root) { +void plotUniformBlocks(PlotData &plot, const TValId root) +{ SymHeap &sh = plot.sh; // get all uniform blocks inside the given root @@ -714,7 +721,8 @@ void plotCompositeObj(PlotData &plot, const TValId at, const TCont &liveObjs) plot.out << "}\n"; } -bool plotSimpleRoot(PlotData &plot, const ObjHandle &obj) { +bool plotSimpleRoot(PlotData &plot, const ObjHandle &obj) +{ SymHeap &sh = plot.sh; const TValId at = obj.placedAt(); @@ -742,7 +750,8 @@ bool plotSimpleRoot(PlotData &plot, const ObjHandle &obj) { return true; } -void plotRootObjects(PlotData &plot) { +void plotRootObjects(PlotData &plot) +{ SymHeap &sh = plot.sh; // go through roots @@ -773,7 +782,8 @@ void plotRootObjects(PlotData &plot) { #define GEN_labelByCode(cst) case cst: return #cst -const char* labelByOrigin(const EValueOrigin code) { +const char* labelByOrigin(const EValueOrigin code) +{ switch (code) { GEN_labelByCode(VO_INVALID); GEN_labelByCode(VO_ASSIGNED); @@ -788,7 +798,8 @@ const char* labelByOrigin(const EValueOrigin code) { return ""; } -const char* labelByTarget(const EValueTarget code) { +const char* labelByTarget(const EValueTarget code) +{ switch (code) { GEN_labelByCode(VT_INVALID); GEN_labelByCode(VT_UNKNOWN); @@ -807,7 +818,8 @@ const char* labelByTarget(const EValueTarget code) { return ""; } -void describeInt(PlotData &plot, const IR::TInt num, const TValId val) { +void describeInt(PlotData &plot, const IR::TInt num, const TValId val) +{ plot.out << ", fontcolor=red, label=\"[int] " << num; if (IR::Int0 < num && num < UCHAR_MAX && isprint(num)) plot.out << " = '" << static_cast(num) << "'"; @@ -815,7 +827,8 @@ void describeInt(PlotData &plot, const IR::TInt num, const TValId val) { plot.out << " (#" << val << ")\""; } -void describeIntRange(PlotData &plot, const IR::Range &rng, const TValId val) { +void describeIntRange(PlotData &plot, const IR::Range &rng, const TValId val) +{ plot.out << ", fontcolor=blue, label=\"[int range] "; printRawRange(plot.out, rng); @@ -823,13 +836,15 @@ void describeIntRange(PlotData &plot, const IR::Range &rng, const TValId val) { plot.out << " (#" << val << ")\""; } -void describeReal(PlotData &plot, const float fpn, const TValId val) { 
+void describeReal(PlotData &plot, const float fpn, const TValId val) +{ plot.out << ", fontcolor=red, label=\"[real] " << fpn << " (#" << val << ")\""; } -void describeFnc(PlotData &plot, const int uid, const TValId val) { +void describeFnc(PlotData &plot, const int uid, const TValId val) +{ TStorRef stor = plot.sh.stor(); const CodeStorage::Fnc *fnc = stor.fncs[uid]; CL_BREAK_IF(!fnc); @@ -840,14 +855,16 @@ void describeFnc(PlotData &plot, const int uid, const TValId val) { << val << ")\""; } -void describeStr(PlotData &plot, const std::string &str, const TValId val) { +void describeStr(PlotData &plot, const std::string &str, const TValId val) +{ // we need to escape twice, once for the C compiler and once for graphviz plot.out << ", fontcolor=blue, label=\"\\\"" << str << "\\\" (#" << val << ")\""; } -void describeCustomValue(PlotData &plot, const TValId val) { +void describeCustomValue(PlotData &plot, const TValId val) +{ SymHeap &sh = plot.sh; const CustomValue cVal = sh.valUnwrapCustom(val); @@ -974,7 +991,8 @@ void plotValue(PlotData &plot, const TValId val) plot.out << "\"];\n"; } -void plotPointsTo(PlotData &plot, const TValId val, const TObjId target) { +void plotPointsTo(PlotData &plot, const TValId val, const TObjId target) +{ plot.out << "\t" << SL_QUOTE(val) << " -> " << SL_QUOTE(target) << " [color=green, fontcolor=green];\n"; @@ -995,7 +1013,8 @@ void plotRangePtr(PlotData &plot, TValId val, TValId root, const IR::Range &rng) plot.out << "]\"];\n"; } -void plotNonRootValues(PlotData &plot) { +void plotNonRootValues(PlotData &plot) +{ SymHeap &sh = plot.sh; // go through non-roots @@ -1050,7 +1069,8 @@ void plotNonRootValues(PlotData &plot) { } } -const char* valNullLabel(const SymHeapCore &sh, const TObjId obj) { +const char* valNullLabel(const SymHeapCore &sh, const TObjId obj) +{ const ObjHandle hdl(const_cast(sh), obj); const TObjType clt = hdl.objType(); if (!clt) @@ -1194,7 +1214,8 @@ void plotHasValueFlat( plotHasValue(plot, root, val, /* isObj */ false, label.c_str()); } -void plotNeqZero(PlotData &plot, const TValId val) { +void plotNeqZero(PlotData &plot, const TValId val) +{ const int id = ++plot.last; plot.out << "\t" << SL_QUOTE("lonely" << id) << " [shape=plaintext, fontcolor=blue, label=NULL];\n"; @@ -1205,7 +1226,8 @@ void plotNeqZero(PlotData &plot, const TValId val) { ", penwidth=2.0];\n"; } -void plotNeqCustom(PlotData &plot, const TValId val, const TValId valCustom) { +void plotNeqCustom(PlotData &plot, const TValId val, const TValId valCustom) +{ const int id = ++plot.last; plot.out << "\t" << SL_QUOTE("lonely" << id) << " [shape=plaintext"; @@ -1218,7 +1240,8 @@ void plotNeqCustom(PlotData &plot, const TValId val, const TValId valCustom) { ", penwidth=2.0];\n"; } -void plotNeq(std::ostream &out, const TValId v1, const TValId v2) { +void plotNeq(std::ostream &out, const TValId v1, const TValId v2) +{ out << "\t" << SL_QUOTE(v1) << " -> " << SL_QUOTE(v2) << " [color=red, style=dashed, penwidth=2.0, arrowhead=none" @@ -1244,7 +1267,8 @@ class NeqPlotter: public SymPairSet { } }; -void plotNeqEdges(PlotData &plot) { +void plotNeqEdges(PlotData &plot) +{ // cppcheck-suppress unreachableCode SymHeap &sh = plot.sh; @@ -1271,7 +1295,8 @@ void plotNeqEdges(PlotData &plot) { np.plotNeqEdges(plot); } -void plotFlatEdges(PlotData &plot) { +void plotFlatEdges(PlotData &plot) +{ SymHeap &sh = plot.sh; // plot "hasValue" edges @@ -1317,7 +1342,8 @@ void plotFlatEdges(PlotData &plot) { } } -void plotHasValueEdges(PlotData &plot) { +void plotHasValueEdges(PlotData 
&plot) +{ // plot "hasValue" edges BOOST_FOREACH(PlotData::TLiveObjs::const_reference item, plot.liveObjs) BOOST_FOREACH(const ObjHandle &obj, /* ObjList */ item.second) @@ -1339,7 +1365,8 @@ void plotHasValueEdges(PlotData &plot) { } } -void plotEverything(PlotData &plot) { +void plotEverything(PlotData &plot) +{ plotRootObjects(plot); plotNonRootValues(plot); diff --git a/sl/symproc.cc b/sl/symproc.cc index 6e2342f33..d76ad89b0 100644 --- a/sl/symproc.cc +++ b/sl/symproc.cc @@ -48,7 +48,8 @@ // ///////////////////////////////////////////////////////////////////////////// // SymProc implementation -void SymProc::printBackTrace(EMsgLevel level, bool forcePtrace) { +void SymProc::printBackTrace(EMsgLevel level, bool forcePtrace) +{ // update trace graph Trace::MsgNode *trMsg = new Trace::MsgNode(sh_.traceNode(), level, lw_); sh_.traceUpdate(trMsg); @@ -81,7 +82,8 @@ void SymProc::printBackTrace(EMsgLevel level, bool forcePtrace) { #endif } -bool SymProc::hasFatalError() const { +bool SymProc::hasFatalError() const +{ #if 1 < SE_ERROR_RECOVERY_MODE // full error recovery mode return false; @@ -90,7 +92,8 @@ bool SymProc::hasFatalError() const { #endif } -TValId SymProc::valFromCst(const struct cl_operand &op) { +TValId SymProc::valFromCst(const struct cl_operand &op) +{ const struct cl_cst &cst = op.data.cst; CustomValue cv; @@ -166,7 +169,8 @@ void describeUnknownVal( CL_BREAK_IF("valOrigin out of range?"); } -const char* describeRootObj(const EValueTarget code) { +const char* describeRootObj(const EValueTarget code) +{ switch (code) { case VT_STATIC: return "a static variable"; @@ -232,7 +236,8 @@ void reportDerefOutOfBounds( } } -bool SymProc::checkForInvalidDeref(TValId val, const TSizeOf sizeOfTarget) { +bool SymProc::checkForInvalidDeref(TValId val, const TSizeOf sizeOfTarget) +{ if (VAL_NULL == val) { CL_ERROR_MSG(lw_, "dereference of NULL value"); return true; @@ -290,7 +295,8 @@ bool SymProc::checkForInvalidDeref(TValId val, const TSizeOf sizeOfTarget) { return false; } -void SymProc::varInit(TValId at) { +void SymProc::varInit(TValId at) +{ const CVar cv = sh_.cVarByRoot(at); const CodeStorage::Storage &stor = sh_.stor(); const CodeStorage::Var &var = stor.vars[cv.uid]; @@ -315,7 +321,8 @@ void SymProc::varInit(TValId at) { } } -TValId SymProc::varAt(const CVar &cv) { +TValId SymProc::varAt(const CVar &cv) +{ TValId at = sh_.addrOfVar(cv, /* createIfNeeded */ false); if (0 < at) // var already alive @@ -356,7 +363,8 @@ TValId SymProc::varAt(const CVar &cv) { return at; } -TValId SymProc::varAt(const struct cl_operand &op) { +TValId SymProc::varAt(const struct cl_operand &op) +{ // resolve CVar const int uid = varIdFromOperand(&op); const int nestLevel = bt_->countOccurrencesOfTopFnc(); @@ -381,7 +389,8 @@ bool addOffDerefArray(SymProc &proc, TOffset &off, const struct cl_accessor *ac) return true; } -TOffset offItem(const struct cl_accessor *ac) { +TOffset offItem(const struct cl_accessor *ac) +{ const int id = ac->data.item.id; const TObjType clt = ac->type; CL_BREAK_IF(!clt || clt->item_cnt <= id); @@ -389,7 +398,8 @@ TOffset offItem(const struct cl_accessor *ac) { return clt->items[id].offset; } -TValId SymProc::targetAt(const struct cl_operand &op) { +TValId SymProc::targetAt(const struct cl_operand &op) +{ // resolve program variable TValId addr = this->varAt(op); const struct cl_accessor *ac = op.accessor; @@ -450,7 +460,8 @@ TValId SymProc::targetAt(const struct cl_operand &op) { return sh_.valByOffset(addr, off); } -ObjHandle SymProc::objByOperand(const struct cl_operand 
&op) { +ObjHandle SymProc::objByOperand(const struct cl_operand &op) +{ CL_BREAK_IF(seekRefAccessor(op.accessor)); // resolve address of the target object @@ -476,7 +487,8 @@ ObjHandle SymProc::objByOperand(const struct cl_operand &op) { return obj; } -TValId SymProc::valFromObj(const struct cl_operand &op) { +TValId SymProc::valFromObj(const struct cl_operand &op) +{ if (seekRefAccessor(op.accessor)) return this->targetAt(op); @@ -498,7 +510,8 @@ TValId SymProc::valFromObj(const struct cl_operand &op) { } } -TValId SymProc::valFromOperand(const struct cl_operand &op) { +TValId SymProc::valFromOperand(const struct cl_operand &op) +{ const enum cl_operand_e code = op.code; switch (code) { case CL_OPERAND_VAR: @@ -513,7 +526,8 @@ TValId SymProc::valFromOperand(const struct cl_operand &op) { } } -bool SymProc::fncFromOperand(int *pUid, const struct cl_operand &op) { +bool SymProc::fncFromOperand(int *pUid, const struct cl_operand &op) +{ if (fncUidFromOperand(pUid, &op)) return true; @@ -532,7 +546,8 @@ bool SymProc::fncFromOperand(int *pUid, const struct cl_operand &op) { return true; } -void digRootTypeInfo(SymHeap &sh, const ObjHandle &lhs, TValId rhs) { +void digRootTypeInfo(SymHeap &sh, const ObjHandle &lhs, TValId rhs) +{ const EValueTarget code = sh.valTarget(rhs); if (!isPossibleToDeref(code)) // no valid target anyway @@ -565,7 +580,8 @@ void digRootTypeInfo(SymHeap &sh, const ObjHandle &lhs, TValId rhs) { sh.valSetLastKnownTypeOfTarget(rhs, cltTarget); } -void reportMemLeak(SymProc &proc, const EValueTarget code, const char *reason) { +void reportMemLeak(SymProc &proc, const EValueTarget code, const char *reason) +{ const struct cl_loc *loc = proc.lw(); const char *const what = describeRootObj(code); CL_WARN_MSG(loc, "memory leak detected while " << reason << "ing " << what); @@ -613,7 +629,8 @@ TValId ptrObjectEncoderCore( return sh.valCreate(VT_UNKNOWN, VO_REINTERPRET); } -TValId ptrObjectEncoder(SymProc &proc, const ObjHandle &dst, TValId val) { +TValId ptrObjectEncoder(SymProc &proc, const ObjHandle &dst, TValId val) +{ return ptrObjectEncoderCore(proc, dst, val, PK_DATA); } @@ -673,7 +690,8 @@ TValId integralEncoder( return proc.sh().valWrapCustom(cv); } -TValId customValueEncoder(SymProc &proc, const ObjHandle &dst, TValId val) { +TValId customValueEncoder(SymProc &proc, const ObjHandle &dst, TValId val) +{ SymHeap &sh = proc.sh(); const CustomValue cv = sh.valUnwrapCustom(val); const ECustomValue code = cv.code(); @@ -700,7 +718,8 @@ TValId customValueEncoder(SymProc &proc, const ObjHandle &dst, TValId val) { return VAL_INVALID; } -void objSetAtomicVal(SymProc &proc, const ObjHandle &lhs, TValId rhs) { +void objSetAtomicVal(SymProc &proc, const ObjHandle &lhs, TValId rhs) +{ if (!lhs.isValid()) { CL_ERROR_MSG(proc.lw(), "invalid L-value"); proc.printBackTrace(ML_ERROR); @@ -742,7 +761,8 @@ void objSetAtomicVal(SymProc &proc, const ObjHandle &lhs, TValId rhs) { lm.leave(); } -void SymProc::objSetValue(const ObjHandle &lhs, TValId rhs) { +void SymProc::objSetValue(const ObjHandle &lhs, TValId rhs) +{ const TValId lhsAt = lhs.placedAt(); CL_BREAK_IF(!isPossibleToDeref(sh_.valTarget(lhsAt))); @@ -781,7 +801,8 @@ void SymProc::objSetValue(const ObjHandle &lhs, TValId rhs) { executeMemmove(*this, lhsAt, rhsAt, valSize, /* allowOverlap */ false); } -void SymProc::valDestroyTarget(TValId addr) { +void SymProc::valDestroyTarget(TValId addr) +{ const EValueTarget code = sh_.valTarget(addr); if (VAL_ADDR_OF_RET == addr && isGone(code)) return; @@ -795,7 +816,8 @@ void 
SymProc::valDestroyTarget(TValId addr) { lm.leave(); } -void SymProc::killVar(const CodeStorage::KillVar &kv) { +void SymProc::killVar(const CodeStorage::KillVar &kv) +{ const int nestLevel = bt_->countOccurrencesOfTopFnc(); const CVar cVar(kv.uid, nestLevel); const TValId addr = sh_.addrOfVar(cVar, /* createIfNeeded */ false); @@ -845,13 +867,15 @@ void SymProc::killVar(const CodeStorage::KillVar &kv) { lm.leave(); } -bool headingToAbort(const CodeStorage::Block *bb) { +bool headingToAbort(const CodeStorage::Block *bb) +{ const CodeStorage::Insn *term = bb->back(); const cl_insn_e code = term->code; return (CL_INSN_ABORT == code); } -void SymProc::killInsn(const CodeStorage::Insn &insn) { +void SymProc::killInsn(const CodeStorage::Insn &insn) +{ using namespace CodeStorage; #if !SE_EARLY_VARS_DESTRUCTION return; @@ -861,7 +885,8 @@ void SymProc::killInsn(const CodeStorage::Insn &insn) { this->killVar(kv); } -void SymProc::killPerTarget(const CodeStorage::Insn &insn, unsigned target) { +void SymProc::killPerTarget(const CodeStorage::Insn &insn, unsigned target) +{ using namespace CodeStorage; #if SE_EARLY_VARS_DESTRUCTION if (headingToAbort(insn.targets[target])) @@ -928,7 +953,8 @@ void execMemsetCore( } } -inline void wipeAlignment(IR::Range &rng) { +inline void wipeAlignment(IR::Range &rng) +{ CL_BREAK_IF(isAligned(rng)); rng.alignment = IR::Int1; } @@ -1078,7 +1104,8 @@ void executeMemmove( // ///////////////////////////////////////////////////////////////////////////// // SymExecCore implementation -void SymExecCore::varInit(TValId at) { +void SymExecCore::varInit(TValId at) +{ if (ep_.trackUninit && VT_ON_STACK == sh_.valTarget(at)) { // uninitialized stack variable const TValId tpl = sh_.valCreate(VT_UNKNOWN, VO_STACK); @@ -1090,7 +1117,8 @@ void SymExecCore::varInit(TValId at) { SymProc::varInit(at); } -void SymExecCore::execFree(TValId val) { +void SymExecCore::execFree(TValId val) +{ if (VAL_NULL == val) { CL_DEBUG_MSG(lw_, "ignoring free() called with NULL value"); return; @@ -1222,7 +1250,8 @@ malloc/calloc is implementation-defined"); dst.insert(sh_); } -bool describeCmpOp(CmpOpTraits *pTraits, const enum cl_binop_e code) { +bool describeCmpOp(CmpOpTraits *pTraits, const enum cl_binop_e code) +{ memset(pTraits, 0, sizeof *pTraits); switch (code) { @@ -1528,7 +1557,8 @@ bool spliceOutAbstractPath( return false; } -bool valMerge(SymState &dst, SymProc &proc, TValId v1, TValId v2) { +bool valMerge(SymState &dst, SymProc &proc, TValId v1, TValId v2) +{ SymHeap &sh = proc.sh(); const struct cl_loc *loc = proc.lw(); @@ -1803,7 +1833,8 @@ TValId handleIntegralOp( return sh.valCreate(VT_UNKNOWN, VO_UNKNOWN); } -TValId handleBitNot(SymHeapCore &sh, const TValId val) { +TValId handleBitNot(SymHeapCore &sh, const TValId val) +{ // check whether the value is an integral constant IR::TInt num; if (!numFromVal(&num, sh, val)) @@ -1835,7 +1866,8 @@ TValId handleIntegralOp( } } -bool isAnyIntValue(const SymHeapCore &sh, const TValId val) { +bool isAnyIntValue(const SymHeapCore &sh, const TValId val) +{ const TValId root = sh.valRoot(val); switch (root) { case VAL_NULL: @@ -2063,7 +2095,8 @@ struct OpHandler { }; template -void SymExecCore::execOp(const CodeStorage::Insn &insn) { +void SymExecCore::execOp(const CodeStorage::Insn &insn) +{ // resolve lhs ObjHandle lhs; const struct cl_operand &dst = insn.operands[/* dst */ 0]; @@ -2116,7 +2149,8 @@ void SymExecCore::execOp(const CodeStorage::Insn &insn) { this->objSetValue(lhs, valResult); } -void SymExecCore::handleLabel(const 
CodeStorage::Insn &insn) { +void SymExecCore::handleLabel(const CodeStorage::Insn &insn) +{ const struct cl_operand &op = insn.operands[/* name */ 0]; if (CL_OPERAND_VOID == op.code) // anonymous label @@ -2261,7 +2295,8 @@ bool SymExecCore::concretizeLoop( return true; } -bool SymExecCore::exec(SymState &dst, const CodeStorage::Insn &insn) { +bool SymExecCore::exec(SymState &dst, const CodeStorage::Insn &insn) +{ TOpIdxList derefs; const cl_insn_e code = insn.code; diff --git a/sl/symproc.hh b/sl/symproc.hh index 96481fde1..32f669b35 100644 --- a/sl/symproc.hh +++ b/sl/symproc.hh @@ -47,7 +47,8 @@ struct CmpOpTraits { bool describeCmpOp(CmpOpTraits *pTraits, const enum cl_binop_e code); -inline bool areComparableTypes(const TObjType clt1, const TObjType clt2) { +inline bool areComparableTypes(const TObjType clt1, const TObjType clt2) +{ if (!clt1 || !clt2) return false; diff --git a/sl/symseg.cc b/sl/symseg.cc index cde9550d2..c909a7154 100644 --- a/sl/symseg.cc +++ b/sl/symseg.cc @@ -28,7 +28,8 @@ #include -bool segProveNeq(const SymHeap &sh, TValId ref, TValId val) { +bool segProveNeq(const SymHeap &sh, TValId ref, TValId val) +{ if (proveNeq(sh, ref, val)) // values are non-equal in non-abstract world return true; @@ -120,7 +121,8 @@ bool haveSeg( return (valNext == pointingTo); } -bool haveDlSegAt(const SymHeap &sh, TValId atAddr, TValId peerAddr) { +bool haveDlSegAt(const SymHeap &sh, TValId atAddr, TValId peerAddr) +{ if (atAddr <= 0 || peerAddr <= 0) // no valid targets return false; @@ -165,7 +167,8 @@ bool haveSegBidir( return false; } -bool segApplyNeq(SymHeap &sh, TValId v1, TValId v2) { +bool segApplyNeq(SymHeap &sh, TValId v1, TValId v2) +{ const EValueTarget code1 = sh.valTarget(v1); const EValueTarget code2 = sh.valTarget(v2); if (!isAbstract(code1) && !isAbstract(code2)) @@ -207,7 +210,8 @@ bool segApplyNeq(SymHeap &sh, TValId v1, TValId v2) { return false; } -TValId segClone(SymHeap &sh, const TValId root) { +TValId segClone(SymHeap &sh, const TValId root) +{ const TValId dup = objClone(sh, root); if (OK_DLS == sh.valTargetKind(root)) { @@ -231,7 +235,8 @@ TValId segClone(SymHeap &sh, const TValId root) { return dup; } -TValId lookThrough(const SymHeap &sh, TValId val, TValSet *pSeen) { +TValId lookThrough(const SymHeap &sh, TValId val, TValSet *pSeen) +{ if (VT_RANGE == sh.valTarget(val)) // not supported yet return VAL_INVALID; @@ -270,7 +275,8 @@ TValId lookThrough(const SymHeap &sh, TValId val, TValSet *pSeen) { return val; } -bool dlSegCheckConsistency(const SymHeap &sh) { +bool dlSegCheckConsistency(const SymHeap &sh) +{ TValList addrs; sh.gatherRootObjects(addrs, isAbstract); BOOST_FOREACH(const TValId at, addrs) { diff --git a/sl/symseg.hh b/sl/symseg.hh index 333647359..a48da53f6 100644 --- a/sl/symseg.hh +++ b/sl/symseg.hh @@ -52,7 +52,8 @@ bool haveSeg( bool haveDlSegAt(const SymHeap &sh, TValId atAddr, TValId peerAddr); /// return 'next' pointer in the given segment (given by root) -inline PtrHandle nextPtrFromSeg(const SymHeap &sh, TValId seg) { +inline PtrHandle nextPtrFromSeg(const SymHeap &sh, TValId seg) +{ CL_BREAK_IF(sh.valOffset(seg)); CL_BREAK_IF(VT_ABSTRACT != sh.valTarget(seg)); @@ -62,7 +63,8 @@ inline PtrHandle nextPtrFromSeg(const SymHeap &sh, TValId seg) { } /// return 'prev' pointer in the given segment (given by root) -inline PtrHandle prevPtrFromSeg(const SymHeap &sh, TValId seg) { +inline PtrHandle prevPtrFromSeg(const SymHeap &sh, TValId seg) +{ CL_BREAK_IF(sh.valOffset(seg)); CL_BREAK_IF(VT_ABSTRACT != sh.valTarget(seg)); @@ -72,7 +74,8 @@ 
inline PtrHandle prevPtrFromSeg(const SymHeap &sh, TValId seg) { } /// return the value of 'next' in the given segment (given by root) -inline TValId nextValFromSeg(const SymHeap &sh, TValId seg) { +inline TValId nextValFromSeg(const SymHeap &sh, TValId seg) +{ if (OK_OBJ_OR_NULL == sh.valTargetKind(seg)) return VAL_NULL; @@ -81,7 +84,8 @@ inline TValId nextValFromSeg(const SymHeap &sh, TValId seg) { } /// return DLS peer object of the given DLS -inline TValId dlSegPeer(const SymHeap &sh, TValId dls) { +inline TValId dlSegPeer(const SymHeap &sh, TValId dls) +{ CL_BREAK_IF(sh.valOffset(dls)); CL_BREAK_IF(OK_DLS != sh.valTargetKind(dls)); const BindingOff &off = sh.segBinding(dls); @@ -90,7 +94,8 @@ inline TValId dlSegPeer(const SymHeap &sh, TValId dls) { } /// return DLS peer object in case of DLS, the given value otherwise -inline TValId segPeer(const SymHeap &sh, TValId seg) { +inline TValId segPeer(const SymHeap &sh, TValId seg) +{ CL_BREAK_IF(sh.valOffset(seg)); CL_BREAK_IF(!isAbstract(sh.valTarget(seg))); return (OK_DLS == sh.valTargetKind(seg)) @@ -99,7 +104,8 @@ inline TValId segPeer(const SymHeap &sh, TValId seg) { } /// return address of segment's head (useful mainly for Linux lists) -inline TValId segHeadAt(const SymHeap &sh, TValId seg) { +inline TValId segHeadAt(const SymHeap &sh, TValId seg) +{ CL_BREAK_IF(sh.valOffset(seg)); CL_BREAK_IF(VT_ABSTRACT != sh.valTarget(seg)); @@ -108,7 +114,8 @@ inline TValId segHeadAt(const SymHeap &sh, TValId seg) { } /// we do NOT require root to be a segment -inline TValId segNextRootObj(SymHeap &sh, TValId at, TOffset offNext) { +inline TValId segNextRootObj(SymHeap &sh, TValId at, TOffset offNext) +{ CL_BREAK_IF(sh.valOffset(at)); if (OK_DLS == sh.valTargetKind(at)) // jump to peer in case of DLS @@ -118,7 +125,8 @@ inline TValId segNextRootObj(SymHeap &sh, TValId at, TOffset offNext) { } /// we DO require the root to be an abstract object -inline TValId segNextRootObj(SymHeap &sh, TValId root) { +inline TValId segNextRootObj(SymHeap &sh, TValId root) +{ CL_BREAK_IF(sh.valOffset(root)); if (OK_OBJ_OR_NULL == sh.valTargetKind(root)) @@ -133,7 +141,8 @@ inline TValId segNextRootObj(SymHeap &sh, TValId root) { } /// true if the given root is a DLS with bf.prev < bf.next -inline bool isDlSegPeer(const SymHeap &sh, const TValId root) { +inline bool isDlSegPeer(const SymHeap &sh, const TValId root) +{ CL_BREAK_IF(sh.valOffset(root)); if (OK_DLS != sh.valTargetKind(root)) @@ -144,7 +153,8 @@ inline bool isDlSegPeer(const SymHeap &sh, const TValId root) { return (bf.prev < bf.next); } -inline TMinLen objMinLength(const SymHeap &sh, TValId root) { +inline TMinLen objMinLength(const SymHeap &sh, TValId root) +{ CL_BREAK_IF(sh.valOffset(root)); const EValueTarget code = sh.valTarget(root); @@ -176,7 +186,8 @@ inline void segIncreaseMinLength(SymHeap &sh, const TValId seg, TMinLen len) /// we know (v1 != v2), update related segments in the given heap accordingly! 
bool segApplyNeq(SymHeap &sh, TValId v1, TValId v2); -inline bool objWithBinding(const SymHeap &sh, const TValId root) { +inline bool objWithBinding(const SymHeap &sh, const TValId root) +{ CL_BREAK_IF(sh.valOffset(root)); const EValueTarget code = sh.valTarget(root); diff --git a/sl/symstate.cc b/sl/symstate.cc index 08394e38c..499c89377 100644 --- a/sl/symstate.cc +++ b/sl/symstate.cc @@ -65,18 +65,21 @@ namespace { // ///////////////////////////////////////////////////////////////////////////// // SymState implementation -void SymState::clear() { +void SymState::clear() +{ BOOST_FOREACH(SymHeap *sh, heaps_) delete sh; heaps_.clear(); } -SymState::~SymState() { +SymState::~SymState() +{ this->clear(); } -SymState& SymState::operator=(const SymState &ref) { +SymState& SymState::operator=(const SymState &ref) +{ // wipe the existing contents (if any) this->clear(); @@ -87,11 +90,13 @@ SymState& SymState::operator=(const SymState &ref) { return *this; } -SymState::SymState(const SymState &ref) { +SymState::SymState(const SymState &ref) +{ SymState::operator=(ref); } -void SymState::insertNew(const SymHeap &sh) { +void SymState::insertNew(const SymHeap &sh) +{ // clone the given heap SymHeap *dup = new SymHeap(sh); @@ -102,7 +107,8 @@ void SymState::insertNew(const SymHeap &sh) { heaps_.push_back(dup); } -bool SymState::insert(const SymHeap &sh, bool /* allowThreeWay */ ) { +bool SymState::insert(const SymHeap &sh, bool /* allowThreeWay */ ) +{ if (-1 != this->lookup(sh)) return false; @@ -111,7 +117,8 @@ bool SymState::insert(const SymHeap &sh, bool /* allowThreeWay */ ) { return true; } -void SymState::rotateExisting(const int idxA, const int idxB) { +void SymState::rotateExisting(const int idxA, const int idxB) +{ TList::iterator itA = heaps_.begin() + idxA; TList::iterator itB = heaps_.begin() + idxB; rotate(itA, itB, heaps_.end()); @@ -120,7 +127,8 @@ void SymState::rotateExisting(const int idxA, const int idxB) { // ///////////////////////////////////////////////////////////////////////////// // SymHeapUnion implementation -int SymHeapUnion::lookup(const SymHeap &lookFor) const { +int SymHeapUnion::lookup(const SymHeap &lookFor) const +{ const int cnt = this->size(); if (!cnt) // empty state --> not found @@ -154,7 +162,8 @@ int SymHeapUnion::lookup(const SymHeap &lookFor) const { // ///////////////////////////////////////////////////////////////////////////// // SymStateWithJoin implementation -void SymStateWithJoin::packState(unsigned idxNew, bool allowThreeWay) { +void SymStateWithJoin::packState(unsigned idxNew, bool allowThreeWay) +{ for (unsigned idxOld = 0U; idxOld < this->size();) { if (idxNew == idxOld) { // do not remove the newly inserted heap based on identity with self @@ -206,7 +215,8 @@ void SymStateWithJoin::packState(unsigned idxNew, bool allowThreeWay) { #endif } -bool SymStateWithJoin::insert(const SymHeap &shNew, bool allowThreeWay) { +bool SymStateWithJoin::insert(const SymHeap &shNew, bool allowThreeWay) +{ #if 1 < SE_JOIN_ON_LOOP_EDGES_ONLY if (!allowThreeWay) // we are asked not to check for entailment, only isomorphism @@ -322,19 +332,23 @@ BlockScheduler::BlockScheduler(const BlockScheduler &tpl): { } -BlockScheduler::~BlockScheduler() { +BlockScheduler::~BlockScheduler() +{ delete d; } -unsigned BlockScheduler::cntWaiting() const { +unsigned BlockScheduler::cntWaiting() const +{ return d->todo.size(); } -const BlockScheduler::TBlockSet& BlockScheduler::todo() const { +const BlockScheduler::TBlockSet& BlockScheduler::todo() const +{ return d->todo; } 
-BlockScheduler::TBlockList BlockScheduler::done() const { +BlockScheduler::TBlockList BlockScheduler::done() const +{ TBlockList dst; BOOST_FOREACH(Private::TDone::const_reference item, d->done) dst.push_back(/* bb */ item.first); @@ -342,7 +356,8 @@ BlockScheduler::TBlockList BlockScheduler::done() const { return dst; } -bool BlockScheduler::schedule(const TBlock bb) { +bool BlockScheduler::schedule(const TBlock bb) +{ if (insertOnce(d->todo, bb)) { #if !SE_BLOCK_SCHEDULER_KIND d->sched.push(bb); @@ -384,7 +399,8 @@ bool BlockScheduler::schedule(const TBlock bb) { return false; } -bool BlockScheduler::getNext(TBlock *dst) { +bool BlockScheduler::getNext(TBlock *dst) +{ if (d->todo.empty()) return false; @@ -427,7 +443,8 @@ bool BlockScheduler::getNext(TBlock *dst) { return true; } -void BlockScheduler::printStats() const { +void BlockScheduler::printStats() const +{ typedef std::map TRMap; // sort d->todo by cnt @@ -457,7 +474,8 @@ void BlockScheduler::printStats() const { // ///////////////////////////////////////////////////////////////////////////// // SymStateMarked implementation -void SymStateMarked::swap(SymState &other) { +void SymStateMarked::swap(SymState &other) +{ // if this fires up one day, it means we need to cover the swap of done_ CL_BREAK_IF(dynamic_cast(&other)); @@ -469,7 +487,8 @@ void SymStateMarked::swap(SymState &other) { done_.resize((cntPending_ = this->size()), false); } -void SymStateMarked::rotateExisting(const int idxA, const int idxB) { +void SymStateMarked::rotateExisting(const int idxA, const int idxB) +{ SymState::rotateExisting(idxA, idxB); TDone::iterator itA = done_.begin() + idxA; @@ -510,11 +529,13 @@ SymStateMap::SymStateMap(): { } -SymStateMap::~SymStateMap() { +SymStateMap::~SymStateMap() +{ delete d; } -SymStateMarked& SymStateMap::operator[](const CodeStorage::Block *bb) { +SymStateMarked& SymStateMap::operator[](const CodeStorage::Block *bb) +{ return d->cont[bb].state; } @@ -552,11 +573,13 @@ bool SymStateMap::insert( return changed; } -bool SymStateMap::anyReuseHappened(const CodeStorage::Block *bb) const { +bool SymStateMap::anyReuseHappened(const CodeStorage::Block *bb) const +{ return d->cont[bb].anyHit; } -int SymStateMap::cntPending(const CodeStorage::Block *bb) const { +int SymStateMap::cntPending(const CodeStorage::Block *bb) const +{ return d->cont[bb].state.cntPending(); } diff --git a/sl/symtrace.cc b/sl/symtrace.cc index d915e241f..ded066892 100644 --- a/sl/symtrace.cc +++ b/sl/symtrace.cc @@ -40,12 +40,14 @@ namespace Trace { // ///////////////////////////////////////////////////////////////////////////// // implementation of Trace::NodeBase -NodeBase::~NodeBase() { +NodeBase::~NodeBase() +{ BOOST_FOREACH(Node *parent, parents_) parent->notifyDeath(this); } -Node* NodeBase::parent() const { +Node* NodeBase::parent() const +{ CL_BREAK_IF(1 != parents_.size()); return parents_.front(); } @@ -54,11 +56,13 @@ Node* NodeBase::parent() const { // ///////////////////////////////////////////////////////////////////////////// // implementation of Trace::Node -void Node::notifyBirth(NodeBase *child) { +void Node::notifyBirth(NodeBase *child) +{ children_.push_back(child); } -void Node::notifyDeath(NodeBase *child) { +void Node::notifyDeath(NodeBase *child) +{ // remove the dead child from the list children_.erase( std::remove(children_.begin(), children_.end(), child), @@ -73,7 +77,8 @@ void Node::notifyDeath(NodeBase *child) { // ///////////////////////////////////////////////////////////////////////////// // implementation of 
Trace::NodeHandle -void NodeHandle::reset(Node *node) { +void NodeHandle::reset(Node *node) +{ // release the old node Node *&ref = parents_.front(); ref->notifyDeath(this); @@ -104,7 +109,8 @@ struct TracePlotter { } }; -std::string insnToLabel(const TInsn insn) { +std::string insnToLabel(const TInsn insn) +{ using boost::algorithm::replace_all; // dump the instruction to a string stream @@ -119,7 +125,8 @@ std::string insnToLabel(const TInsn insn) { return label; } -std::string insnToBlock(const TInsn insn) { +std::string insnToBlock(const TInsn insn) +{ CodeStorage::Block *bb = insn->bb; return (bb) ? bb->name() @@ -131,18 +138,21 @@ std::string insnToBlock(const TInsn insn) { #define INSN_LOC_AND_BB(insn) SL_QUOTE((insn)->loc << insnToBlock(insn)) -void TransientNode::plotNode(TracePlotter &tplot) const { +void TransientNode::plotNode(TracePlotter &tplot) const +{ tplot.out << "\t" << SL_QUOTE(this) << " [shape=box, color=red, fontcolor=red, label=" << SL_QUOTE(origin_) << "];\n"; } -void RootNode::plotNode(TracePlotter &tplot) const { +void RootNode::plotNode(TracePlotter &tplot) const +{ tplot.out << "\t" << SL_QUOTE(this) << " [shape=circle, color=black, fontcolor=black, label=\"start\"];\n"; } -void InsnNode::plotNode(TracePlotter &tplot) const { +void InsnNode::plotNode(TracePlotter &tplot) const +{ const char *color = (isBuiltin_) ? "blue" : "black"; @@ -154,7 +164,8 @@ void InsnNode::plotNode(TracePlotter &tplot) const { << "];\n"; } -void AbstractionNode::plotNode(TracePlotter &tplot) const { +void AbstractionNode::plotNode(TracePlotter &tplot) const +{ const char *label; switch (kind_) { case OK_SLS: @@ -175,59 +186,68 @@ void AbstractionNode::plotNode(TracePlotter &tplot) const { << SL_QUOTE(label) << "];\n"; } -void ConcretizationNode::plotNode(TracePlotter &tplot) const { +void ConcretizationNode::plotNode(TracePlotter &tplot) const +{ // TODO: kind_ tplot.out << "\t" << SL_QUOTE(this) << " [shape=ellipse, color=red, fontcolor=blue, label=" << SL_QUOTE("concretizeObj()") << "];\n"; } -void SpliceOutNode::plotNode(TracePlotter &tplot) const { +void SpliceOutNode::plotNode(TracePlotter &tplot) const +{ // TODO: kind_, successful_ tplot.out << "\t" << SL_QUOTE(this) << " [shape=ellipse, color=red, fontcolor=blue, label=" << SL_QUOTE("spliceOut*(len = " << len_ << ")") << "];\n"; } -void JoinNode::plotNode(TracePlotter &tplot) const { +void JoinNode::plotNode(TracePlotter &tplot) const +{ tplot.out << "\t" << SL_QUOTE(this) << " [shape=circle, color=red, fontcolor=red, label=\"join\"];\n"; } -void CloneNode::plotNode(TracePlotter &tplot) const { +void CloneNode::plotNode(TracePlotter &tplot) const +{ tplot.out << "\t" << SL_QUOTE(this) << " [shape=doubleoctagon, color=black" ", fontcolor=black, label=\"clone\"];\n"; } -void CallEntryNode::plotNode(TracePlotter &tplot) const { +void CallEntryNode::plotNode(TracePlotter &tplot) const +{ tplot.out << "\t" << SL_QUOTE(this) << " [shape=box, fontname=monospace, color=blue, fontcolor=blue" ", penwidth=3.0, label=\"--> call entry: " << (insnToLabel(insn_)) << "\", tooltip=\"" << insn_->loc << insn_->bb->name() << "\"];\n"; } -void CallCacheHitNode::plotNode(TracePlotter &tplot) const { +void CallCacheHitNode::plotNode(TracePlotter &tplot) const +{ tplot.out << "\t" << SL_QUOTE(this) << " [shape=box, fontname=monospace, color=gold, fontcolor=blue" ", penwidth=3.0, label=\"(x) call cache hit: " << (nameOf(*fnc_)) << "()\"];\n"; } -void CallFrameNode::plotNode(TracePlotter &tplot) const { +void CallFrameNode::plotNode(TracePlotter 
&tplot) const +{ tplot.out << "\t" << SL_QUOTE(this) << " [shape=box, fontname=monospace, color=blue, fontcolor=blue" ", label=\"--- call frame: " << (insnToLabel(insn_)) << "\", tooltip=" << INSN_LOC_AND_BB(insn_) << "];\n"; } -void CallDoneNode::plotNode(TracePlotter &tplot) const { +void CallDoneNode::plotNode(TracePlotter &tplot) const +{ tplot.out << "\t" << SL_QUOTE(this) << " [shape=box, fontname=monospace, color=blue, fontcolor=blue" ", penwidth=3.0, label=\"<-- call done: " << (nameOf(*fnc_)) << "()\"];\n"; } -void CondNode::plotNode(TracePlotter &tplot) const { +void CondNode::plotNode(TracePlotter &tplot) const +{ tplot.out << "\t" << SL_QUOTE(this) << " [shape=box, fontname=monospace" ", tooltip=" << INSN_LOC_AND_BB(inCnd_); @@ -251,7 +271,8 @@ void CondNode::plotNode(TracePlotter &tplot) const { tplot.out << "\"];\n"; } -void MsgNode::plotNode(TracePlotter &tplot) const { +void MsgNode::plotNode(TracePlotter &tplot) const +{ const char *color = "red"; const char *label; switch (level_) { @@ -280,12 +301,14 @@ void MsgNode::plotNode(TracePlotter &tplot) const { << SL_QUOTE((*loc_) << label) << "];\n"; } -void UserNode::plotNode(TracePlotter &tplot) const { +void UserNode::plotNode(TracePlotter &tplot) const +{ tplot.out << "\t" << SL_QUOTE(this) << " [shape=octagon, penwidth=3.0" ", color=green, fontcolor=black, label=\"" << label_ << "\"];\n"; } -void plotTraceCore(TracePlotter &tplot) { +void plotTraceCore(TracePlotter &tplot) +{ CL_DEBUG("plotTraceCore() is traversing a trace graph..."); TNodePair item; @@ -310,7 +333,8 @@ void plotTraceCore(TracePlotter &tplot) { } // FIXME: copy-pasted from symplot.cc -bool plotTrace(const std::string &name, TWorkList &wl) { +bool plotTrace(const std::string &name, TWorkList &wl) +{ PlotEnumerator *pe = PlotEnumerator::instance(); std::string plotName(pe->decorate(name)); std::string fileName(plotName + ".dot"); @@ -345,7 +369,8 @@ bool plotTrace(const std::string &name, TWorkList &wl) { return !!out; } -bool plotTrace(Node *endPoint, const std::string &name) { +bool plotTrace(Node *endPoint, const std::string &name) +{ const TNodePair item(/* from */ endPoint, /* to */ nullNode); TWorkList wl(item); return plotTrace(name, wl); @@ -355,7 +380,8 @@ bool plotTrace(Node *endPoint, const std::string &name) { // implementation of Trace::chkTraceGraphConsistency() template -bool isNodeKindReachble(Node *const from) { +bool isNodeKindReachble(Node *const from) +{ Node *node = from; WorkList wl(node); while (wl.next(node)) { @@ -370,7 +396,8 @@ bool isNodeKindReachble(Node *const from) { return false; } -bool chkTraceGraphConsistency(Node *const from) { +bool chkTraceGraphConsistency(Node *const from) +{ if (isNodeKindReachble(from)) { CL_WARN("CloneNode reachable from the given trace graph node"); plotTrace(from, "symtrace-CloneNode-reachable"); @@ -415,7 +442,8 @@ EndPointConsolidator::EndPointConsolidator(): { } -EndPointConsolidator::~EndPointConsolidator() { +EndPointConsolidator::~EndPointConsolidator() +{ if (d->dirty) CL_DEBUG("WARNING: EndPointConsolidator is destructed dirty"); @@ -425,7 +453,8 @@ EndPointConsolidator::~EndPointConsolidator() { delete d; } -bool /* any change */ EndPointConsolidator::insert(Node *endPoint) { +bool /* any change */ EndPointConsolidator::insert(Node *endPoint) +{ if (!insertOnce(d->nset, endPoint)) return false; @@ -435,7 +464,8 @@ bool /* any change */ EndPointConsolidator::insert(Node *endPoint) { return ((d->dirty = true)); } -bool EndPointConsolidator::plotAll(const std::string &name) { +bool 
EndPointConsolidator::plotAll(const std::string &name)
+{
     d->dirty = false;
 
     // schedule all end-points
@@ -463,14 +493,16 @@ GraphProxy::GraphProxy():
 {
 }
 
-GraphProxy::~GraphProxy() {
+GraphProxy::~GraphProxy()
+{
     BOOST_FOREACH(Private::TMap::const_reference item, d->gmap)
         delete /* (EndPointConsolidator *) */ item.second;
 
     delete d;
 }
 
-bool /* any change */ GraphProxy::insert(Node *node, const std::string &name) {
+bool /* any change */ GraphProxy::insert(Node *node, const std::string &name)
+{
     Private::TMap::const_iterator it = d->gmap.find(name);
     EndPointConsolidator *const epc = (d->gmap.end() == it)
@@ -480,13 +512,15 @@ bool /* any change */ GraphProxy::insert(Node *node, const std::string &name) {
     return /* any change */ epc->insert(node);
 }
 
-bool GraphProxy::plotGraph(const std::string &name) {
+bool GraphProxy::plotGraph(const std::string &name)
+{
     CL_BREAK_IF(!hasKey(d->gmap, name));
 
     return d->gmap[name]->plotAll(name);
 }
 
-bool GraphProxy::plotAll() {
+bool GraphProxy::plotAll()
+{
     bool ok = true;
 
     BOOST_FOREACH(Private::TMap::const_reference item, d->gmap) {
@@ -510,7 +544,8 @@ Globals *Globals::inst_;
 
 // /////////////////////////////////////////////////////////////////////////////
 // implementation of Trace::waiveCloneOperation()
-void waiveCloneOperation(SymHeap &sh) {
+void waiveCloneOperation(SymHeap &sh)
+{
     // just make sure the caller knows what is going on...
     Node *cnode = sh.traceNode();
     CL_BREAK_IF(!dynamic_cast(cnode));
diff --git a/sl/symutil.cc b/sl/symutil.cc
index 43efe6e67..27ed643b4 100644
--- a/sl/symutil.cc
+++ b/sl/symutil.cc
@@ -31,7 +31,8 @@
 #include 
 
-bool numFromVal(IR::TInt *pDst, const SymHeapCore &sh, const TValId val) {
+bool numFromVal(IR::TInt *pDst, const SymHeapCore &sh, const TValId val)
+{
     switch (val) {
         case VAL_NULL:
             *pDst = 0L;
@@ -62,7 +63,8 @@ bool numFromVal(IR::TInt *pDst, const SymHeapCore &sh, const TValId val) {
     return true;
 }
 
-bool rngFromVal(IR::Range *pDst, const SymHeapCore &sh, const TValId val) {
+bool rngFromVal(IR::Range *pDst, const SymHeapCore &sh, const TValId val)
+{
     IR::TInt num;
     if (numFromVal(&num, sh, val)) {
         // a single number
@@ -115,7 +117,8 @@ bool anyRangeFromVal(
     return false;
 }
 
-bool stringFromVal(const char **pDst, const SymHeap &sh, const TValId val) {
+bool stringFromVal(const char **pDst, const SymHeap &sh, const TValId val)
+{
     if (VT_CUSTOM != sh.valTarget(val))
         // not a custom value
        return false;
@@ -130,7 +133,8 @@ bool stringFromVal(const char **pDst, const SymHeap &sh, const TValId val) {
     return true;
 }
 
-const IR::Range& rngFromCustom(const CustomValue &cv) {
+const IR::Range& rngFromCustom(const CustomValue &cv)
+{
     const ECustomValue code = cv.code();
     switch (code) {
         case CV_INT_RANGE:
@@ -242,7 +246,8 @@ void moveKnownValueToLeft(
     valB = tmp;
 }
 
-bool valInsideSafeRange(const SymHeapCore &sh, TValId val) {
+bool valInsideSafeRange(const SymHeapCore &sh, TValId val)
+{
     const EValueTarget code = sh.valTarget(val);
     if (!isKnownObject(code))
         return false;
@@ -251,7 +256,8 @@ bool valInsideSafeRange(const SymHeapCore &sh, TValId val) {
     return (IR::Int0 < size.lo);
 }
 
-bool canWriteDataPtrAt(const SymHeapCore &sh, TValId val) {
+bool canWriteDataPtrAt(const SymHeapCore &sh, TValId val)
+{
     if (!isPossibleToDeref(sh.valTarget(val)))
         return false;
 
@@ -312,7 +318,8 @@ TValId translateValProto(
     return dst.valCreate(code, origin);
 }
 
-void initGlVar(SymHeap &sh, const CVar &cv) {
+void initGlVar(SymHeap &sh, const CVar &cv)
+{
     CL_BREAK_IF(cv.inst);
     CL_BREAK_IF(isVarAlive(sh, cv));
 
@@ -367,7 +374,8 @@ bool /* anyChange */ redirectRefs(
     return anyChange;
 }
 
-bool proveNeq(const SymHeapCore &sh, TValId ref, TValId val) {
+bool proveNeq(const SymHeapCore &sh, TValId ref, TValId val)
+{
     // check for invalid values
     if (VAL_INVALID == ref || VAL_INVALID == val)
         return false;
diff --git a/sl/symutil.hh b/sl/symutil.hh
index 3b12d1d4d..291ffb63d 100644
--- a/sl/symutil.hh
+++ b/sl/symutil.hh
@@ -39,7 +39,8 @@
 #include 
 #include 
 
-inline TValId boolToVal(const bool b) {
+inline TValId boolToVal(const bool b)
+{
     return (b)
         ? VAL_TRUE
         : VAL_FALSE;
@@ -117,26 +118,30 @@ inline ObjHandle translateObjId(
     return ObjHandle(dst, dstAt, clt);
 }
 
-inline TValId valOfPtrAt(SymHeap &sh, TValId at) {
+inline TValId valOfPtrAt(SymHeap &sh, TValId at)
+{
     CL_BREAK_IF(!canWriteDataPtrAt(sh, at));
 
     const PtrHandle ptr(sh, at);
     return ptr.value();
 }
 
-inline TValId valOfPtrAt(SymHeap &sh, TValId at, TOffset off) {
+inline TValId valOfPtrAt(SymHeap &sh, TValId at, TOffset off)
+{
     const TValId ptrAt = sh.valByOffset(at, off);
     return valOfPtrAt(sh, ptrAt);
 }
 
-inline bool isVarAlive(SymHeap &sh, const CVar &cv) {
+inline bool isVarAlive(SymHeap &sh, const CVar &cv)
+{
     const TValId at = sh.addrOfVar(cv, /* createIfNeeded */ false);
     return 0 < at;
 }
 
 void initGlVar(SymHeap &sh, const CVar &cv);
 
-inline TValId nextRootObj(SymHeap &sh, TValId root, TOffset offNext) {
+inline TValId nextRootObj(SymHeap &sh, TValId root, TOffset offNext)
+{
     CL_BREAK_IF(sh.valOffset(root));
     const TValId valNext = valOfPtrAt(sh, root, offNext);
     return sh.valRoot(valNext);
@@ -190,7 +195,8 @@ inline bool areUniBlocksEqual(
 }
 
 /// needed because of VT_RANGE vs. VT_ABSTRACT (suboptimal design?)
-inline EValueTarget realValTarget(const SymHeap &sh, const TValId val) {
+inline EValueTarget realValTarget(const SymHeap &sh, const TValId val)
+{
     const EValueTarget code = sh.valTarget(val);
     return (VT_RANGE == code)
         ? sh.valTarget(sh.valRoot(val))

From 48ee4307aa655c242a4d5a5cf40754dad4e69762 Mon Sep 17 00:00:00 2001
From: Kamil Dudka
Date: Wed, 5 Sep 2012 19:59:00 +0200
Subject: [PATCH 3/4] sl/intrange: make the GCD algorithm more lightweight

---
 sl/intrange.cc | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/sl/intrange.cc b/sl/intrange.cc
index ea3135e8e..10d21d7f1 100644
--- a/sl/intrange.cc
+++ b/sl/intrange.cc
@@ -66,8 +66,7 @@ void chkRange(const Range &rng)
     CL_BREAK_IF(1 + rng.hi - rng.lo < rng.alignment);
 }
 
-// TODO: replace this implementation by something useful (it can loop badly)
-TInt greatestCommonDivisor(TInt a, TInt b)
+TInt approxGCD(TInt a, TInt b)
 {
     CL_BREAK_IF(a < RZ_MIN || RZ_MAX < a);
     CL_BREAK_IF(b < RZ_MIN || RZ_MAX < b);
@@ -81,15 +80,20 @@
     if (b < Int0)
         b = -b;
 
-    while (a != b) {
-        if (a < b)
-            b -= a;
-        else
-            a -= b;
+    TInt gcd = Int1;
+
+    for (unsigned i = 1; gcd <= a && gcd <= b && gcd < RZ_MAX; ++i) {
+        const TInt mask = (Int1 << i) - Int1;
+
+        if (a & mask)
+            break;
+        if (b & mask)
+            break;
+
+        gcd = Int1 << i;
     }
 
-    CL_BREAK_IF(a < Int1);
-    return a;
+    return gcd;
 }
 
 Range join(const Range &rng1, const Range &rng2)
@@ -124,7 +128,7 @@ bool isCovered(const Range &small, const Range &big)
     return (big.lo <= small.lo)
         && (small.hi <= big.hi)
         && (Int1 == big.alignment || big.alignment ==
-            greatestCommonDivisor(small.alignment, big.alignment));
+            approxGCD(small.alignment, big.alignment));
 }
 
 bool isSingular(const Range &range)
@@ -240,7 +244,7 @@ Range& operator+=(Range &rng, const Range &other)
     // compute the resulting alignment
     rng.alignment = Int1;
     if (!isSingular(rng))
-        rng.alignment = greatestCommonDivisor(al1, al2);
+        rng.alignment = approxGCD(al1, al2);
 
     chkRange(rng);
     return rng;

From 0a18a975b7ba2c5cde293ca4de8ad6279f668ddf Mon Sep 17 00:00:00 2001
From: Kamil Dudka
Date: Wed, 5 Sep 2012 19:28:39 +0200
Subject: [PATCH 4/4] cl: avoid signed overflow on integral literals

Reported by Ondra Lengal.
---
 cl/cl_pp.cc                         |  3 +++
 cl/cldebug.cc                       |  6 +++++-
 cl/gcc/clplug.c                     |  1 +
 include/cl/code_listener.h          | 11 ++++++++---
 sl/symproc.cc                       |  4 +++-
 tests/forester-regre/test-p0028.err |  2 +-
 6 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/cl/cl_pp.cc b/cl/cl_pp.cc
index 288df8b52..b10a68e43 100644
--- a/cl/cl_pp.cc
+++ b/cl/cl_pp.cc
@@ -282,6 +282,9 @@ void ClPrettyPrint::printIntegralCst(const struct cl_operand *op)
     if (value < 0)
         out_ << SSD_INLINE_COLOR(C_LIGHT_RED, "(");
 
+    if (op->type->is_unsigned)
+        out_ << "U";
+
     SSD_COLORIZE(out_, C_WHITE) << value;
     if (value < 0)
         out_ << SSD_INLINE_COLOR(C_LIGHT_RED, ")");
diff --git a/cl/cldebug.cc b/cl/cldebug.cc
index 5e9db0930..9a12de59e 100644
--- a/cl/cldebug.cc
+++ b/cl/cldebug.cc
@@ -187,7 +187,11 @@ void operandToStreamCstInt(std::ostream &str, const struct cl_operand &op)
            break;
 
        case CL_TYPE_INT:
-            str << "(int)" << val;
+            str << "(int)";
+            if (op.type->is_unsigned)
+                str << "U";
+
+            str << val;
            break;
 
        case CL_TYPE_BOOL:
diff --git a/cl/gcc/clplug.c b/cl/gcc/clplug.c
index 372bf8b0e..2765eb3c7 100644
--- a/cl/gcc/clplug.c
+++ b/cl/gcc/clplug.c
@@ -986,6 +986,7 @@ static void read_cst_int(struct cl_operand *op, tree t)
     CL_BREAK_IF(TREE_INT_CST_HIGH(t) != 0
             && (TREE_INT_CST_LOW(t) == 0 || TREE_INT_CST_HIGH(t) != -1));
 
+    // FIXME: should we read unsigned types separately?
     op->code = CL_OPERAND_CST;
     op->data.cst.code = CL_TYPE_INT;
     op->data.cst.data.cst_int.value = TREE_INT_CST_LOW(t);
diff --git a/include/cl/code_listener.h b/include/cl/code_listener.h
index 937a32d0d..ccec67fc9 100644
--- a/include/cl/code_listener.h
+++ b/include/cl/code_listener.h
@@ -388,10 +388,15 @@ struct cl_cst {
        } cst_fnc;  /**< valid only for @b CL_TYPE_FNC */
 
-        /* CL_TYPE_INT */
+        /* CL_TYPE_INT when is_unsigned is false */
        struct {
-            int value;
-        } cst_int;  /**< valid only for @b CL_TYPE_INT */
+            long value;
+        } cst_int;  /**< valid only for @b CL_TYPE_INT and !is_unsigned */
+
+        /* CL_TYPE_INT when is_unsigned is true */
+        struct {
+            unsigned long value;
+        } cst_uint; /**< valid only for @b CL_TYPE_INT and is_unsigned */
 
        /* CL_TYPE_STRING */
        struct {
diff --git a/sl/symproc.cc b/sl/symproc.cc
index d76ad89b0..35d90d20e 100644
--- a/sl/symproc.cc
+++ b/sl/symproc.cc
@@ -103,7 +103,9 @@ TValId SymProc::valFromCst(const struct cl_operand &op)
        case CL_TYPE_ENUM:
        case CL_TYPE_INT:
            // integral value
-            cv = CustomValue(IR::rngFromNum(cst.data.cst_int.value));
+            cv = CustomValue(IR::rngFromNum(
+                        /* FIXME: deal better with integer literals */
+                        static_cast(cst.data.cst_int.value)));
            break;
 
        case CL_TYPE_REAL:
diff --git a/tests/forester-regre/test-p0028.err b/tests/forester-regre/test-p0028.err
index f31b84467..39d9f7d26 100644
--- a/tests/forester-regre/test-p0028.err
+++ b/tests/forester-regre/test-p0028.err
@@ -1,2 +1,2 @@
-test-p0028.c:24:13: note: item = malloc ((int)4)
+test-p0028.c:24:13: note: item = malloc ((int)U4)
 test-p0028.c:24:13: error: allocated block size mismatch
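
Note on the approxGCD() change in PATCH 3/4: the new routine intentionally does not
compute the exact greatest common divisor. It returns the largest power of two that
divides both operands, which terminates in a number of iterations bounded by the bit
width of TInt, whereas the old subtraction loop could run for on the order of RZ_MAX
steps on unlucky inputs (the removed TODO already warned that it "can loop badly").
The sketch below is a standalone restatement of the same idea for illustration only;
it uses plain long in place of the project's TInt/Int1/RZ_MAX machinery and is not
part of the patch:

    #include <cassert>

    // largest power of two that divides both a and b (1 if either is odd);
    // a conservative, always-terminating substitute for the exact GCD
    static long approxGCD(long a, long b)
    {
        if (a < 0)
            a = -a;
        if (b < 0)
            b = -b;

        long gcd = 1;

        // keep doubling while the next power of two still divides both inputs
        while (gcd <= a && gcd <= b && !(a & gcd) && !(b & gcd))
            gcd <<= 1;

        return gcd;
    }

    int main()
    {
        assert(4 == approxGCD(8, 12));   // exact GCD is also 4
        assert(1 == approxGCD(9, 12));   // exact GCD is 3; approximation falls back to 1
        assert(2 == approxGCD(-6, 4));   // signs are ignored, as in the patched code
        return 0;
    }

In both call sites touched by the patch (isCovered() and operator+=), underestimating
the common divisor only loses precision in the range's alignment; it never claims a
tighter alignment than actually holds, which appears to be why the cheap approximation
is acceptable there.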