[JIT] Fix clang-tidy warnings in jit/runtime (#47992)
Summary: Pull Request resolved: #47992

Test Plan: Imported from OSS

Reviewed By: ZolotukhinM

Differential Revision: D25258645

Pulled By: SplitInfinity

fbshipit-source-id: b3e4576400c101b247e80cb4044fc04471f39a47
Meghan Lele authored and facebook-github-bot committed Dec 2, 2020
1 parent a25d52f commit fc1153a
Showing 11 changed files with 41 additions and 31 deletions.
10 changes: 5 additions & 5 deletions torch/csrc/jit/runtime/graph_executor.cpp
@@ -131,7 +131,7 @@ struct CaptureList {
       auto tensors = val.toTensorList();
       sizes_.push_back(tensors.size());
 
-      for (const at::Tensor& tensor : tensors) {
+      for (const at::Tensor tensor : tensors) {
         captureTensor(tensor, is_output);
       }
     } else {
@@ -269,7 +269,7 @@ struct DifferentiableGraphBackward : public autograd::Node {
     size_t output_index = 0;
     for (IValue& v : stack) {
       if (v.isTensorList()) {
-        for (const at::Tensor& tensor : v.toTensorList()) {
+        for (at::Tensor tensor : v.toTensorList()) {
          produceOutput(output_index++, std::move(tensor), outputs);
         }
       } else if (v.isTensor()) {
@@ -295,7 +295,7 @@ struct DifferentiableGraphBackward : public autograd::Node {
   }
   void addOutputForIValue(const IValue& value) {
     if (value.isTensorList()) {
-      for (const at::Tensor& tensor : value.toTensorList()) {
+      for (const at::Tensor tensor : value.toTensorList()) {
         addOutputForTensor(tensor);
       }
     } else {
@@ -319,7 +319,7 @@ struct DifferentiableGraphBackward : public autograd::Node {
     if (v.isTensorList()) {
       auto tensors = v.toTensorList();
       input_instructions_.pushTensorList(tensors.size());
-      for (const at::Tensor& tensor : tensors) {
+      for (const at::Tensor tensor : tensors) {
         addInputVariable(tensor);
       }
     } else if (v.isTensor()) {
@@ -719,7 +719,7 @@ struct GraphExecutorImpl : public GraphExecutorImplBase {
 };
 
 GraphExecutor::GraphExecutor(
-    std::shared_ptr<Graph> graph,
+    const std::shared_ptr<Graph>& graph,
     std::string function_name)
     : pImpl(
           IsNewExecutorEnabled()
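Two clang-tidy patterns recur in this file. First, `c10::List` iterators dereference to proxy objects rather than references into storage, so the range-for element is taken by value: the tensor copy is a cheap refcount bump, and a `const&` would only bind to a temporary anyway. Where the loop body then calls `std::move(tensor)`, the binding also drops `const` so the move actually moves. Second, a `std::shared_ptr<Graph>` that the constructor only reads is now passed by `const&` to skip the atomic refcount traffic of a by-value copy (clang-tidy's performance-unnecessary-value-param). A minimal sketch of both points, using placeholder types rather than the real c10 ones:

```cpp
#include <cstdio>
#include <memory>
#include <vector>

struct Elem {
  int id;
};

// A list whose iterator yields elements by value, like c10::List's proxy.
struct ProxyList {
  std::vector<Elem> storage;
  struct Iter {
    const Elem* p;
    Elem operator*() const { return *p; }  // returns a temporary, not Elem&
    Iter& operator++() { ++p; return *this; }
    bool operator!=(const Iter& o) const { return p != o.p; }
  };
  Iter begin() const { return {storage.data()}; }
  Iter end() const { return {storage.data() + storage.size()}; }
};

struct Graph {};

// Read-only shared_ptr parameter: const& avoids an atomic refcount bump.
void describe(const std::shared_ptr<Graph>& g, const ProxyList& list) {
  for (const Elem e : list) {  // by value: the unavoidable copy is explicit
    std::printf("elem %d (graph alive: %d)\n", e.id, g != nullptr);
  }
}

int main() {
  auto g = std::make_shared<Graph>();
  describe(g, ProxyList{{{1}, {2}}});
}
```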
2 changes: 1 addition & 1 deletion torch/csrc/jit/runtime/graph_executor.h
@@ -55,7 +55,7 @@ struct TORCH_API EnableProfilingGuard {
 struct GraphExecutorImplBase;
 struct TORCH_API GraphExecutor {
   GraphExecutor() = default;
-  GraphExecutor(std::shared_ptr<Graph> graph, std::string function_name);
+  GraphExecutor(const std::shared_ptr<Graph>& graph, std::string function_name);
 
   void run(Stack& inputs);
   c10::intrusive_ptr<Future> runAsync(
2 changes: 1 addition & 1 deletion torch/csrc/jit/runtime/interpreter.cpp
@@ -706,7 +706,7 @@ struct CodeImpl {
   void emitCall(Function* func, at::ArrayRef<Value*> inputs) {
     emitLoadInputs(inputs);
     insertInstruction(CALL, function_table_.size());
-    function_table_.emplace_back(std::move(func));
+    function_table_.emplace_back(func);
   }
 
   void emitNodeAtBlockLevel(Node* node) {
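`func` here is a raw `Function*`. Pointers are trivially copyable, so `std::move(func)` compiles to exactly the same copy and only obscures the code; clang-tidy's performance-move-const-arg flags it. A small sketch with assumed stand-in types:

```cpp
#include <vector>

struct Function {};  // stand-in for torch::jit::Function

std::vector<Function*> function_table;

void emitCall(Function* func) {
  // std::move(func) would be a no-op cast on a raw pointer;
  // the plain copy says what actually happens.
  function_table.emplace_back(func);
}
```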
2 changes: 1 addition & 1 deletion torch/csrc/jit/runtime/interpreter.h
@@ -102,7 +102,7 @@ struct Suspend : public std::exception {
 // thread local settings are propagated with ThreadLocalState
 struct InterpreterContinuation {
   InterpreterContinuation(
-      InterpreterState state_,
+      const InterpreterState& state_,
       Stack stack_,
       int64_t dist_autograd_context_id = 0,
       c10::optional<at::ThreadLocalState> tls_state = c10::nullopt)
6 changes: 3 additions & 3 deletions torch/csrc/jit/runtime/logging.h
@@ -16,7 +16,7 @@ class LoggerBase {
   TORCH_API virtual void addStatValue(
       const std::string& stat_name,
       int64_t val) = 0;
-  virtual ~LoggerBase() {}
+  virtual ~LoggerBase() = default;
 };
 
 TORCH_API LoggerBase* getLogger();
@@ -28,7 +28,7 @@ TORCH_API LoggerBase* setLogger(LoggerBase* logger);
 class NoopLogger : public LoggerBase {
  public:
   void addStatValue(const std::string& stat_name, int64_t val) override {}
-  ~NoopLogger() {}
+  ~NoopLogger() = default;
 };
 
 // Trivial locking logger. Pass in an instance of this to setLogger() to use it.
@@ -42,7 +42,7 @@ class TORCH_API LockingLogger : public LoggerBase {
   virtual int64_t getCounterValue(const std::string& name) const;
   enum class AggregationType { SUM = 0, AVG = 1 };
   void setAggregationType(const std::string& stat_name, AggregationType type);
-  ~LockingLogger() {}
+  ~LockingLogger() = default;
 
  private:
   mutable std::mutex m;
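The three destructor changes all apply clang-tidy's modernize-use-equals-default: `= default` documents that the compiler-generated destructor is intended and, unlike an empty user-provided body, lets the compiler keep the destructor trivial when the class otherwise allows it. A brief sketch mirroring the logger hierarchy, with placeholder names:

```cpp
#include <cstdint>
#include <string>

// Abstract base: the defaulted virtual destructor replaces `~Base() {}`.
class StatLoggerBase {
 public:
  virtual void addStatValue(const std::string& name, int64_t value) = 0;
  virtual ~StatLoggerBase() = default;
};

class NoopStatLogger : public StatLoggerBase {
 public:
  void addStatValue(const std::string&, int64_t) override {}
  ~NoopStatLogger() override = default;
};
```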
8 changes: 3 additions & 5 deletions torch/csrc/jit/runtime/operator.h
@@ -73,7 +73,7 @@ struct TORCH_API Operator {
  public:
   Operator(c10::OperatorHandle opHandle, Operation operation)
       : op_(c10::make_left<C10Operator, JitOnlyOperator>(
-            C10Operator{std::move(opHandle), std::move(operation)})) {}
+            C10Operator{opHandle, std::move(operation)})) {}
 
   Operator(
       std::string schema,
@@ -102,8 +102,7 @@ struct TORCH_API Operator {
       : op_(c10::make_right<C10Operator, JitOnlyOperator>(JitOnlyOperator{
             c10::make_right<FunctionSchema, UnparsedFunctionSchema>(
                 UnparsedFunctionSchema{std::move(schema), alias_analysis}),
-            c10::make_right<Operation, OperationCreator>(
-                std::move(op_creator))})) {}
+            c10::make_right<Operation, OperationCreator>(op_creator)})) {}
 
   // Helper constructor to register `op` to run
   // run for _every_ IR Node where n.kind() == name, regardless of arguments.
@@ -116,8 +115,7 @@ struct TORCH_API Operator {
       : op_(c10::make_right<C10Operator, JitOnlyOperator>(JitOnlyOperator{
             c10::make_left<FunctionSchema, UnparsedFunctionSchema>(
                 varArgSchemaWithName(name, alias_analysis)),
-            c10::make_right<Operation, OperationCreator>(
-                std::move(op_creator))})) {}
+            c10::make_right<Operation, OperationCreator>(op_creator)})) {}
 
   Operation getOperation(const Node* node = nullptr) const {
     return op_.fold<Operation>(
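Both hunks remove a `std::move` from an `OperationCreator`, a function-pointer typedef in this header, and the first hunk does the same for the small copyable `opHandle`: as with the raw pointer above, moving a trivially copyable value is just a copy, so the plain spelling is preferred. Roughly, with simplified assumed types:

```cpp
// Simplified stand-ins; the real Operation/OperationCreator live in
// torch/csrc/jit/runtime/operator.h.
struct Stack;
using Operation = void (*)(Stack&);
using OperationCreator = Operation (*)(const void* node);

struct JitOnlyOperator {
  OperationCreator op_creator;
};

JitOnlyOperator makeOperator(OperationCreator op_creator) {
  // A function pointer has no resources to steal: std::move(op_creator)
  // performs the same copy, so it is written without the cast.
  return JitOnlyOperator{op_creator};
}
```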
2 changes: 1 addition & 1 deletion torch/csrc/jit/runtime/register_ops_utils.h
@@ -179,7 +179,7 @@ void setItem(const c10::List<T>& list, int64_t idx, T&& value) {
   if (normalized_idx < 0 || normalized_idx >= list_size) {
     throw std::out_of_range("list index out of range");
   }
-  list.set(normalized_idx, std::move(value));
+  list.set(normalized_idx, std::forward<T>(value));
 }
 
 void listAppend(Stack* stack);
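`value` is declared `T&& value` with `T` a deduced template parameter, which clang-tidy (bugprone-move-forwarding-reference) treats as a forwarding reference: `std::move` would also steal from a caller's lvalue when `T` deduces to a reference type, while `std::forward<T>` moves only for rvalue arguments. A self-contained sketch of the general rule, with placeholder names:

```cpp
#include <string>
#include <utility>
#include <vector>

std::vector<std::string> names;

// T&& on a deduced T is a forwarding reference: T becomes U& for lvalue
// arguments and U for rvalues. std::forward<T> preserves that distinction.
template <typename T>
void append(T&& value) {
  names.emplace_back(std::forward<T>(value));
}

int main() {
  std::string keep = "still valid after the call";
  append(keep);                      // lvalue: copied into the vector
  append(std::string("temporary"));  // rvalue: moved into the vector
}
```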
2 changes: 1 addition & 1 deletion torch/csrc/jit/runtime/register_prim_ops.cpp
@@ -1347,7 +1347,7 @@ int64_t normalizeIndex(int64_t idx, int64_t list_size) {
 
 int64_t stringFindImpl(
     std::string string,
-    std::string substr,
+    const std::string& substr,
     int64_t start,
     int64_t end,
     bool reverse = false) {
2 changes: 1 addition & 1 deletion torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp
@@ -348,7 +348,7 @@ RegisterOperators reg(
               at::infer_size(size, peek(stack, i, num_inputs).toIntVector());
           }
           drop(stack, num_inputs);
-          push(stack, IValue(std::move(size)));
+          push(stack, IValue(size));
         },
         aliasAnalysisSpecialCase()),
     Operator(
24 changes: 15 additions & 9 deletions torch/csrc/jit/runtime/vararg_functions.cpp
@@ -204,7 +204,10 @@ void namedTupleConstruct(
       c10::ivalue::Tuple::createNamed(std::move(elems), std::move(type)));
 }
 
-void listConstruct(Stack& stack, at::ListTypePtr type, size_t num_inputs) {
+void listConstruct(
+    Stack& stack,
+    const at::ListTypePtr& type,
+    size_t num_inputs) {
   c10::List<IValue> vals(type->getElementType());
   vals.reserve(num_inputs);
   for (size_t i = stack.size() - num_inputs; i < stack.size(); ++i) {
@@ -214,7 +217,10 @@ void listConstruct(Stack& stack, at::ListTypePtr type, size_t num_inputs) {
   push(stack, std::move(vals));
 }
 
-void dictConstruct(Stack& stack, at::DictTypePtr type, size_t num_inputs) {
+void dictConstruct(
+    Stack& stack,
+    const at::DictTypePtr& type,
+    size_t num_inputs) {
   at::TypePtr key_type = type->getKeyType();
   at::TypePtr value_type = type->getValueType();
   auto vals = c10::impl::GenericDict(key_type, value_type);
@@ -231,7 +237,7 @@ void dictConstruct(Stack& stack, at::DictTypePtr type, size_t num_inputs) {
   push(stack, std::move(vals));
 }
 
-void createObject(Stack& stack, at::ClassTypePtr type) {
+void createObject(Stack& stack, const at::ClassTypePtr& type) {
   auto userObj = c10::ivalue::Object::create(
       c10::StrongTypePtr(type->compilation_unit(), type),
       type->numAttributes());
@@ -267,19 +273,19 @@ void dequantize(Stack& stack) {
     auto elems = tuple->elements();
     std::vector<IValue> output_elems;
     output_elems.reserve(elems.size());
-    for (size_t i = 0; i < elems.size(); ++i) {
-      if (elems[i].isTensor()) {
-        output_elems.emplace_back(at::dequantize(elems[i].toTensor()));
+    for (const auto& elem : elems) {
+      if (elem.isTensor()) {
+        output_elems.emplace_back(at::dequantize(elem.toTensor()));
       } else {
-        output_elems.emplace_back(elems[i]);
+        output_elems.emplace_back(elem);
       }
     }
     push(stack, c10::ivalue::Tuple::create(std::move(output_elems)));
   } else if (iv.isTensorList()) {
     auto elems = iv.toTensorList();
     auto output_list = c10::impl::GenericList(elems.elementType());
-    for (size_t i = 0; i < elems.size(); ++i) {
-      output_list.emplace_back(at::dequantize(elems[i]));
+    for (auto&& elem : elems) {
+      output_list.emplace_back(at::dequantize(elem));
     }
     push(stack, std::move(output_list));
   } else {
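The `dequantize` hunks replace index loops with range-based loops (modernize-loop-convert). Besides dropping the repeated `elems[i]` subscripting, the loop binding can state the access pattern: `const auto&` for read-only elements, `auto&&` where the container yields proxies, as the tensor list here does. A standalone sketch of the transformation with placeholder data:

```cpp
#include <cstdio>
#include <vector>

int main() {
  const std::vector<double> quantized{0.5, 1.5, 2.5};
  std::vector<double> dequantized;
  dequantized.reserve(quantized.size());

  // Before: for (size_t i = 0; i < quantized.size(); ++i) used quantized[i].
  // After: the element is named once and the bounds are implicit.
  for (const auto& q : quantized) {
    dequantized.push_back(q * 2.0);  // stand-in for at::dequantize
  }

  for (const auto& d : dequantized) {
    std::printf("%f\n", d);
  }
  return 0;
}
```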
12 changes: 9 additions & 3 deletions torch/csrc/jit/runtime/vararg_functions.h
@@ -22,11 +22,17 @@ void namedTupleConstruct(
     at::TupleTypePtr type,
     size_t num_inputs);
 
-void listConstruct(Stack& stack, at::ListTypePtr list_type, size_t num_inputs);
+void listConstruct(
+    Stack& stack,
+    const at::ListTypePtr& list_type,
+    size_t num_inputs);
 
-void dictConstruct(Stack& stack, at::DictTypePtr type, size_t num_inputs);
+void dictConstruct(
+    Stack& stack,
+    const at::DictTypePtr& type,
+    size_t num_inputs);
 
-void createObject(Stack& stack, at::ClassTypePtr type);
+void createObject(Stack& stack, const at::ClassTypePtr& type);
 
 void isinstance(Stack& stack, at::ArrayRef<at::TypePtr> types);
 
