From e16036283726dd12bb29a0e70b115c903ef03587 Mon Sep 17 00:00:00 2001
From: Alban Desmaison
Date: Mon, 11 Jan 2021 15:15:07 -0800
Subject: [PATCH] Add range assert in autograd engine queue lookup (#50372)

Summary:
Follow up to https://github.com/pytorch/pytorch/issues/49652

Pull Request resolved: https://github.com/pytorch/pytorch/pull/50372

Reviewed By: zhangguanheng66

Differential Revision: D25872203

Pulled By: albanD

fbshipit-source-id: 8d6f30f17fba856c5c34c08372767349a250983d
---
 torch/csrc/autograd/engine.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/torch/csrc/autograd/engine.cpp b/torch/csrc/autograd/engine.cpp
index ad217c2924ad..af295feba51a 100644
--- a/torch/csrc/autograd/engine.cpp
+++ b/torch/csrc/autograd/engine.cpp
@@ -1051,6 +1051,8 @@ auto Engine::ready_queue_by_index(std::shared_ptr<ReadyQueue> cpu_ready_queue, i
     TORCH_INTERNAL_ASSERT(cpu_ready_queue);
     return cpu_ready_queue;
   } else {
+    // Static cast is ok here as the number of device should never overflow an int.
+    TORCH_INTERNAL_ASSERT(0 <= device_index && device_index < static_cast<int>(device_ready_queues_.size()));
     // See Note [Allocating GPUs to autograd threads]
     // NB: This function would become obsolete if we truly allocated a CPU thread
     // per device, rather than colocate.