Update on "Misc changes from compiled autograd branch"
This PR pulls out some standalone changes from #103822


cc voznesenskym penguinwu anijain2305 EikanWang jgong5 Guobing-Chen XiaobingSuper zhuhaozhe blzheng Xia-Weiwen wenzhe-nrv jiayisunx ipiszy chenyang78

[ghstack-poisoned]
jansel committed Jul 8, 2023
2 parents 05bac9d + 7fdae0a commit b9276f5
Showing 81 changed files with 2,648 additions and 1,942 deletions.
4 changes: 1 addition & 3 deletions .ci/docker/common/install_base.sh
@@ -71,9 +71,7 @@ install_ubuntu() {
           libtool \
           vim \
           unzip \
-          gdb \
-          libxml2-dev \
-          libxslt-dev
+          gdb
 
   # Should resolve issues related to various apt package repository cert issues
   # see: https://github.com/pytorch/pytorch/issues/65931
2 changes: 1 addition & 1 deletion .ci/docker/common/install_onnx.sh
@@ -24,7 +24,7 @@ pip_install \
   transformers==4.25.1
 
 # TODO: change this when onnx-script is on testPypi
-pip_install "onnxscript@git+https://github.com/microsoft/onnxscript@7e131c578f290ffad1f26bacda11a83daf5476ba"
+pip_install "onnxscript@git+https://github.com/microsoft/onnxscript@2bb3e9f2d094912f81cb63cecb412efb14c65738"
 
 # Cache the transformers model to be used later by ONNX tests. We need to run the transformers
 # package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/
3 changes: 2 additions & 1 deletion .ci/docker/requirements-ci.txt
@@ -124,7 +124,8 @@ opt-einsum==3.3
 #Pinned versions: 3.3
 #test that import: test_linalg.py
 
-#pillow
+pillow==9.2.0 ; python_version <= "3.8"
+pillow==9.5.0 ; python_version > "3.8"
 #Description: Python Imaging Library fork
 #Pinned versions:
 #test that import:
2 changes: 1 addition & 1 deletion .github/ci_commit_pins/huggingface.txt
@@ -1 +1 @@
-4.27.4
+4.30.2
2 changes: 1 addition & 1 deletion aten/src/ATen/CPUGeneratorImpl.h
@@ -21,7 +21,7 @@ struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl {
   uint64_t seed() override;
   void set_state(const c10::TensorImpl& new_state) override;
   c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
-  static DeviceType device_type();
+  static c10::DeviceType device_type();
   uint32_t random();
   uint64_t random64();
   c10::optional<float> next_float_normal_sample();
30 changes: 15 additions & 15 deletions aten/src/ATen/Context.h
@@ -34,7 +34,7 @@ class TORCH_API Context {
   Context();
 
   const Generator& defaultGenerator(Device device) {
-    DeviceType device_type = device.type();
+    c10::DeviceType device_type = device.type();
     initCUDAIfNeeded(device_type);
     initHIPIfNeeded(device_type);
     if (device_type == at::kCPU) {
@@ -44,18 +44,18 @@
     } else if (device_type == at::kMPS) {
       return at::detail::getMPSHooks().getDefaultMPSGenerator();
     } else {
-      AT_ERROR(DeviceTypeName(device_type), " device type not enabled.");
+      AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled.");
     }
   }
-  Device getDeviceFromPtr(void* data, DeviceType device_type) {
+  Device getDeviceFromPtr(void* data, c10::DeviceType device_type) {
     initCUDAIfNeeded(device_type);
     initHIPIfNeeded(device_type);
     if (device_type == at::kCPU) {
-      return DeviceType::CPU;
+      return c10::DeviceType::CPU;
     } else if (device_type == at::kCUDA) {
       return at::detail::getCUDAHooks().getDeviceFromPtr(data);
     } else {
-      AT_ERROR(DeviceTypeName(device_type), " device type not enabled.");
+      AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled.");
     }
   }
   static bool isPinnedPtr(const void* data) {
@@ -96,19 +96,19 @@ class TORCH_API Context {
     return detail::getMPSHooks().hasMPS();
   }
   static bool hasIPU() {
-    return c10::impl::hasDeviceGuardImpl(at::DeviceType::IPU);
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::IPU);
   }
   static bool hasXLA() {
-    return c10::impl::hasDeviceGuardImpl(at::DeviceType::XLA);
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::XLA);
   }
   static bool hasXPU() {
     return detail::getXPUHooks().hasXPU();
   }
   static bool hasLazy() {
-    return c10::impl::hasDeviceGuardImpl(at::DeviceType::Lazy);
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::Lazy);
   }
   static bool hasORT() {
-    return c10::impl::hasDeviceGuardImpl(at::DeviceType::ORT);
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::ORT);
   }
   // defined in header so that getNonVariableType has ability to inline
   // call_once check. getNonVariableType is called fairly frequently
@@ -273,13 +273,13 @@ class TORCH_API Context {
   void unsetDefaultMobileCPUAllocator();
 
  private:
-  void initCUDAIfNeeded(DeviceType p) {
-    if (p == DeviceType::CUDA) {
+  void initCUDAIfNeeded(c10::DeviceType p) {
+    if (p == c10::DeviceType::CUDA) {
       lazyInitCUDA();
     }
   }
-  void initHIPIfNeeded(DeviceType p) {
-    if (p == DeviceType::HIP) {
+  void initHIPIfNeeded(c10::DeviceType p) {
+    if (p == c10::DeviceType::HIP) {
       lazyInitHIP();
     }
   }
@@ -428,7 +428,7 @@ static inline bool hasMKLDNN() {
 }
 
 static inline void manual_seed(uint64_t seed) {
-  auto gen = globalContext().defaultGenerator(DeviceType::CPU);
+  auto gen = globalContext().defaultGenerator(c10::DeviceType::CPU);
   {
     // See Note [Acquire lock when using random generators]
     std::lock_guard<std::mutex> lock(gen.mutex());
@@ -450,7 +450,7 @@ static inline void manual_seed(uint64_t seed) {
   }
 
   if (hasMPS()) {
-    auto mps_gen = globalContext().defaultGenerator(DeviceType::MPS);
+    auto mps_gen = globalContext().defaultGenerator(c10::DeviceType::MPS);
     // See Note [Acquire lock when using random generators]
     std::lock_guard<std::mutex> lock(mps_gen.mutex());
     mps_gen.set_current_seed(seed);
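Both manual_seed hunks above follow the convention from Note [Acquire lock when using random generators]: copy the Generator handle, take its mutex, then mutate its state. A minimal sketch of calling the same API from standalone C++ (illustrative usage only, assuming a program linked against libtorch):

#include <mutex>
#include <ATen/Context.h>

int main() {
  // Same pattern as manual_seed() above: fetch the default generator
  // for a device, then hold its mutex while changing the seed.
  auto gen = at::globalContext().defaultGenerator(c10::DeviceType::CPU);
  {
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(gen.mutex());
    gen.set_current_seed(42);
  }
  return 0;
}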
2 changes: 1 addition & 1 deletion aten/src/ATen/TensorIterator.h
@@ -309,7 +309,7 @@ struct TORCH_API TensorIteratorBase : public impl::MetaBase {
   Device device(int arg = 0) const {
     return operands_[arg].device.value();
   }
-  DeviceType device_type(int arg = 0) const {
+  c10::DeviceType device_type(int arg = 0) const {
     return device(arg).type();
   }
   int64_t element_size(int arg) const {
2 changes: 1 addition & 1 deletion aten/src/ATen/Utils.h
@@ -33,7 +33,7 @@ static inline std::vector<TensorImpl*> checked_dense_tensor_list_unwrap(
     ArrayRef<Tensor> tensors,
     const char* name,
     int pos,
-    DeviceType device_type,
+    c10::DeviceType device_type,
     ScalarType scalar_type) {
   std::vector<TensorImpl*> unwrapped;
   unwrapped.reserve(tensors.size());
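Nearly all of the ATen header changes in this commit are the same mechanical edit: spelling DeviceType as c10::DeviceType. The unqualified name compiles only where a using-declaration for c10::DeviceType happens to be in scope, so the explicit qualification keeps each header self-contained. A standalone sketch of the distinction (simplified stand-in enum, not the real c10 definition):

#include <cstdint>

namespace c10 {
// Simplified stand-in for c10::DeviceType; the real enum has many more entries.
enum class DeviceType : int8_t { CPU = 0, CUDA = 1 };
} // namespace c10

namespace at {
using c10::DeviceType; // this is what lets the unqualified spelling compile

// Before: depends on the using-declaration above being visible here.
DeviceType device_type_unqualified() { return DeviceType::CPU; }

// After: resolves explicitly, independent of any using-declaration.
c10::DeviceType device_type_qualified() { return c10::DeviceType::CPU; }
} // namespace at

int main() {
  return at::device_type_unqualified() == at::device_type_qualified() ? 0 : 1;
}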
