Commit b9276f5

Update on "Misc changes from compiled autograd branch"
This PR pulls out some standalone changes from #103822.

cc voznesenskym penguinwu anijain2305 EikanWang jgong5 Guobing-Chen XiaobingSuper zhuhaozhe blzheng Xia-Weiwen wenzhe-nrv jiayisunx ipiszy chenyang78

[ghstack-poisoned]
2 parents 05bac9d + 7fdae0a commit b9276f5

File tree: 81 files changed, 2648 additions, 1942 deletions

.ci/docker/common/install_base.sh

Lines changed: 1 addition & 3 deletions

```diff
@@ -71,9 +71,7 @@ install_ubuntu() {
     libtool \
     vim \
     unzip \
-    gdb \
-    libxml2-dev \
-    libxslt-dev
+    gdb

   # Should resolve issues related to various apt package repository cert issues
   # see: https://github.com/pytorch/pytorch/issues/65931
```

.ci/docker/common/install_onnx.sh

Lines changed: 1 addition & 1 deletion

```diff
@@ -24,7 +24,7 @@ pip_install \
   transformers==4.25.1

 # TODO: change this when onnx-script is on testPypi
-pip_install "onnxscript@git+https://github.com/microsoft/onnxscript@7e131c578f290ffad1f26bacda11a83daf5476ba"
+pip_install "onnxscript@git+https://github.com/microsoft/onnxscript@2bb3e9f2d094912f81cb63cecb412efb14c65738"

 # Cache the transformers model to be used later by ONNX tests. We need to run the transformers
 # package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/
```

.ci/docker/requirements-ci.txt

Lines changed: 2 additions & 1 deletion

```diff
@@ -124,7 +124,8 @@ opt-einsum==3.3
 #Pinned versions: 3.3
 #test that import: test_linalg.py

-#pillow
+pillow==9.2.0 ; python_version <= "3.8"
+pillow==9.5.0 ; python_version > "3.8"
 #Description: Python Imaging Library fork
 #Pinned versions:
 #test that import:
```
Lines changed: 1 addition & 1 deletion

```diff
@@ -1 +1 @@
-4.27.4
+4.30.2
```

aten/src/ATen/CPUGeneratorImpl.h

Lines changed: 1 addition & 1 deletion

```diff
@@ -21,7 +21,7 @@ struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl {
   uint64_t seed() override;
   void set_state(const c10::TensorImpl& new_state) override;
   c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
-  static DeviceType device_type();
+  static c10::DeviceType device_type();
   uint32_t random();
   uint64_t random64();
   c10::optional<float> next_float_normal_sample();
```
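The edit above is the pattern repeated across the ATen headers in this commit: the unqualified `DeviceType` becomes `c10::DeviceType`. The type lives in the `c10` namespace and is only re-exported into `at`, so spelling it out keeps each header compilable regardless of which using-declarations happen to be in scope at the include site. A minimal self-contained sketch of the failure mode being avoided (toy namespaces, not the real ATen headers):

```cpp
// Toy reconstruction (hypothetical names, not ATen) of why the headers
// switch from plain DeviceType to the fully qualified c10::DeviceType.
#include <iostream>

namespace c10 {
enum class DeviceType : int8_t { CPU = 0, CUDA = 1 };
inline const char* DeviceTypeName(DeviceType t) {
  return t == DeviceType::CPU ? "cpu" : "cuda";
}
} // namespace c10

namespace at {
using namespace c10; // re-export, roughly how ATen exposes c10 names
} // namespace at

// Code outside `namespace at` (or included before the re-export) cannot rely
// on the unqualified name; the qualified spelling always resolves:
c10::DeviceType default_device() {
  return c10::DeviceType::CPU; // `DeviceType::CPU` alone would not compile here
}

int main() {
  std::cout << c10::DeviceTypeName(default_device()) << "\n"; // prints "cpu"
}
```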

aten/src/ATen/Context.h

Lines changed: 15 additions & 15 deletions

```diff
@@ -34,7 +34,7 @@ class TORCH_API Context {
   Context();

   const Generator& defaultGenerator(Device device) {
-    DeviceType device_type = device.type();
+    c10::DeviceType device_type = device.type();
     initCUDAIfNeeded(device_type);
     initHIPIfNeeded(device_type);
     if (device_type == at::kCPU) {
@@ -44,18 +44,18 @@ class TORCH_API Context {
     } else if (device_type == at::kMPS) {
       return at::detail::getMPSHooks().getDefaultMPSGenerator();
     } else {
-      AT_ERROR(DeviceTypeName(device_type), " device type not enabled.");
+      AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled.");
     }
   }
-  Device getDeviceFromPtr(void* data, DeviceType device_type) {
+  Device getDeviceFromPtr(void* data, c10::DeviceType device_type) {
     initCUDAIfNeeded(device_type);
     initHIPIfNeeded(device_type);
     if (device_type == at::kCPU) {
-      return DeviceType::CPU;
+      return c10::DeviceType::CPU;
     } else if (device_type == at::kCUDA) {
       return at::detail::getCUDAHooks().getDeviceFromPtr(data);
     } else {
-      AT_ERROR(DeviceTypeName(device_type), " device type not enabled.");
+      AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled.");
     }
   }
   static bool isPinnedPtr(const void* data) {
@@ -96,19 +96,19 @@ class TORCH_API Context {
     return detail::getMPSHooks().hasMPS();
   }
   static bool hasIPU() {
-    return c10::impl::hasDeviceGuardImpl(at::DeviceType::IPU);
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::IPU);
   }
   static bool hasXLA() {
-    return c10::impl::hasDeviceGuardImpl(at::DeviceType::XLA);
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::XLA);
   }
   static bool hasXPU() {
     return detail::getXPUHooks().hasXPU();
   }
   static bool hasLazy() {
-    return c10::impl::hasDeviceGuardImpl(at::DeviceType::Lazy);
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::Lazy);
   }
   static bool hasORT() {
-    return c10::impl::hasDeviceGuardImpl(at::DeviceType::ORT);
+    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::ORT);
   }
   // defined in header so that getNonVariableType has ability to inline
   // call_once check. getNonVariableType is called fairly frequently
@@ -273,13 +273,13 @@ class TORCH_API Context {
   void unsetDefaultMobileCPUAllocator();

  private:
-  void initCUDAIfNeeded(DeviceType p) {
-    if (p == DeviceType::CUDA) {
+  void initCUDAIfNeeded(c10::DeviceType p) {
+    if (p == c10::DeviceType::CUDA) {
       lazyInitCUDA();
     }
   }
-  void initHIPIfNeeded(DeviceType p) {
-    if (p == DeviceType::HIP) {
+  void initHIPIfNeeded(c10::DeviceType p) {
+    if (p == c10::DeviceType::HIP) {
       lazyInitHIP();
     }
   }
@@ -428,7 +428,7 @@ static inline bool hasMKLDNN() {
 }

 static inline void manual_seed(uint64_t seed) {
-  auto gen = globalContext().defaultGenerator(DeviceType::CPU);
+  auto gen = globalContext().defaultGenerator(c10::DeviceType::CPU);
   {
     // See Note [Acquire lock when using random generators]
     std::lock_guard<std::mutex> lock(gen.mutex());
@@ -450,7 +450,7 @@ static inline void manual_seed(uint64_t seed) {
   }

   if (hasMPS()) {
-    auto mps_gen = globalContext().defaultGenerator(DeviceType::MPS);
+    auto mps_gen = globalContext().defaultGenerator(c10::DeviceType::MPS);
     // See Note [Acquire lock when using random generators]
     std::lock_guard<std::mutex> lock(mps_gen.mutex());
     mps_gen.set_current_seed(seed);
```
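The two `manual_seed` hunks only change how the generator's device type is spelled; the locking from Note [Acquire lock when using random generators] stays inside `manual_seed` itself. A small usage sketch (assumes a working libtorch/ATen build; `at::manual_seed`, `at::randn`, and `at::equal` are existing APIs):

```cpp
// Usage sketch for the function patched above. at::manual_seed() takes each
// generator's mutex internally, so callers seed and sample without locking.
#include <ATen/ATen.h>

int main() {
  at::manual_seed(42);               // seeds the CPU generator (and CUDA/MPS when present)
  at::Tensor a = at::randn({2, 2});

  at::manual_seed(42);               // re-seeding replays the same random stream
  at::Tensor b = at::randn({2, 2});

  return at::equal(a, b) ? 0 : 1;    // expect 0: identical draws
}
```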

aten/src/ATen/TensorIterator.h

Lines changed: 1 addition & 1 deletion

```diff
@@ -309,7 +309,7 @@ struct TORCH_API TensorIteratorBase : public impl::MetaBase {
   Device device(int arg = 0) const {
     return operands_[arg].device.value();
   }
-  DeviceType device_type(int arg = 0) const {
+  c10::DeviceType device_type(int arg = 0) const {
     return device(arg).type();
   }
   int64_t element_size(int arg) const {
```

aten/src/ATen/Utils.h

Lines changed: 1 addition & 1 deletion

```diff
@@ -33,7 +33,7 @@ static inline std::vector<TensorImpl*> checked_dense_tensor_list_unwrap(
     ArrayRef<Tensor> tensors,
     const char* name,
     int pos,
-    DeviceType device_type,
+    c10::DeviceType device_type,
     ScalarType scalar_type) {
   std::vector<TensorImpl*> unwrapped;
   unwrapped.reserve(tensors.size());
```
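For context on the signature touched above: `checked_dense_tensor_list_unwrap` validates every tensor in a list against an expected device type and scalar type before handing back raw `TensorImpl*` pointers. A simplified sketch of that checked-unwrap pattern (toy types, covering only the `c10::DeviceType` check; not the actual ATen implementation):

```cpp
// Simplified sketch (toy types, not ATen) of the checked-unwrap pattern:
// validate each element of a tensor list against an expected
// c10::DeviceType before exposing raw implementation pointers.
#include <cstddef>
#include <stdexcept>
#include <string>
#include <vector>

namespace c10 {
enum class DeviceType { CPU, CUDA };
}

struct ToyTensorImpl {
  c10::DeviceType device;
};

std::vector<ToyTensorImpl*> checked_unwrap(
    std::vector<ToyTensorImpl>& tensors,
    const char* name,          // argument name, used in the error message
    int pos,                   // argument position, used in the error message
    c10::DeviceType expected) {
  std::vector<ToyTensorImpl*> unwrapped;
  unwrapped.reserve(tensors.size());
  for (std::size_t i = 0; i != tensors.size(); ++i) {
    if (tensors[i].device != expected) {
      throw std::runtime_error(
          "wrong device for tensor " + std::to_string(i) + " of argument " +
          name + " (position " + std::to_string(pos) + ")");
    }
    unwrapped.push_back(&tensors[i]);
  }
  return unwrapped;
}
```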
