
Merge pull request #526 from helmholtz-analytics/bugfixes/491-dtype-homogenization

float32 is now the consistent default data type in factories
Markus-Goetz committed Apr 6, 2020
2 parents 95fda46 + 9c6b18f commit 0762ebd
Showing 7 changed files with 102 additions and 121 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -15,6 +15,7 @@
- [#519](https://github.com/helmholtz-analytics/heat/pull/519) Bugfix: distributed slicing with empty list or scalar as input; distributed nonzero() of empty (local) tensor.
- [#521](https://github.com/helmholtz-analytics/heat/pull/521) Add documentation for the generic reduce_op in Heat's core
- [#522](https://github.com/helmholtz-analytics/heat/pull/522) Added CUDA-aware MPI detection for MVAPICH, MPICH and ParaStation.
+- [#526](https://github.com/helmholtz-analytics/heat/pull/526) float32 is now consistent default dtype for factories.

# v0.3.0

10 changes: 5 additions & 5 deletions heat/core/random.py
@@ -237,7 +237,7 @@ def __kundu_transform(values):
return (torch.log(-torch.log(1 - values ** 0.0775)) - 1.0821) * __KUNDU_INVERSE


-def rand(*args, dtype=types.float64, split=None, device=None, comm=None):
+def rand(*args, dtype=types.float32, split=None, device=None, comm=None):
"""
Random values in a given shape.
@@ -249,7 +249,7 @@ def rand(*args, dtype=types.float64, split=None, device=None, comm=None):
The dimensions of the returned array, should all be positive. If no argument is given a single random samples is
generated.
dtype: ht.types, optional
-The datatype of the returned values. Has to be one of [ht.float32, ht.float64]. Default is ht.float64.
+The datatype of the returned values. Has to be one of [ht.float32, ht.float64]. Default is ht.float32.
split: int, optional
The axis along which the array is split and distributed, defaults to None (no distribution).
device : str or None, optional
@@ -349,7 +349,7 @@ def randint(low, high=None, size=None, dtype=None, split=None, device=None, comm

# sanitize the data type
if dtype is None:
-dtype = types.int64
+dtype = types.int32
dtype = types.canonical_heat_type(dtype)
if dtype not in [types.int64, types.int32]:
raise ValueError("Unsupported dtype for randint")
@@ -374,7 +374,7 @@ def randint(low, high=None, size=None, dtype=None, split=None, device=None, comm
return dndarray.DNDarray(values, shape, dtype, split, device, comm)


-def randn(*args, dtype=types.float64, split=None, device=None, comm=None):
+def randn(*args, dtype=types.float32, split=None, device=None, comm=None):
"""
Returns a tensor filled with random numbers from a standard normal distribution with zero mean and variance of one.
@@ -383,7 +383,7 @@ def randn(*args, dtype=types.float64, split=None, device=None, comm=None):
d0, d1, …, dn : int, optional
The dimensions of the returned array, should be all positive.
dtype: ht.types, optional
-The datatype of the returned values. Has to be one of [ht.float32, ht.float64]. Default is ht.float64.
+The datatype of the returned values. Has to be one of [ht.float32, ht.float64]. Default is ht.float32.
split: int, optional
The axis along which the array is split and distributed, defaults to None (no distribution).
device : str or None, optional
20 changes: 10 additions & 10 deletions heat/core/tests/test_communication.py
@@ -241,7 +241,7 @@ def test_allgather(self):

# contiguous data, different gather axis
data = ht.ones((7, 2), dtype=ht.float64, device=ht_device)
-output = ht.random.randn(7, 2 * ht.MPI_WORLD.size, device=ht_device)
+output = ht.random.randn(7, 2 * ht.MPI_WORLD.size, dtype=ht.float64, device=ht_device)

# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
@@ -961,7 +961,7 @@ def test_iallgather(self):

# contiguous data, different gather axis
data = ht.ones((7, 2), dtype=ht.float64, device=ht_device)
-output = ht.random.randn(7, 2 * ht.MPI_WORLD.size, device=ht_device)
+output = ht.random.randn(7, 2 * ht.MPI_WORLD.size, dtype=ht.float64, device=ht_device)

# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
@@ -1491,7 +1491,7 @@ def test_igather(self):
try:
# contiguous data
data = ht.ones((1, 5), dtype=ht.float64, device=ht_device)
-output = ht.random.randn(ht.MPI_WORLD.size, 5, device=ht_device)
+output = ht.random.randn(ht.MPI_WORLD.size, 5, dtype=ht.float64, device=ht_device)

# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
@@ -1506,13 +1506,13 @@
self.assertTrue(
(
output._DNDarray__array
-== torch.ones((ht.MPI_WORLD.size, 5), dtype=torch.float32, device=device)
+== torch.ones((ht.MPI_WORLD.size, 5), dtype=torch.float64, device=device)
).all()
)

# contiguous data, different gather axis
data = ht.ones((5, 2), dtype=ht.float64, device=ht_device)
-output = ht.random.randn(5, 2 * ht.MPI_WORLD.size, device=ht_device)
+output = ht.random.randn(5, 2 * ht.MPI_WORLD.size, dtype=ht.float64, device=ht_device)

# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
@@ -1528,14 +1528,14 @@
(
output._DNDarray__array
== torch.ones(
-(5, 2 * ht.MPI_WORLD.size), dtype=torch.float32, device=device
+(5, 2 * ht.MPI_WORLD.size), dtype=torch.float64, device=device
)
).all()
)

# non-contiguous data
data = ht.ones((3, 5), dtype=ht.float64, device=ht_device).T
-output = ht.random.randn(5, 3 * ht.MPI_WORLD.size, device=ht_device)
+output = ht.random.randn(5, 3 * ht.MPI_WORLD.size, dtype=ht.float64, device=ht_device)

# ensure prior invariants
self.assertFalse(data._DNDarray__array.is_contiguous())
@@ -1551,14 +1551,14 @@
(
output._DNDarray__array
== torch.ones(
-(5, 3 * ht.MPI_WORLD.size), dtype=torch.float32, device=device
+(5, 3 * ht.MPI_WORLD.size), dtype=torch.float64, device=device
)
).all()
)

# non-contiguous output, different gather axis
data = ht.ones((5, 3), dtype=ht.float64, device=ht_device)
-output = ht.random.randn(3 * ht.MPI_WORLD.size, 5, device=ht_device).T
+output = ht.random.randn(3 * ht.MPI_WORLD.size, 5, dtype=ht.float64, device=ht_device).T

# ensure prior invariants
self.assertTrue(data._DNDarray__array.is_contiguous())
@@ -1574,7 +1574,7 @@
(
output._DNDarray__array
== torch.ones(
-(5, 3 * ht.MPI_WORLD.size), dtype=torch.float32, device=device
+(5, 3 * ht.MPI_WORLD.size), dtype=torch.float64, device=device
)
).all()
)
16 changes: 8 additions & 8 deletions heat/core/tests/test_manipulations.py
@@ -1003,8 +1003,8 @@ def test_squeeze(self):
# 4D local tensor, no axis
result = ht.squeeze(data)
self.assertIsInstance(result, ht.DNDarray)
-self.assertEqual(result.dtype, ht.float64)
-self.assertEqual(result._DNDarray__array.dtype, torch.float64)
+self.assertEqual(result.dtype, ht.float32)
+self.assertEqual(result._DNDarray__array.dtype, torch.float32)
self.assertEqual(result.shape, (4, 5))
self.assertEqual(result.lshape, (4, 5))
self.assertEqual(result.split, None)
@@ -1013,8 +1013,8 @@
# 4D local tensor, major axis
result = ht.squeeze(data, axis=0)
self.assertIsInstance(result, ht.DNDarray)
-self.assertEqual(result.dtype, ht.float64)
-self.assertEqual(result._DNDarray__array.dtype, torch.float64)
+self.assertEqual(result.dtype, ht.float32)
+self.assertEqual(result._DNDarray__array.dtype, torch.float32)
self.assertEqual(result.shape, (4, 5, 1))
self.assertEqual(result.lshape, (4, 5, 1))
self.assertEqual(result.split, None)
@@ -1023,8 +1023,8 @@
# 4D local tensor, minor axis
result = ht.squeeze(data, axis=-1)
self.assertIsInstance(result, ht.DNDarray)
-self.assertEqual(result.dtype, ht.float64)
-self.assertEqual(result._DNDarray__array.dtype, torch.float64)
+self.assertEqual(result.dtype, ht.float32)
+self.assertEqual(result._DNDarray__array.dtype, torch.float32)
self.assertEqual(result.shape, (1, 4, 5))
self.assertEqual(result.lshape, (1, 4, 5))
self.assertEqual(result.split, None)
@@ -1033,8 +1033,8 @@
# 4D local tensor, tuple axis
result = data.squeeze(axis=(0, -1))
self.assertIsInstance(result, ht.DNDarray)
-self.assertEqual(result.dtype, ht.float64)
-self.assertEqual(result._DNDarray__array.dtype, torch.float64)
+self.assertEqual(result.dtype, ht.float32)
+self.assertEqual(result._DNDarray__array.dtype, torch.float32)
self.assertEqual(result.shape, (4, 5))
self.assertEqual(result.lshape, (4, 5))
self.assertEqual(result.split, None)
