Rename true_div to true_divide to match with NumPy #1414

Merged · 6 commits · Feb 13, 2023
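The rename follows NumPy's naming, where `true_divide` is just another name for `divide`; a quick plain-NumPy check (not Aesara code) of the convention this PR adopts:

```python
import numpy as np

# In current NumPy, `true_divide` and `divide` are the same ufunc; the renamed
# Aesara ops below mirror that naming (and keep `divide` as a second alias).
print(np.true_divide is np.divide)         # True
print(np.true_divide(np.arange(4), 2))     # [0.  0.5 1.  1.5]
```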
14 changes: 11 additions & 3 deletions aesara/scalar/basic.py
@@ -794,7 +794,7 @@ def __mul__(self, other):
return mul(self, other)

def __truediv__(self, other):
- return true_div(self, other)
+ return true_divide(self, other)

def __floordiv__(self, other):
return int_div(self, other)
@@ -2035,7 +2035,10 @@ def grad(self, inputs, gout):
return first_part, second_part


- true_div = TrueDiv(upcast_out, name="true_div")
+ true_divide = TrueDiv(upcast_out, name="true_divide")
+
+ true_div = true_divide
+ divide = true_divide


class IntDiv(BinaryScalarOp):
@@ -2869,7 +2872,7 @@ def c_code(self, node, name, inputs, outputs, sub):
pprint.assign(mul, printing.OperatorPrinter("*", -1, "either"))
pprint.assign(sub, printing.OperatorPrinter("-", -2, "left"))
pprint.assign(neg, printing.OperatorPrinter("-", 0, "either"))
- pprint.assign(true_div, printing.OperatorPrinter("/", -1, "left"))
+ pprint.assign(true_divide, printing.OperatorPrinter("/", -1, "left"))
pprint.assign(int_div, printing.OperatorPrinter("//", -1, "left"))
pprint.assign(pow, printing.OperatorPrinter("**", 1, "right"))
pprint.assign(mod, printing.OperatorPrinter("%", -1, "left"))
@@ -4455,6 +4458,11 @@ def handle_composite(node, mapping):
("Inv", "`Inv` is deprecated; use `Reciprocal` instead.", Reciprocal),
("inv", "`inv` is deprecated; use `reciprocal` instead.", reciprocal),
("Scalar", "`Scalar` is deprecated; use `ScalarType` instead.", ScalarType),
+ (
+     "true_div",
+     "`true_div` is deprecated; use `true_divide` or `divide` instead.",
+     true_divide,
+ ),
]


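The hunks above do two things for backwards compatibility: they keep `true_div` (and add `divide`) as plain aliases of the new `true_divide` op, and they register `true_div` in the module's deprecated-names table. The code that consumes that table is outside this hunk; a minimal, hypothetical sketch of the usual pattern (a module-level `__getattr__`, PEP 562, with `true_divide` stubbed out) might look like:

```python
import warnings

# Hypothetical sketch only: the actual consumer of the table in
# aesara/scalar/basic.py is not shown in this diff.
DEPRECATED_NAMES = [
    ("true_div", "`true_div` is deprecated; use `true_divide` or `divide` instead.", "true_divide"),
]

def true_divide(a, b):
    """Stand-in for the real scalar Op, just for this sketch."""
    return a / b

def __getattr__(name):  # PEP 562: called for attribute names not found in the module
    for old_name, message, replacement in DEPRECATED_NAMES:
        if name == old_name:
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            return globals()[replacement]
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

Note that as long as the plain `true_div = true_divide` alias also exists as a module global, old imports keep working regardless of how the table is consumed.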
4 changes: 2 additions & 2 deletions aesara/scalar/math.py
@@ -28,7 +28,7 @@
log,
log1p,
switch,
- true_div,
+ true_divide,
upcast,
upgrade_to_float,
upgrade_to_float64,
@@ -1241,7 +1241,7 @@ def impl(self, x):
def grad(self, inp, grads):
(x,) = inp
(gz,) = grads
- res = true_div(-1.0, expm1(-x))
+ res = true_divide(-1.0, expm1(-x))
# Correct gradient at 0.0 to be -inf
res = switch(isinf(res), -np.inf, res)
return [gz * res]
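For context on the hunk above: `-1.0 / expm1(-x)` is the derivative of `log(1 - exp(x))` (the enclosing Op class is outside the hunk, so treat this only as an illustration of the renamed expression). A small NumPy finite-difference check, plus the division by zero at `x == 0` that the `switch` corrects to `-inf`:

```python
import numpy as np

# Finite-difference check of d/dx log(1 - exp(x)) against -1/expm1(-x).
def f(x):
    return np.log(-np.expm1(x))      # log(1 - exp(x)), defined for x < 0

def g(x):
    return -1.0 / np.expm1(-x)       # the expression being renamed above

x, eps = -0.3, 1e-6
print((f(x + eps) - f(x - eps)) / (2 * eps), g(x))   # both ≈ -2.858

with np.errstate(divide="ignore"):
    print(g(0.0))   # division by zero at x == 0; the switch() pins the limit to -inf
```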
2 changes: 1 addition & 1 deletion aesara/tensor/elemwise.py
@@ -325,7 +325,7 @@ class Elemwise(OpenMPOp):
-``Elemwise(add, {0 : 1})``: represents ``+=`` on the second argument ``y += x``
-``Elemwise(mul)(np.random.random((10, 5)), np.random.random((1, 5)))``:
the second input is completed along the first dimension to match the first input
- -``Elemwise(true_div)(np.random.random(10, 5), np.random.random(10, 1))``: same but along the
+ -``Elemwise(true_divide)(np.random.random(10, 5), np.random.random(10, 1))``: same but along the
second dimension
-``Elemwise(int_div)(np.random.random((1, 5)), np.random.random((10, 1)))``:
the output has size ``(10, 5)``.
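A plain-NumPy analogue of the docstring's `true_divide` example (note that real NumPy wants a shape tuple, unlike the docstring's `np.random.random(10, 5)`):

```python
import numpy as np

# Broadcasting described in the docstring: a (10, 5) array divided by a (10, 1)
# array broadcasts the divisor along the second dimension.
a = np.random.random((10, 5))
b = np.random.random((10, 1))
print(np.true_divide(a, b).shape)   # (10, 5), same result as a / b
```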
14 changes: 7 additions & 7 deletions aesara/tensor/math.py
@@ -1608,7 +1608,7 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False, acc_dtype=None)

# Cast shp into a float type
# TODO Once we have a consistent casting policy, we could simply
- # use true_div.
+ # use true_divide.
if s.dtype in ("float16", "float32", "complex64"):
shp = cast(shp, "float32")
else:
@@ -1625,7 +1625,7 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False, acc_dtype=None)

# This sequential division will possibly be optimized by Aesara:
for i in axis:
- s = true_div(s, shp[i])
+ s = true_divide(s, shp[i])

# This can happen when axis is an empty list/tuple
if s.dtype != shp.dtype and s.dtype in discrete_dtypes:
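A small NumPy sketch of the "sequential division" above: dividing the summed values by each reduced axis length in turn gives the same result as dividing by their product, which is what `mean()` relies on:

```python
import numpy as np

# Dividing by each reduced axis length in sequence is equivalent to dividing
# by their product, i.e. computing the mean over those axes.
x = np.random.random((3, 4, 5))
axis = (0, 2)
s = x.sum(axis=axis)
for n in (x.shape[0], x.shape[2]):
    s = np.true_divide(s, n)
print(np.allclose(s, x.mean(axis=axis)))   # True
```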
@@ -1697,7 +1697,7 @@ def var(input, axis=None, ddof=0, keepdims=False, corrected=False):
shp = shape(input) - ddof
v = sum((centered_input**two), axis=axis, keepdims=keepdims)
for i in axis:
- v = true_div(v, shp[i])
+ v = true_divide(v, shp[i])

# use 'corrected_two_pass' algorithm
if corrected:
@@ -1708,7 +1708,7 @@ def var(input, axis=None, ddof=0, keepdims=False, corrected=False):
shp_inp = shape(input)
error = sum(centered_input, axis=axis, keepdims=keepdims) ** 2
for i in axis:
- error = true_div(error, shp[i] * shp_inp[i])
+ error = true_divide(error, shp[i] * shp_inp[i])
v = v - error

v.name = "var"
@@ -1794,7 +1794,7 @@ def mul(a, *other_terms):


@scalar_elemwise
- def true_div(a, b):
+ def true_divide(a, b):
"""elementwise [true] division (inverse of multiplication)"""
# see decorator for function body

@@ -1876,7 +1876,7 @@ def clip(x, min, max):
pprint.assign(mul, printing.OperatorPrinter("*", -1, "either"))
pprint.assign(sub, printing.OperatorPrinter("-", -2, "left"))
pprint.assign(neg, printing.OperatorPrinter("-", 0, "either"))
- pprint.assign(true_div, printing.OperatorPrinter("/", -1, "left"))
+ pprint.assign(true_divide, printing.OperatorPrinter("/", -1, "left"))
pprint.assign(int_div, printing.OperatorPrinter("//", -1, "left"))
pprint.assign(pow, printing.OperatorPrinter("**", 1, "right"))

@@ -3121,7 +3121,7 @@ def matmul(x1: "ArrayLike", x2: "ArrayLike", dtype: Optional["DTypeLike"] = None
"add",
"sub",
"mul",
"true_div",
"true_divide",
"int_div",
"floor_div",
"ceil_intdiv",
6 changes: 3 additions & 3 deletions aesara/tensor/nnet/basic.py
@@ -40,7 +40,7 @@
softplus,
)
from aesara.tensor.math import sum as at_sum
- from aesara.tensor.math import tanh, tensordot, true_div
+ from aesara.tensor.math import tanh, tensordot, true_divide
from aesara.tensor.nnet.blocksparse import sparse_block_dot
from aesara.tensor.rewriting.basic import (
register_canonicalize,
@@ -1342,7 +1342,7 @@ def local_advanced_indexing_crossentropy_onehot_grad(fgraph, node):
out_grad = -out_grad
incr = incr.owner.inputs[0]

- if incr.owner and incr.owner.op == true_div:
+ if incr.owner and incr.owner.op == true_divide:
num, denom = incr.owner.inputs

# set out_grad according to the numerator, it may be divided later
@@ -1406,7 +1406,7 @@ def local_advanced_indexing_crossentropy_onehot_grad(fgraph, node):
# it was really case 1.

# Second case
- elif d_sm.owner and d_sm.owner.op == true_div:
+ elif d_sm.owner and d_sm.owner.op == true_divide:
# we're looking for
# AdvIncSubtensor(zeros, grad_nll, arange(len(y)), y) / softmax
try:
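The rewrite above pattern-matches on the graph: it asks whether a variable was produced by the (now renamed) `true_divide` op and, if so, pulls the numerator and denominator out of the owning `Apply` node. A minimal sketch of that access pattern, assuming the tensor-level `true_divide` exported by `aesara.tensor.math` after this PR:

```python
import aesara.tensor as at
from aesara.tensor.math import true_divide

# Sketch of the owner/op test used in the rewrite above; `/` on tensor
# variables builds an elementwise true-division node.
x = at.vector("x")
y = at.vector("y")
z = x / y

apply_node = z.owner
print(apply_node.op == true_divide)   # the check the rewrite performs
num, denom = apply_node.inputs        # numerator and denominator, as in the hunk
```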
4 changes: 2 additions & 2 deletions aesara/tensor/nnet/batchnorm.py
@@ -5,7 +5,7 @@
from aesara.graph.basic import Apply
from aesara.graph.op import Op
from aesara.graph.rewriting.basic import copy_stack_trace, node_rewriter
- from aesara.scalar import Composite, add, as_common_dtype, mul, sub, true_div
+ from aesara.scalar import Composite, add, as_common_dtype, mul, sub, true_divide
from aesara.tensor import basic as at
from aesara.tensor.basic import as_tensor_variable
from aesara.tensor.elemwise import Elemwise
@@ -27,7 +27,7 @@ def __init__(self, dtype):
std = aesara.scalar.ScalarType(dtype=dtype).make_variable()
gamma = aesara.scalar.ScalarType(dtype=dtype).make_variable()
beta = aesara.scalar.ScalarType(dtype=dtype).make_variable()
- o = add(mul(true_div(sub(x, mean), std), gamma), beta)
+ o = add(mul(true_divide(sub(x, mean), std), gamma), beta)
inputs = [x, mean, std, gamma, beta]
outputs = [o]
super().__init__(inputs, outputs)
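For reference, the scalar `Composite` built above is the usual batch-norm affine transform; in plain NumPy terms (not Aesara code):

```python
import numpy as np

# NumPy rendering of the Composite graph above:
# o = ((x - mean) / std) * gamma + beta
def batchnorm_scalar(x, mean, std, gamma, beta):
    return np.true_divide(x - mean, std) * gamma + beta

print(batchnorm_scalar(2.0, 1.0, 0.5, 3.0, 0.1))   # 6.1
```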