replaced true_div with true_divide

sudarsan2k5 committed Feb 7, 2023
1 parent a5008d1 commit d0ce50d
Showing 22 changed files with 105 additions and 103 deletions.
2 changes: 1 addition & 1 deletion README.rst
@@ -60,7 +60,7 @@ Getting started
aesara.dprint(d)
# Elemwise{add,no_inplace} [id A] ''
# |InplaceDimShuffle{x} [id B] ''
# | |Elemwise{true_div,no_inplace} [id C] ''
# | |Elemwise{true_divide,no_inplace} [id C] ''
# | |a [id D]
# | |a [id D]
# |dot [id E] ''
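A minimal sketch that rebuilds a graph like the one printed above; the variable setup follows the README's earlier getting-started example, so the exact names (`a`, `M`, `v`) are assumptions here:

import aesara
import aesara.tensor as at

a = at.dscalar("a")
M = at.dmatrix("M")
v = at.dvector("v")

# `a / a` is the part that produces the Elemwise{true_divide} node above.
d = a / a + (M + a).dot(v)
aesara.dprint(d)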
8 changes: 4 additions & 4 deletions aesara/scalar/basic.py
@@ -793,8 +793,8 @@ def __sub__(self, other):
def __mul__(self, other):
return mul(self, other)

def __truediv__(self, other):
return true_div(self, other)
return true_divide(self, other)

def __floordiv__(self, other):
return int_div(self, other)
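The method name matters here: Python's `/` operator dispatches to `__truediv__`, so only the body changes to call the renamed Op. A small sketch of that dispatch, assuming the predefined `float64` scalar type from this module:

import aesara.scalar as aes

x = aes.float64("x")
y = aes.float64("y")

# `x / y` goes through ScalarVariable.__truediv__, which now
# constructs the renamed `true_divide` scalar Op.
z = x / y
print(z.owner.op)  # prints the true_divide Op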
@@ -2035,7 +2035,7 @@ def grad(self, inputs, gout):
return first_part, second_part


true_div = TrueDiv(upcast_out, name="true_div")
true_divide = TrueDiv(upcast_out, name="true_divide")


class IntDiv(BinaryScalarOp):
@@ -2869,7 +2869,7 @@ def c_code(self, node, name, inputs, outputs, sub):
pprint.assign(mul, printing.OperatorPrinter("*", -1, "either"))
pprint.assign(sub, printing.OperatorPrinter("-", -2, "left"))
pprint.assign(neg, printing.OperatorPrinter("-", 0, "either"))
pprint.assign(true_div, printing.OperatorPrinter("/", -1, "left"))
pprint.assign(true_divide, printing.OperatorPrinter("/", -1, "left"))
pprint.assign(int_div, printing.OperatorPrinter("//", -1, "left"))
pprint.assign(pow, printing.OperatorPrinter("**", 1, "right"))
pprint.assign(mod, printing.OperatorPrinter("%", -1, "left"))
4 changes: 2 additions & 2 deletions aesara/scalar/math.py
@@ -28,7 +28,7 @@
log,
log1p,
switch,
true_div,
true_divide,
upcast,
upgrade_to_float,
upgrade_to_float64,
@@ -1241,7 +1241,7 @@ def impl(self, x):
def grad(self, inp, grads):
(x,) = inp
(gz,) = grads
res = true_div(-1.0, expm1(-x))
res = true_divide(-1.0, expm1(-x))
# Correct gradient at 0.0 to be -inf
res = switch(isinf(res), -np.inf, res)
return [gz * res]
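For context, this hunk sits in what looks like the gradient of a log(1 - exp(x))-style scalar op: d/dx log(1 - exp(x)) = -exp(x) / (1 - exp(x)) = -1 / expm1(-x), which diverges to -inf as x approaches 0, hence the `switch` correction. A quick NumPy spot-check of that identity:

import numpy as np

x = -0.5
analytic = -np.exp(x) / (1.0 - np.exp(x))
via_expm1 = -1.0 / np.expm1(-x)
assert np.isclose(analytic, via_expm1)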
2 changes: 1 addition & 1 deletion aesara/sparse/basic.py
@@ -323,7 +323,7 @@ def to_dense(self, *args, **kwargs):
"__pow__",
"__mod__",
"__divmod__",
"__truediv__",
"__truedivide__",
"__floordiv__",
"reshape",
"dimshuffle",
2 changes: 1 addition & 1 deletion aesara/tensor/elemwise.py
@@ -325,7 +325,7 @@ class Elemwise(OpenMPOp):
-``Elemwise(add, {0 : 1})``: represents ``+=`` on the second argument ``y += x``
-``Elemwise(mul)(np.random.random((10, 5)), np.random.random((1, 5)))``:
the second input is completed along the first dimension to match the first input
-``Elemwise(true_div)(np.random.random((10, 5)), np.random.random((10, 1)))``: same but along the
-``Elemwise(true_divide)(np.random.random((10, 5)), np.random.random((10, 1)))``: same but along the
second dimension
-``Elemwise(int_div)(np.random.random((1, 5)), np.random.random((10, 1)))``:
the output has size ``(10, 5)``.
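A runnable sketch of the `true_divide` broadcasting case described above; note that in Aesara the size-1 dimension must be declared broadcastable, so a column type stands in for the docstring's second input:

import numpy as np
import aesara
import aesara.tensor as at

x = at.dmatrix("x")
y = at.dcol("y")  # (n, 1) column, broadcastable along the second dimension

f = aesara.function([x, y], x / y)
out = f(np.random.random((10, 5)), np.random.random((10, 1)))
assert out.shape == (10, 5)  # second input completed along the second dimension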
14 changes: 7 additions & 7 deletions aesara/tensor/math.py
@@ -1608,7 +1608,7 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False, acc_dtype=None)

# Cast shp into a float type
# TODO Once we have a consistent casting policy, we could simply
# use true_div.
# use true_divide.
if s.dtype in ("float16", "float32", "complex64"):
shp = cast(shp, "float32")
else:
@@ -1625,7 +1625,7 @@ def mean(input, axis=None, dtype=None, op=False, keepdims=False, acc_dtype=None)

# This sequential division will possibly be optimized by Aesara:
for i in axis:
s = true_div(s, shp[i])
s = true_divide(s, shp[i])

# This can happen when axis is an empty list/tuple
if s.dtype != shp.dtype and s.dtype in discrete_dtypes:
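The loop above divides the running sum by one axis length at a time, which is equivalent to dividing by the product of the reduced axis lengths. A NumPy sketch of the same arithmetic:

import numpy as np

a = np.arange(6.0).reshape(2, 3)
s = a.sum()
for n in a.shape:  # mirrors `s = true_divide(s, shp[i])` per axis
    s = s / n
assert np.isclose(s, a.mean())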
@@ -1697,7 +1697,7 @@ def var(input, axis=None, ddof=0, keepdims=False, corrected=False):
shp = shape(input) - ddof
v = sum((centered_input**two), axis=axis, keepdims=keepdims)
for i in axis:
v = true_div(v, shp[i])
v = true_divide(v, shp[i])

# use 'corrected_two_pass' algorithm
if corrected:
@@ -1708,7 +1708,7 @@ def var(input, axis=None, ddof=0, keepdims=False, corrected=False):
shp_inp = shape(input)
error = sum(centered_input, axis=axis, keepdims=keepdims) ** 2
for i in axis:
error = true_div(error, shp[i] * shp_inp[i])
error = true_divide(error, shp[i] * shp_inp[i])
v = v - error

v.name = "var"
@@ -1794,7 +1794,7 @@ def mul(a, *other_terms):


@scalar_elemwise
def true_div(a, b):
def true_divide(a, b):
"""elementwise [true] division (inverse of multiplication)"""
# see decorator for function body
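After the rename, the tensor-level Op is exposed as `true_divide`, and the `/` operator on tensor variables builds the same kind of node; a small sketch, treating the Op equality check as an assumption about `Elemwise.__eq__`:

import aesara.tensor as at
from aesara.tensor.math import true_divide

a = at.dvector("a")
b = at.dvector("b")

# Both spellings should construct an Elemwise node around the same scalar Op.
assert (a / b).owner.op == true_divide(a, b).owner.op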

@@ -1876,7 +1876,7 @@ def clip(x, min, max):
pprint.assign(mul, printing.OperatorPrinter("*", -1, "either"))
pprint.assign(sub, printing.OperatorPrinter("-", -2, "left"))
pprint.assign(neg, printing.OperatorPrinter("-", 0, "either"))
pprint.assign(true_div, printing.OperatorPrinter("/", -1, "left"))
pprint.assign(true_divide, printing.OperatorPrinter("/", -1, "left"))
pprint.assign(int_div, printing.OperatorPrinter("//", -1, "left"))
pprint.assign(pow, printing.OperatorPrinter("**", 1, "right"))
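Since the printer keeps the same infix symbol, pretty-printed expressions look identical after the rename. A small check (the output shown in the comment is my expectation, not verified output):

import aesara.tensor as at
from aesara.printing import pprint

x = at.vector("x")
y = at.vector("y")

# The OperatorPrinter registered above renders the renamed Op as "/".
print(pprint(x / y))  # expected: (x / y)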

@@ -3121,7 +3121,7 @@ def matmul(x1: "ArrayLike", x2: "ArrayLike", dtype: Optional["DTypeLike"] = None
"add",
"sub",
"mul",
"true_div",
"true_divide",
"int_div",
"floor_div",
"ceil_intdiv",
6 changes: 3 additions & 3 deletions aesara/tensor/nnet/basic.py
@@ -40,7 +40,7 @@
softplus,
)
from aesara.tensor.math import sum as at_sum
from aesara.tensor.math import tanh, tensordot, true_div
from aesara.tensor.math import tanh, tensordot, true_divide
from aesara.tensor.nnet.blocksparse import sparse_block_dot
from aesara.tensor.rewriting.basic import (
register_canonicalize,
@@ -1342,7 +1342,7 @@ def local_advanced_indexing_crossentropy_onehot_grad(fgraph, node):
out_grad = -out_grad
incr = incr.owner.inputs[0]

if incr.owner and incr.owner.op == true_div:
if incr.owner and incr.owner.op == true_divide:
num, denom = incr.owner.inputs

# set out_grad according to the numerator, it may be divided later
@@ -1406,7 +1406,7 @@ def local_advanced_indexing_crossentropy_onehot_grad(fgraph, node):
# it was really case 1.

# Second case
elif d_sm.owner and d_sm.owner.op == true_div:
elif d_sm.owner and d_sm.owner.op == true_divide:
# we're looking for
# AdvIncSubtensor(zeros, grad_nll, arange(len(y)), y) / softmax
try:
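Both branches above recognize division subgraphs the same way: take a variable's owning Apply node and compare its `op` against the renamed `true_divide`. A minimal sketch of that pattern test:

import aesara.tensor as at
from aesara.tensor.math import true_divide

x = at.dvector("x")
y = at.dvector("y")
z = x / y

# The rewrite's `incr.owner.op == true_divide` test matches nodes like this,
# after which the numerator and denominator can be pulled apart.
assert z.owner.op == true_divide
num, denom = z.owner.inputs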
4 changes: 2 additions & 2 deletions aesara/tensor/nnet/batchnorm.py
@@ -5,7 +5,7 @@
from aesara.graph.basic import Apply
from aesara.graph.op import Op
from aesara.graph.rewriting.basic import copy_stack_trace, node_rewriter
from aesara.scalar import Composite, add, as_common_dtype, mul, sub, true_div
from aesara.scalar import Composite, add, as_common_dtype, mul, sub, true_divide
from aesara.tensor import basic as at
from aesara.tensor.basic import as_tensor_variable
from aesara.tensor.elemwise import Elemwise
@@ -27,7 +27,7 @@ def __init__(self, dtype):
std = aesara.scalar.ScalarType(dtype=dtype).make_variable()
gamma = aesara.scalar.ScalarType(dtype=dtype).make_variable()
beta = aesara.scalar.ScalarType(dtype=dtype).make_variable()
o = add(mul(true_div(sub(x, mean), std), gamma), beta)
o = add(mul(true_divide(sub(x, mean), std), gamma), beta)
inputs = [x, mean, std, gamma, beta]
outputs = [o]
super().__init__(inputs, outputs)
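The Composite fuses the standard batch-norm affine transform, o = ((x - mean) / std) * gamma + beta, into one scalar graph. A plain-Python spot-check of the formula:

import numpy as np

x, mean, std, gamma, beta = 2.0, 1.0, 0.5, 3.0, 0.25
o = ((x - mean) / std) * gamma + beta  # same expression as the Composite
assert np.isclose(o, 6.25)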
