
Problems with clamp/clamp_min/clamp_max/minimum/maximum #93784

@ngimel

Description


Repro:

import torch
import torchdynamo
import torchinductor
from torchinductor import config

torchinductor.config.debug = True

def relu(x0):
    return torch.nn.functional.relu(x0)

def maximum(x0, x1):
    return (torch.clamp_min(x0, 0.5), torch.maximum(x0, x1))

def clamp_min_tensor(x0, x1):
    return torch.clamp_min(x0, x1)

device = 'cuda'
dtype = torch.half
x0 = torch.arange(-3, 4, 1, device=device, dtype=dtype)
x1 = torch.zeros_like(x0)
x0[1] = float('nan')
x0[-1] = float('nan')
x1[2] = float('nan')
print(x0)
optimize_ctx = torchdynamo.optimize("inductor")
with optimize_ctx:
    out_inductor = (relu(x0), maximum(x0, x1))
out_eager = (relu(x0), maximum(x0, x1))
print(out_inductor)  # clamp_min doesn't propagate nans, maximum propagates nans only from one of the args
print(out_eager)
x0 = torch.randint(4, (7,), device=device)
with optimize_ctx:
    out_inductor = maximum(x0, x1)
out_eager = maximum(x0, x1)
print(out_inductor)  # clamp_min doesn't type promote
print(out_eager)
x0 = torch.randn(7, device=device, dtype=dtype)
with optimize_ctx:
    out_inductor = clamp_min_tensor(x0, x1)  # errors out

To summarize the failures (see the eager reference sketch below):

  • clamp_min doesn't propagate nans
  • maximum propagates nans only from the second arg
  • clamp_min doesn't type promote
  • clamp_min with a tensor arg errors out
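
For reference, a minimal eager-only sketch of the semantics inductor should be matching (toy inputs made up for illustration, not the tensors from the repro above):

import torch

a = torch.tensor([0.25, float('nan'), 1.0])
b = torch.tensor([float('nan'), 0.5, 0.5])

# clamp_min keeps nan from the input: tensor([0.5000, nan, 1.0000])
print(torch.clamp_min(a, 0.5))
# maximum propagates nan from either argument: tensor([nan, nan, 1.0000])
print(torch.maximum(a, b))
# maximum type-promotes int64/float32 inputs to float32
print(torch.maximum(torch.arange(3), torch.zeros(3)).dtype)
# clamp_min accepts a tensor min in eager mode (no error)
print(torch.clamp_min(a, b))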

Luckily, relu propagates nans correctly.
More systematically, we should be running OpInfo tests with inductor.
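
A rough sketch of what such a check could look like is below; it leans on the internal OpInfo database (torch.testing._internal), and the helper name check_op_against_eager is made up for illustration, so treat it as a sketch rather than the intended test harness:

import torch
import torchdynamo
from torch.testing._internal.common_methods_invocations import op_db

def check_op_against_eager(op, device="cuda", dtype=torch.half):
    # Wrap the op in a plain Python function so dynamo has a frame to compile.
    def fn(*args, **kwargs):
        return op.op(*args, **kwargs)

    for sample in op.sample_inputs(device, dtype):
        args = (sample.input, *sample.args)
        expected = fn(*args, **sample.kwargs)        # eager reference
        with torchdynamo.optimize("inductor"):
            actual = fn(*args, **sample.kwargs)      # compiled with inductor
        torch.testing.assert_close(actual, expected, equal_nan=True)

# Exercise the ops from this issue against their OpInfo sample inputs
# (names not present in op_db are simply skipped by the filter).
for op in op_db:
    if op.name in ("clamp", "clamp_min", "clamp_max", "minimum", "maximum"):
        check_op_against_eager(op)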
