3,324 changes: 3,324 additions & 0 deletions docs/source/learn/core_notebooks/dims_module.ipynb

Large diffs are not rendered by default.

3 changes: 2 additions & 1 deletion docs/source/learn/core_notebooks/index.md
Expand Up @@ -5,11 +5,12 @@
:maxdepth: 1

pymc_overview
GLM_linear
model_comparison
posterior_predictive
dimensionality
pymc_pytensor
dims_module
GLM_linear
Gaussian_Processes
:::

Expand Down
33 changes: 32 additions & 1 deletion pymc/backends/arviz.py
Expand Up @@ -50,10 +50,41 @@

_log = logging.getLogger(__name__)


RAISE_ON_INCOMPATIBLE_COORD_LENGTHS = False


# random variable object ...
Var = Any


def dict_to_dataset_drop_incompatible_coords(vars_dict, *args, dims, coords, **kwargs):
safe_coords = coords

if not RAISE_ON_INCOMPATIBLE_COORD_LENGTHS:
coords_lengths = {k: len(v) for k, v in coords.items()}
for var_name, var in vars_dict.items():
# Iterate in reverse because of the chain/draw batch dimensions
for dim, dim_length in zip(reversed(dims.get(var_name, ())), reversed(var.shape)):
coord_length = coords_lengths.get(dim, None)
if (coord_length is not None) and (coord_length != dim_length):
warnings.warn(
f"Incompatible coordinate length of {coord_length} for dimension '{dim}' of variable '{var_name}'.\n"
"This usually happens when a sliced or concatenated variable is wrapped as a `pymc.dims.Deterministic`. "
"The original coordinates for this dim will not be included in the returned dataset for any of the variables. "
"Instead they will default to `np.arange(var_length)` and the shorter variables will be right-padded with nan.\n"
"To make this warning into an error, set `pymc.backends.arviz.RAISE_ON_INCOMPATIBLE_COORD_LENGTHS` to `True`.",
UserWarning,
)
if safe_coords is coords:
safe_coords = coords.copy()
safe_coords.pop(dim)
coords_lengths.pop(dim)

# FIXME: Would be better to drop coordinates altogether, but arviz defaults to `np.arange(var_length)`
return dict_to_dataset(vars_dict, *args, dims=dims, coords=safe_coords, **kwargs)


def find_observations(model: "Model") -> dict[str, Var]:
"""If there are observations available, return them as a dictionary."""
observations = {}
Expand Down Expand Up @@ -366,7 +397,7 @@ def priors_to_xarray(self):
priors_dict[group] = (
None
if var_names is None
else dict_to_dataset(
else dict_to_dataset_drop_incompatible_coords(
{k: np.expand_dims(self.prior[k], 0) for k in var_names},
library=pymc,
coords=self.coords,
Expand Down
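For reference, the new coordinate handling only warns by default and drops the offending coords; per the warning text above, strict behaviour can be enabled through the module-level flag. A minimal sketch (not part of this diff):

import pymc.backends.arviz as pm_arviz

# Default: incompatible coordinate lengths emit a UserWarning and the coords are dropped.
# Opt in to a hard error instead:
pm_arviz.RAISE_ON_INCOMPATIBLE_COORD_LENGTHS = True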
24 changes: 15 additions & 9 deletions pymc/data.py
Expand Up @@ -13,11 +13,12 @@
# limitations under the License.

import io
import typing
import urllib.request

from collections.abc import Sequence
from copy import copy
from typing import cast
from typing import Union, cast

import numpy as np
import pandas as pd
Expand All @@ -32,12 +33,13 @@
from pytensor.tensor.random.basic import IntegersRV
from pytensor.tensor.variable import TensorConstant, TensorVariable

import pymc as pm

from pymc.logprob.utils import rvs_in_graph
from pymc.pytensorf import convert_data
from pymc.exceptions import ShapeError
from pymc.pytensorf import convert_data, rvs_in_graph
from pymc.vartypes import isgenerator

if typing.TYPE_CHECKING:
from pymc.model.core import Model

__all__ = [
"Data",
"Minibatch",
Expand Down Expand Up @@ -197,7 +199,7 @@ def determine_coords(

if isinstance(value, np.ndarray) and dims is not None:
if len(dims) != value.ndim:
raise pm.exceptions.ShapeError(
raise ShapeError(
"Invalid data shape. The rank of the dataset must match the length of `dims`.",
actual=value.shape,
expected=value.ndim,
Expand All @@ -222,6 +224,7 @@ def Data(
dims: Sequence[str] | None = None,
coords: dict[str, Sequence | np.ndarray] | None = None,
infer_dims_and_coords=False,
model: Union["Model", None] = None,
**kwargs,
) -> SharedVariable | TensorConstant:
"""Create a data container that registers a data variable with the model.
Expand Down Expand Up @@ -286,15 +289,18 @@ def Data(
... model.set_data("data", data_vals)
... idatas.append(pm.sample())
"""
from pymc.model.core import modelcontext

if coords is None:
coords = {}

if isinstance(value, list):
value = np.array(value)

# Add data container to the named variables of the model.
model = pm.Model.get_context(error_if_none=False)
if model is None:
try:
model = modelcontext(model)
except TypeError:
raise TypeError(
"No model on context stack, which is needed to instantiate a data container. "
"Add variable inside a 'with model:' block."
Expand All @@ -321,7 +327,7 @@ def Data(
if isinstance(dims, str):
dims = (dims,)
if not (dims is None or len(dims) == x.ndim):
raise pm.exceptions.ShapeError(
raise ShapeError(
"Length of `dims` must match the dimensions of the dataset.",
actual=len(dims),
expected=x.ndim,
Expand Down
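The hunk above adds an explicit `model` keyword to `pm.Data`, falling back to `modelcontext` when it is omitted. A small usage sketch (names are illustrative, not from this diff):

import numpy as np
import pymc as pm

m = pm.Model(coords={"obs": ["a", "b", "c"]})
# No `with m:` block needed; the model is passed explicitly
x = pm.Data("x", np.array([1.0, 2.0, 3.0]), dims="obs", model=m)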
74 changes: 74 additions & 0 deletions pymc/dims/__init__.py
@@ -0,0 +1,74 @@
# Copyright 2025 - present The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def __init__():
"""Make PyMC aware of the xtensor functionality.
This should be done eagerly once development matures.
"""
import datetime
import warnings

from pytensor.compile import optdb

from pymc.initial_point import initial_point_rewrites_db
from pymc.logprob.abstract import MeasurableOp
from pymc.logprob.rewriting import logprob_rewrites_db

# Filter the PyTensor xtensor warning; we emit our own warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
import pytensor.xtensor

from pytensor.xtensor.vectorization import XRV

# Make PyMC aware of xtensor functionality
MeasurableOp.register(XRV)
logprob_rewrites_db.register(
"pre_lower_xtensor", optdb.query("+lower_xtensor"), "basic", position=0.1
)
logprob_rewrites_db.register(
"post_lower_xtensor", optdb.query("+lower_xtensor"), "cleanup", position=5.1
)
initial_point_rewrites_db.register(
"lower_xtensor", optdb.query("+lower_xtensor"), "basic", position=0.1
)

# TODO: Better model of probability of bugs
day_of_conception = datetime.date(2025, 6, 17)
day_of_last_bug = datetime.date(2025, 6, 30)
today = datetime.date.today()
days_with_bugs = (day_of_last_bug - day_of_conception).days
days_without_bugs = (today - day_of_last_bug).days
p = 1 - (days_without_bugs / (days_without_bugs + days_with_bugs + 10))
if p > 0.05:
warnings.warn(
f"The `pymc.dims` module is experimental and may contain critical bugs (p={p:.3f}).\n"
"Please report any issues you encounter at https://github.com/pymc-devs/pymc/issues.\n"
"API changes are expected in future releases.\n",
UserWarning,
stacklevel=2,
)


__init__()
del __init__

from pytensor.xtensor import as_xtensor, broadcast, concat, dot, full_like, ones_like, zeros_like
from pytensor.xtensor.basic import tensor_from_xtensor

from pymc.dims import math
from pymc.dims.distributions import *
from pymc.dims.model import Data, Deterministic, Potential
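Importing the module runs the registration hook above, which may emit the experimental-API warning depending on the date-based heuristic. A sketch of silencing it deliberately (purely illustrative):

import warnings

with warnings.catch_warnings():
    # The dims module warns that its API is experimental
    warnings.simplefilter("ignore", UserWarning)
    import pymc.dims as pmd

# pmd re-exports as_xtensor, concat, dot, the math namespace,
# the dims-aware distributions, and the Data/Deterministic/Potential wrappers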
15 changes: 15 additions & 0 deletions pymc/dims/distributions/__init__.py
@@ -0,0 +1,15 @@
# Copyright 2025 - present The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pymc.dims.distributions.scalar import *
from pymc.dims.distributions.vector import *
284 changes: 284 additions & 0 deletions pymc/dims/distributions/core.py
@@ -0,0 +1,284 @@
# Copyright 2025 - present The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable, Sequence
from itertools import chain
from typing import cast

import numpy as np

from pytensor.graph import node_rewriter
from pytensor.graph.basic import Variable
from pytensor.tensor.elemwise import DimShuffle
from pytensor.tensor.random.op import RandomVariable
from pytensor.xtensor import as_xtensor
from pytensor.xtensor.basic import XTensorFromTensor, xtensor_from_tensor
from pytensor.xtensor.type import XTensorVariable

from pymc import SymbolicRandomVariable, modelcontext
from pymc.dims.distributions.transforms import DimTransform, log_odds_transform, log_transform
from pymc.distributions.distribution import _support_point, support_point
from pymc.distributions.shape_utils import DimsWithEllipsis, convert_dims_with_ellipsis
from pymc.logprob.abstract import MeasurableOp, _logprob
from pymc.logprob.rewriting import measurable_ir_rewrites_db
from pymc.logprob.tensor import MeasurableDimShuffle
from pymc.logprob.utils import filter_measurable_variables
from pymc.util import UNSET


@_support_point.register(DimShuffle)
def dimshuffle_support_point(ds_op, _, rv):
# We implement support point for DimShuffle because
# DimDistribution can register a transposed version of a variable.

return ds_op(support_point(rv))


@_support_point.register(XTensorFromTensor)
def xtensor_from_tensor_support_point(xtensor_op, _, rv):
# We remove the xtensor_from_tensor operation, so initial_point doesn't have to do a further lowering
return xtensor_op(support_point(rv))


class MeasurableXTensorFromTensor(MeasurableOp, XTensorFromTensor):
__props__ = ("dims", "core_dims") # type: ignore[assignment]

def __init__(self, dims, core_dims):
super().__init__(dims=dims)
self.core_dims = tuple(core_dims) if core_dims is not None else None


@node_rewriter([XTensorFromTensor])
def find_measurable_xtensor_from_tensor(fgraph, node) -> list[XTensorVariable] | None:
if isinstance(node.op, MeasurableXTensorFromTensor):
return None

xs = filter_measurable_variables(node.inputs)

if not xs:
# Check if we have a transposition instead
# The rewrite that introduces measurable transposes refuses to apply to multivariate RVs
# So we have a chance of inferring the core dims!
[ds] = node.inputs
ds_node = ds.owner
if not (
ds_node is not None
and isinstance(ds_node.op, DimShuffle)
and ds_node.op.is_transpose
and filter_measurable_variables(ds_node.inputs)
):
return None
[x] = ds_node.inputs
if not (
x.owner is not None and isinstance(x.owner.op, RandomVariable | SymbolicRandomVariable)
):
return None

measurable_x = MeasurableDimShuffle(**ds_node.op._props_dict())(x) # type: ignore[attr-defined]

ndim_supp = x.owner.op.ndim_supp
if ndim_supp:
inverse_transpose = np.argsort(ds_node.op.shuffle)
dims = node.op.dims
dims_before_transpose = tuple(dims[i] for i in inverse_transpose)
core_dims = dims_before_transpose[-ndim_supp:]
else:
core_dims = ()

new_out = MeasurableXTensorFromTensor(dims=node.op.dims, core_dims=core_dims)(measurable_x)
else:
# If this happens we know there's no measurable transpose in between and we can
# safely infer the core_dims positionally when the inner logp is returned
new_out = MeasurableXTensorFromTensor(dims=node.op.dims, core_dims=None)(*node.inputs)
return [cast(XTensorVariable, new_out)]


@_logprob.register(MeasurableXTensorFromTensor)
def measurable_xtensor_from_tensor(op, values, rv, **kwargs):
rv_logp = _logprob(rv.owner.op, tuple(v.values for v in values), *rv.owner.inputs, **kwargs)
if op.core_dims is None:
# The core_dims of the inner rv are on the right
dims = op.dims[: rv_logp.ndim]
else:
# We inferred where the core_dims are!
dims = [d for d in op.dims if d not in op.core_dims]
return xtensor_from_tensor(rv_logp, dims=dims)


measurable_ir_rewrites_db.register(
"measurable_xtensor_from_tensor", find_measurable_xtensor_from_tensor, "basic", "xtensor"
)


class DimDistribution:
"""Base class for PyMC distributions that wrap pytensor.xtensor.random operations and follow xarray-like semantics."""

xrv_op: Callable
default_transform: DimTransform | None = None

@staticmethod
def _as_xtensor(x):
try:
return as_xtensor(x)
except TypeError:
raise ValueError(
f"Variable {x} must have dims associated with it.\n"
"To avoid subtle bugs, PyMC does not make any assumptions about the dims of parameters.\n"
"Use `as_xtensor` with the `dims` keyword argument to specify the dims explicitly."
)

def __new__(
cls,
name: str,
*dist_params,
dims: DimsWithEllipsis | None = None,
initval=None,
observed=None,
total_size=None,
transform=UNSET,
default_transform=UNSET,
model=None,
**kwargs,
):
try:
model = modelcontext(model)
except TypeError:
raise TypeError(
"No model on context stack, which is needed to instantiate distributions. "
"Add variable inside a 'with model:' block, or use the '.dist' syntax for a standalone distribution."
)

if not isinstance(name, str):
raise TypeError(f"Name needs to be a string but got: {name}")

dims = convert_dims_with_ellipsis(dims)
if dims is None:
dim_lengths = {}
else:
try:
dim_lengths = {dim: model.dim_lengths[dim] for dim in dims if dim is not Ellipsis}
except KeyError:
raise ValueError(
f"Not all dims {dims} are part of the model coords. "
f"Add them at initialization time or use `model.add_coord` before defining the distribution."
)

if observed is not None:
observed = cls._as_xtensor(observed)

# Propagate observed dims to dim_lengths
for observed_dim in observed.type.dims:
if observed_dim not in dim_lengths:
dim_lengths[observed_dim] = model.dim_lengths[observed_dim]

rv = cls.dist(*dist_params, dim_lengths=dim_lengths, **kwargs)

# User provided dims must specify all dims or use ellipsis
if dims is not None:
if (... not in dims) and (set(dims) != set(rv.type.dims)):
raise ValueError(
f"Provided dims {dims} do not match the distribution's output dims {rv.type.dims}. "
"Use ellipsis to specify all other dimensions."
)
# Use provided dims to transpose the output to the desired order
rv = rv.transpose(*dims)

rv_dims = rv.type.dims
if observed is None:
if default_transform is UNSET:
default_transform = cls.default_transform
else:
# Align observed dims with those of the RV
# TODO: If this fails give a more informative error message
observed = observed.transpose(*rv_dims)

# Check user didn't pass regular transforms
if transform not in (UNSET, None):
if not isinstance(transform, DimTransform):
raise TypeError(
f"Transform must be a DimTransform, from pymc.dims.transforms, but got {type(transform)}."
)
if default_transform not in (UNSET, None):
if not isinstance(default_transform, DimTransform):
raise TypeError(
f"default_transform must be a DimTransform, from pymc.dims.transforms, but got {type(default_transform)}."
)

rv = model.register_rv(
rv,
name=name,
observed=observed,
total_size=total_size,
dims=rv_dims,
transform=transform,
default_transform=default_transform,
initval=initval,
)

return as_xtensor(rv, dims=rv_dims)

@classmethod
def dist(
cls,
dist_params,
*,
dim_lengths: dict[str, Variable | int] | None = None,
core_dims: str | Sequence[str] | None = None,
**kwargs,
) -> XTensorVariable:
for invalid_kwarg in ("size", "shape", "dims"):
if invalid_kwarg in kwargs:
raise TypeError(f"DimDistribution does not accept {invalid_kwarg} argument.")

# XRV requires only extra_dims, not dims
dist_params = [cls._as_xtensor(param) for param in dist_params]

if dim_lengths is None:
extra_dims = None
else:
# Exclude dims that are implied by the parameters or core_dims
implied_dims = set(chain.from_iterable(param.type.dims for param in dist_params))
if core_dims is not None:
if isinstance(core_dims, str):
implied_dims.add(core_dims)
else:
implied_dims.update(core_dims)

extra_dims = {
dim: length for dim, length in dim_lengths.items() if dim not in implied_dims
}
return cls.xrv_op(*dist_params, extra_dims=extra_dims, core_dims=core_dims, **kwargs)


class VectorDimDistribution(DimDistribution):
@classmethod
def dist(self, *args, core_dims: str | Sequence[str] | None = None, **kwargs):
# Add a helpful error message if core_dims is not provided
if core_dims is None:
raise ValueError(
f"{self.__name__} requires core_dims to be specified, as it involves non-scalar inputs or outputs. "
"Check the documentation of the distribution for details."
)
return super().dist(*args, core_dims=core_dims, **kwargs)


class PositiveDimDistribution(DimDistribution):
"""Base class for positive continuous distributions."""

default_transform = log_transform


class UnitDimDistribution(DimDistribution):
"""Base class for unit-valued distributions."""

default_transform = log_odds_transform
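`DimDistribution.__new__` requires parameters to carry dims and lets `dims` use an ellipsis to stand in for the dimensions implied by the parameters. A sketch of both rules, assuming the scalar distributions defined further down in this PR:

import numpy as np
import pymc as pm
import pymc.dims as pmd
from pytensor.xtensor import as_xtensor

coords = {"city": ["A", "B"], "year": [2020, 2021]}
with pm.Model(coords=coords) as m:
    # Parameters must have explicit dims; a bare numpy array would raise the ValueError above
    city_mu = as_xtensor(np.zeros(2), dims=("city",))
    # Ellipsis fills in the dims implied by the parameters ("city" here)
    y = pmd.Normal("y", mu=city_mu, dims=("year", ...))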
191 changes: 191 additions & 0 deletions pymc/dims/distributions/scalar.py
@@ -0,0 +1,191 @@
# Copyright 2025 - present The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytensor.xtensor as ptx
import pytensor.xtensor.random as pxr

from pytensor.xtensor import as_xtensor

from pymc.dims.distributions.core import (
DimDistribution,
PositiveDimDistribution,
UnitDimDistribution,
)
from pymc.distributions.continuous import Beta as RegularBeta
from pymc.distributions.continuous import Gamma as RegularGamma
from pymc.distributions.continuous import HalfStudentTRV, flat, halfflat


def _get_sigma_from_either_sigma_or_tau(*, sigma, tau):
if sigma is not None and tau is not None:
raise ValueError("Can't pass both tau and sigma")

if sigma is None and tau is None:
return 1.0

if sigma is not None:
return sigma

return ptx.math.reciprocal(ptx.math.sqrt(tau))


class Flat(DimDistribution):
xrv_op = pxr.as_xrv(flat)

@classmethod
def dist(cls, **kwargs):
return super().dist([], **kwargs)


class HalfFlat(PositiveDimDistribution):
xrv_op = pxr.as_xrv(halfflat, [], ())

@classmethod
def dist(cls, **kwargs):
return super().dist([], **kwargs)


class Normal(DimDistribution):
xrv_op = pxr.normal

@classmethod
def dist(cls, mu=0, sigma=None, *, tau=None, **kwargs):
sigma = _get_sigma_from_either_sigma_or_tau(sigma=sigma, tau=tau)
return super().dist([mu, sigma], **kwargs)


class HalfNormal(PositiveDimDistribution):
xrv_op = pxr.halfnormal

@classmethod
def dist(cls, sigma=None, *, tau=None, **kwargs):
sigma = _get_sigma_from_either_sigma_or_tau(sigma=sigma, tau=tau)
return super().dist([0.0, sigma], **kwargs)


class LogNormal(PositiveDimDistribution):
xrv_op = pxr.lognormal

@classmethod
def dist(cls, mu=0, sigma=None, *, tau=None, **kwargs):
sigma = _get_sigma_from_either_sigma_or_tau(sigma=sigma, tau=tau)
return super().dist([mu, sigma], **kwargs)


class StudentT(DimDistribution):
xrv_op = pxr.t

@classmethod
def dist(cls, nu, mu=0, sigma=None, *, lam=None, **kwargs):
sigma = _get_sigma_from_either_sigma_or_tau(sigma=sigma, tau=lam)
return super().dist([nu, mu, sigma], **kwargs)


class HalfStudentT(PositiveDimDistribution):
@classmethod
def dist(cls, nu, sigma=None, *, lam=None, **kwargs):
sigma = _get_sigma_from_either_sigma_or_tau(sigma=sigma, tau=lam)
return super().dist([nu, sigma], **kwargs)

@classmethod
def xrv_op(self, nu, sigma, core_dims=None, extra_dims=None, rng=None):
nu = as_xtensor(nu)
sigma = as_xtensor(sigma)
core_rv = HalfStudentTRV.rv_op(nu=nu.values, sigma=sigma.values).owner.op
xop = pxr.as_xrv(core_rv)
return xop(nu, sigma, core_dims=core_dims, extra_dims=extra_dims, rng=rng)


class Cauchy(DimDistribution):
xrv_op = pxr.cauchy

@classmethod
def dist(cls, alpha, beta, **kwargs):
return super().dist([alpha, beta], **kwargs)


class HalfCauchy(PositiveDimDistribution):
xrv_op = pxr.halfcauchy

@classmethod
def dist(cls, beta, **kwargs):
return super().dist([0.0, beta], **kwargs)


class Beta(UnitDimDistribution):
xrv_op = pxr.beta

@classmethod
def dist(cls, alpha=None, beta=None, *, mu=None, sigma=None, nu=None, **kwargs):
alpha, beta = RegularBeta.get_alpha_beta(alpha=alpha, beta=beta, mu=mu, sigma=sigma, nu=nu)
return super().dist([alpha, beta], **kwargs)


class Laplace(DimDistribution):
xrv_op = pxr.laplace

@classmethod
def dist(cls, mu=0, b=1, **kwargs):
return super().dist([mu, b], **kwargs)


class Exponential(PositiveDimDistribution):
xrv_op = pxr.exponential

@classmethod
def dist(cls, lam=None, *, scale=None, **kwargs):
if lam is None and scale is None:
scale = 1.0
elif lam is not None and scale is not None:
raise ValueError("Cannot pass both 'lam' and 'scale'. Use one of them.")
elif lam is not None:
scale = 1 / lam
return super().dist([scale], **kwargs)


class Gamma(PositiveDimDistribution):
xrv_op = pxr.gamma

@classmethod
def dist(cls, alpha=None, beta=None, *, mu=None, sigma=None, **kwargs):
if (alpha is not None) and (beta is not None):
pass
elif (mu is not None) and (sigma is not None):
# Use sign of sigma to not let negative sigma fly by
alpha = (mu**2 / sigma**2) * ptx.math.sign(sigma)
beta = mu / sigma**2
else:
raise ValueError(
"Incompatible parameterization. Either use alpha and beta, or mu and sigma."
)
alpha, beta = RegularGamma.get_alpha_beta(alpha=alpha, beta=beta, mu=mu, sigma=sigma)
return super().dist([alpha, ptx.math.reciprocal(beta)], **kwargs)


class InverseGamma(PositiveDimDistribution):
xrv_op = pxr.invgamma

@classmethod
def dist(cls, alpha=None, beta=None, *, mu=None, sigma=None, **kwargs):
if alpha is not None:
if beta is None:
beta = 1.0
elif (mu is not None) and (sigma is not None):
# Use sign of sigma to not let negative sigma fly by
alpha = ((2 * sigma**2 + mu**2) / sigma**2) * ptx.math.sign(sigma)
beta = mu * (mu**2 + sigma**2) / sigma**2
else:
raise ValueError(
"Incompatible parameterization. Either use alpha and (optionally) beta, or mu and sigma"
)
return super().dist([alpha, beta], **kwargs)
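A minimal model using the scalar dims distributions above (a sketch; coords and names are illustrative):

import pymc as pm
import pymc.dims as pmd

with pm.Model(coords={"city": ["A", "B", "C"]}) as m:
    mu = pmd.Normal("mu")             # scalar, no dims needed
    sigma = pmd.HalfNormal("sigma")   # log-transformed by default (PositiveDimDistribution)
    y = pmd.Normal("y", mu=mu, sigma=sigma, dims="city")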
95 changes: 95 additions & 0 deletions pymc/dims/distributions/transforms.py
@@ -0,0 +1,95 @@
# Copyright 2025 - present The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytensor.tensor as pt
import pytensor.xtensor as ptx

from pymc.logprob.transforms import Transform


class DimTransform(Transform):
"""Base class for transforms that are applied to dim distributions."""


class LogTransform(DimTransform):
name = "log"

def forward(self, value, *inputs):
return ptx.math.log(value)

def backward(self, value, *inputs):
return ptx.math.exp(value)

def log_jac_det(self, value, *inputs):
return value


log_transform = LogTransform()


class LogOddsTransform(DimTransform):
name = "logodds"

def backward(self, value, *inputs):
return ptx.math.expit(value)

def forward(self, value, *inputs):
return ptx.math.log(value / (1 - value))

def log_jac_det(self, value, *inputs):
sigmoid_value = ptx.math.sigmoid(value)
return ptx.math.log(sigmoid_value) + ptx.math.log1p(-sigmoid_value)


log_odds_transform = LogOddsTransform()


class ZeroSumTransform(DimTransform):
name = "zerosum"

def __init__(self, dims: tuple[str, ...]):
self.dims = dims

@staticmethod
def extend_dim(array, dim):
n = (array.sizes[dim] + 1).astype("floatX")
sum_vals = array.sum(dim)
norm = sum_vals / (pt.sqrt(n) + n)
fill_val = norm - sum_vals / pt.sqrt(n)

out = ptx.concat([array, fill_val], dim=dim)
return out - norm

@staticmethod
def reduce_dim(array, dim):
n = array.sizes[dim].astype("floatX")
last = array.isel({dim: -1})

sum_vals = -last * pt.sqrt(n)
norm = sum_vals / (pt.sqrt(n) + n)
return array.isel({dim: slice(None, -1)}) + norm

def forward(self, value, *rv_inputs):
for dim in self.dims:
value = self.reduce_dim(value, dim=dim)
return value

def backward(self, value, *rv_inputs):
for dim in self.dims:
value = self.extend_dim(value, dim=dim)
return value

def log_jac_det(self, value, *rv_inputs):
# Use the following once broadcast_like is implemented:
# as_xtensor(0).broadcast_like(value, exclude=self.dims)
return value.sum(self.dims) * 0
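The transforms operate directly on xtensor variables. A quick round-trip sketch with the log-odds transform (illustrative only):

import numpy as np
from pytensor.xtensor import as_xtensor

from pymc.dims.distributions.transforms import log_odds_transform

p = as_xtensor(np.array([0.1, 0.5, 0.9]), dims=("obs",))
z = log_odds_transform.forward(p)        # logit(p)
p_back = log_odds_transform.backward(z)  # sigmoid(z), recovers p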
198 changes: 198 additions & 0 deletions pymc/dims/distributions/vector.py
@@ -0,0 +1,198 @@
# Copyright 2025 - present The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytensor.xtensor as ptx
import pytensor.xtensor.random as ptxr

from pytensor.tensor import as_tensor
from pytensor.xtensor import as_xtensor
from pytensor.xtensor import random as pxr

from pymc.dims.distributions.core import VectorDimDistribution
from pymc.dims.distributions.transforms import ZeroSumTransform
from pymc.distributions.multivariate import ZeroSumNormalRV
from pymc.util import UNSET


class Categorical(VectorDimDistribution):
"""Categorical distribution.
Parameters
----------
p : xtensor_like, optional
Probabilities of each category. Must sum to 1 along the core dimension.
Must be provided if `logit_p` is not specified.
logit_p : xtensor_like, optional
Alternative parametrization using logits. Must be provided if `p` is not specified.
core_dims : str
The core dimension of the distribution, which represents the categories.
The dimension must be present in `p` or `logit_p`.
**kwargs
Other keyword arguments used to define the distribution.
Returns
-------
XTensorVariable
An xtensor variable representing the categorical distribution.
The output does not contain the core dimension, as it is absorbed into the distribution.
"""

xrv_op = ptxr.categorical

@classmethod
def dist(cls, p=None, *, logit_p=None, core_dims=None, **kwargs):
if p is not None and logit_p is not None:
raise ValueError("Incompatible parametrization. Can't specify both p and logit_p.")
elif p is None and logit_p is None:
raise ValueError("Incompatible parametrization. Must specify either p or logit_p.")

if logit_p is not None:
p = ptx.math.softmax(logit_p, dim=core_dims)
return super().dist([p], core_dims=core_dims, **kwargs)


class MvNormal(VectorDimDistribution):
"""Multivariate Normal distribution.
Parameters
----------
mu : xtensor_like
Mean vector of the distribution.
cov : xtensor_like, optional
Covariance matrix of the distribution. Only one of `cov` or `chol` must be provided.
chol : xtensor_like, optional
Cholesky decomposition of the covariance matrix. Only one of `cov` or `chol` must be provided.
lower : bool, default True
If True, the Cholesky decomposition is assumed to be lower triangular.
If False, it is assumed to be upper triangular.
core_dims : Sequence of str
Sequence of two strings representing the core dimensions of the distribution.
The two dimensions must be present in `cov` or `chol`, and exactly one must also be present in `mu`.
**kwargs
Additional keyword arguments used to define the distribution.
Returns
-------
XTensorVariable
An xtensor variable representing the multivariate normal distribution.
The output contains the core dimension that is shared between `mu` and `cov` or `chol`.
"""

xrv_op = pxr.multivariate_normal

@classmethod
def dist(cls, mu, cov=None, *, chol=None, lower=True, core_dims=None, **kwargs):
if "tau" in kwargs:
raise NotImplementedError("MvNormal does not support 'tau' parameter.")

if not (isinstance(core_dims, tuple | list) and len(core_dims) == 2):
raise ValueError("MvNormal requires 2 core_dims")

if cov is None and chol is None:
raise ValueError("Either 'cov' or 'chol' must be provided.")

if chol is not None:
d0, d1 = core_dims
if not lower:
# By logical symmetry this must be the only correct way to implement lower
# We refuse to test it because it is not useful
d1, d0 = d0, d1

chol = cls._as_xtensor(chol)
# chol @ chol.T in xarray semantics requires a rename
safe_name = "_"
if "_" in chol.type.dims:
safe_name *= max(map(len, chol.type.dims)) + 1
cov = chol.dot(chol.rename({d0: safe_name}), dim=d1).rename({safe_name: d1})

return super().dist([mu, cov], core_dims=core_dims, **kwargs)


class ZeroSumNormal(VectorDimDistribution):
"""Zero-sum multivariate normal distribution.
Parameters
----------
sigma : xtensor_like, optional
The standard deviation of the underlying unconstrained normal distribution.
Defaults to 1.0. It cannot have core dimensions.
core_dims : Sequence of str, optional
The axes along which the zero-sum constraint is applied.
**kwargs
Additional keyword arguments used to define the distribution.
Returns
-------
XTensorVariable
An xtensor variable representing the zero-sum multivariate normal distribution.
"""

@classmethod
def __new__(
cls, *args, core_dims=None, dims=None, default_transform=UNSET, observed=None, **kwargs
):
if core_dims is not None:
if isinstance(core_dims, str):
core_dims = (core_dims,)

# Create default_transform
if observed is None and default_transform is UNSET:
default_transform = ZeroSumTransform(dims=core_dims)

# If the user didn't specify dims, take it from core_dims
# We need them to be forwarded to dist in the `dim_lengths` argument
if dims is None and core_dims is not None:
dims = (..., *core_dims)

return super().__new__(
*args,
core_dims=core_dims,
dims=dims,
default_transform=default_transform,
observed=observed,
**kwargs,
)

@classmethod
def dist(cls, sigma=1.0, *, core_dims=None, dim_lengths, **kwargs):
if isinstance(core_dims, str):
core_dims = (core_dims,)
if core_dims is None or len(core_dims) == 0:
raise ValueError("ZeroSumNormal requires at least one core dim")

support_dims = as_xtensor(
as_tensor([dim_lengths[core_dim] for core_dim in core_dims]), dims=("_",)
)
sigma = cls._as_xtensor(sigma)

return super().dist(
[sigma, support_dims], core_dims=core_dims, dim_lengths=dim_lengths, **kwargs
)

@classmethod
def xrv_op(self, sigma, support_dims, core_dims, extra_dims=None, rng=None):
sigma = as_xtensor(sigma)
support_dims = as_xtensor(support_dims, dims=("_",))
support_shape = support_dims.values
core_rv = ZeroSumNormalRV.rv_op(sigma=sigma.values, support_shape=support_shape).owner.op
xop = pxr.as_xrv(
core_rv,
core_inps_dims_map=[(), (0,)],
core_out_dims_map=tuple(range(1, len(core_dims) + 1)),
)
# Dummy "_" core dim to absorb the support_shape vector
# If ZeroSumNormal expected a scalar per support dim, this wouldn't be needed
return xop(sigma, support_dims, core_dims=("_", *core_dims), extra_dims=extra_dims, rng=rng)
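A sketch of the vector distributions with explicit `core_dims` (names are illustrative):

import numpy as np
import pymc as pm
import pymc.dims as pmd
from pytensor.xtensor import as_xtensor

coords = {"city": ["A", "B", "C"], "city_bis": ["A", "B", "C"]}
with pm.Model(coords=coords) as m:
    mu = as_xtensor(np.zeros(3), dims=("city",))
    chol = as_xtensor(np.eye(3), dims=("city", "city_bis"))
    # core_dims names the two matrix dimensions of chol; the output keeps only "city"
    y = pmd.MvNormal("y", mu=mu, chol=chol, core_dims=("city", "city_bis"))
    # ZeroSumNormal constrains the draws to sum to zero over its core dims
    z = pmd.ZeroSumNormal("z", core_dims="city")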
15 changes: 15 additions & 0 deletions pymc/dims/math.py
@@ -0,0 +1,15 @@
# Copyright 2025 - present The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytensor.xtensor import linalg
from pytensor.xtensor.math import *
95 changes: 95 additions & 0 deletions pymc/dims/model.py
@@ -0,0 +1,95 @@
# Copyright 2025 - present The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable

from pytensor.tensor import TensorVariable
from pytensor.xtensor import as_xtensor
from pytensor.xtensor.type import XTensorVariable

from pymc.data import Data as RegularData
from pymc.distributions.shape_utils import (
Dims,
DimsWithEllipsis,
convert_dims,
convert_dims_with_ellipsis,
)
from pymc.model.core import Deterministic as RegularDeterministic
from pymc.model.core import Model, modelcontext
from pymc.model.core import Potential as RegularPotential


def Data(
name: str, value, dims: Dims = None, model: Model | None = None, **kwargs
) -> XTensorVariable:
"""Wrapper around pymc.Data that returns an XTensorVariable.
Dimensions are required if the input is not a scalar.
These are always forwarded to the model object.
"""
model = modelcontext(model)
dims = convert_dims(dims) # type: ignore[assignment]

with model:
value = RegularData(name, value, dims=dims, **kwargs) # type: ignore[arg-type]

dims = model.named_vars_to_dims[value.name]
if dims is None and value.ndim > 0:
raise ValueError("pymc.dims.Data requires dims to be specified for non-scalar data.")

return as_xtensor(value, dims=dims, name=name) # type: ignore[arg-type]


def _register_and_return_xtensor_variable(
name: str,
value: TensorVariable | XTensorVariable,
dims: DimsWithEllipsis | None,
model: Model | None,
registration_func: Callable,
) -> XTensorVariable:
if isinstance(value, XTensorVariable):
dims = convert_dims_with_ellipsis(dims)
if dims is not None:
# If dims are provided, apply a transpose to align with the user expectation
value = value.transpose(*dims)
# Regardless of whether dims are provided, we now have them
dims = value.type.dims
else:
value = as_xtensor(value, dims=dims, name=name) # type: ignore[arg-type]
return registration_func(name, value, dims=dims, model=model)


def Deterministic(
name: str, value, dims: DimsWithEllipsis | None = None, model: Model | None = None
) -> XTensorVariable:
"""Wrapper around pymc.Deterministic that returns an XTensorVariable.
If the input is already an XTensorVariable, dims are optional. If dims are provided, the variable is aligned with them with a transpose.
If the input is not an XTensorVariable, it is converted to one using `as_xtensor`. Dims are required if the input is not a scalar.
The dimensions of the resulting XTensorVariable are always forwarded to the model object.
"""
return _register_and_return_xtensor_variable(name, value, dims, model, RegularDeterministic)


def Potential(
name: str, value, dims: DimsWithEllipsis | None = None, model: Model | None = None
) -> XTensorVariable:
"""Wrapper around pymc.Potential that returns an XTensorVariable.
If the input is already an XTensorVariable, dims are optional. If dims are provided, the variable is aligned with them with a transpose.
If the input is not an XTensorVariable, it is converted to one using `as_xtensor`. Dims are required if the input is not a scalar.
The dimensions of the resulting XTensorVariable are always forwarded to the model object.
"""
return _register_and_return_xtensor_variable(name, value, dims, model, RegularPotential)
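A sketch of the dims-aware model wrappers in use (illustrative names):

import pymc as pm
import pymc.dims as pmd

with pm.Model(coords={"city": ["A", "B", "C"]}) as m:
    data = pmd.Data("data", [1.0, 2.0, 3.0], dims="city")  # returns an XTensorVariable
    x = pmd.Normal("x", dims="city")
    # dims are taken from the xtensor value and forwarded to the model
    y = pmd.Deterministic("y", pmd.math.exp(x) + data)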
22 changes: 6 additions & 16 deletions pymc/distributions/continuous.py
Expand Up @@ -487,11 +487,7 @@ class Normal(Continuous):
def dist(cls, mu=0, sigma=None, tau=None, **kwargs):
tau, sigma = get_tau_sigma(tau=tau, sigma=sigma)
sigma = pt.as_tensor_variable(sigma)

# tau = pt.as_tensor_variable(tau)
# mean = median = mode = mu = pt.as_tensor_variable(floatX(mu))
# variance = 1.0 / self.tau

mu = pt.as_tensor_variable(mu)
return super().dist([mu, sigma], **kwargs)

def support_point(rv, size, mu, sigma):
Expand Down Expand Up @@ -2374,11 +2370,8 @@ def get_alpha_beta(cls, alpha=None, beta=None, mu=None, sigma=None):
if (alpha is not None) and (beta is not None):
pass
elif (mu is not None) and (sigma is not None):
if isinstance(sigma, Variable):
sigma = check_parameters(sigma, sigma > 0, msg="sigma > 0")
else:
assert np.all(np.asarray(sigma) > 0)
alpha = mu**2 / sigma**2
# Use sign of sigma to not let negative sigma fly by
alpha = (mu**2 / sigma**2) * pt.sign(sigma)
beta = mu / sigma**2
else:
raise ValueError(
Expand Down Expand Up @@ -2500,13 +2493,10 @@ def _get_alpha_beta(cls, alpha, beta, mu, sigma):
if beta is not None:
pass
else:
beta = 1
beta = 1.0
elif (mu is not None) and (sigma is not None):
if isinstance(sigma, Variable):
sigma = check_parameters(sigma, sigma > 0, msg="sigma > 0")
else:
assert np.all(np.asarray(sigma) > 0)
alpha = (2 * sigma**2 + mu**2) / sigma**2
# Use sign of sigma to not let negative sigma fly by
alpha = ((2 * sigma**2 + mu**2) / sigma**2) * pt.sign(sigma)
beta = mu * (mu**2 + sigma**2) / sigma**2
else:
raise ValueError(
Expand Down
30 changes: 27 additions & 3 deletions pymc/distributions/distribution.py
Expand Up @@ -32,7 +32,7 @@
from pytensor.graph.rewriting.basic import in2out
from pytensor.graph.utils import MetaType
from pytensor.tensor.basic import as_tensor_variable
from pytensor.tensor.random.op import RandomVariable
from pytensor.tensor.random.op import RandomVariable, RNGConsumerOp
from pytensor.tensor.random.rewriting import local_subtensor_rv_lift
from pytensor.tensor.random.utils import normalize_size_param
from pytensor.tensor.rewriting.shape import ShapeFeature
Expand Down Expand Up @@ -207,7 +207,7 @@ def __get__(self, owner_self, owner_cls):
return self.fget(owner_self if owner_self is not None else owner_cls)


class SymbolicRandomVariable(MeasurableOp, OpFromGraph):
class SymbolicRandomVariable(MeasurableOp, RNGConsumerOp, OpFromGraph):
"""Symbolic Random Variable.
This is a subclass of `OpFromGraph` which is used to encapsulate the symbolic
Expand Down Expand Up @@ -294,7 +294,10 @@ def default_output(cls_or_self) -> int | None:
@staticmethod
def get_input_output_type_idxs(
extended_signature: str | None,
) -> tuple[tuple[tuple[int], int | None, tuple[int]], tuple[tuple[int], tuple[int]]]:
) -> tuple[
tuple[tuple[int, ...], int | None, tuple[int, ...]],
tuple[tuple[int, ...], tuple[int, ...]],
]:
"""Parse extended_signature and return indexes for *[rng], [size] and parameters as well as outputs."""
if extended_signature is None:
raise ValueError("extended_signature must be provided")
Expand Down Expand Up @@ -367,8 +370,29 @@ def __init__(

kwargs.setdefault("inline", True)
kwargs.setdefault("strict", True)
# Many RVs have a size argument, even when it is `None` and therefore unused
kwargs.setdefault("on_unused_input", "ignore")
if hasattr(self, "name"):
kwargs.setdefault("name", self.name)
super().__init__(*args, **kwargs)

def make_node(self, *inputs):
# If we try to build the RV with a different size type (vector -> None or None -> vector),
# we need to rebuild the Op with the new size type in the inner graph
if self.extended_signature is not None:
(rng_arg_idxs, size_arg_idx, param_idxs), _ = self.get_input_output_type_idxs(
self.extended_signature
)
if size_arg_idx is not None and len(rng_arg_idxs) == 1:
new_size_type = normalize_size_param(inputs[size_arg_idx]).type
if not self.input_types[size_arg_idx].in_same_class(new_size_type):
params = [inputs[idx] for idx in param_idxs]
size = inputs[size_arg_idx]
rng = inputs[rng_arg_idxs[0]]
return self.rebuild_rv(*params, size=size, rng=rng).owner

return super().make_node(*inputs)

def update(self, node: Apply) -> dict[Variable, Variable]:
"""Symbolic update expression for input random state variables.
Expand Down
15 changes: 8 additions & 7 deletions pymc/distributions/multivariate.py
Expand Up @@ -149,11 +149,11 @@ def quaddist_matrix(cov=None, chol=None, tau=None, lower=True, *args, **kwargs):
raise ValueError("chol must be at least two dimensional.")

if not lower:
chol = pt.swapaxes(chol, -1, -2)
chol = chol.mT

# tag as lower triangular to enable pytensor rewrites of chol(l.l') -> l
chol.tag.lower_triangular = True
cov = pt.matmul(chol, pt.swapaxes(chol, -1, -2))
cov = chol @ chol.mT

return cov

Expand Down Expand Up @@ -2664,6 +2664,7 @@ def logp(value, alpha, K):
class ZeroSumNormalRV(SymbolicRandomVariable):
"""ZeroSumNormal random variable."""

name = "ZeroSumNormal"
_print_name = ("ZeroSumNormal", "\\operatorname{ZeroSumNormal}")

@classmethod
Expand All @@ -2687,12 +2688,12 @@ def rv_op(cls, sigma, support_shape, *, size=None, rng=None):
zerosum_rv -= zerosum_rv.mean(axis=-axis - 1, keepdims=True)

support_str = ",".join([f"d{i}" for i in range(n_zerosum_axes)])
extended_signature = f"[rng],(),(s),[size]->[rng],({support_str})"
return ZeroSumNormalRV(
inputs=[rng, sigma, support_shape, size],
extended_signature = f"[rng],[size],(),(s)->[rng],({support_str})"
return cls(
inputs=[rng, size, sigma, support_shape],
outputs=[next_rng, zerosum_rv],
extended_signature=extended_signature,
)(rng, sigma, support_shape, size)
)(rng, size, sigma, support_shape)


class ZeroSumNormal(Distribution):
Expand Down Expand Up @@ -2828,7 +2829,7 @@ def zerosum_default_transform(op, rv):


@_logprob.register(ZeroSumNormalRV)
def zerosumnormal_logp(op, values, rng, sigma, support_shape, size, **kwargs):
def zerosumnormal_logp(op, values, rng, size, sigma, support_shape, **kwargs):
(value,) = values
shape = value.shape
n_zerosum_axes = op.ndim_supp
Expand Down
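The `ZeroSumNormalRV` changes above only reorder the inner inputs (`rng, size, sigma, support_shape`) and register a `name`; user-facing behaviour is unchanged. A quick sanity sketch:

import pymc as pm

with pm.Model(coords={"group": ["a", "b", "c"]}) as m:
    z = pm.ZeroSumNormal("z", dims="group")

pm.draw(z).sum()  # approximately 0: draws respect the zero-sum constraint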
24 changes: 22 additions & 2 deletions pymc/distributions/shape_utils.py
Expand Up @@ -18,6 +18,7 @@

from collections.abc import Sequence
from functools import singledispatch
from types import EllipsisType
from typing import Any, TypeAlias, cast

import numpy as np
Expand Down Expand Up @@ -87,11 +88,13 @@ def _check_shape_type(shape):
# User-provided can be lazily specified as scalars
Shape: TypeAlias = int | TensorVariable | Sequence[int | Variable]
Dims: TypeAlias = str | Sequence[str | None]
DimsWithEllipsis: TypeAlias = str | EllipsisType | Sequence[str | None | EllipsisType]
Size: TypeAlias = int | TensorVariable | Sequence[int | Variable]

# After conversion to vectors
StrongShape: TypeAlias = TensorVariable | tuple[int | Variable, ...]
StrongDims: TypeAlias = Sequence[str | None]
StrongDims: TypeAlias = Sequence[str]
StrongDimsWithEllipsis: TypeAlias = Sequence[str | EllipsisType]
StrongSize: TypeAlias = TensorVariable | tuple[int | Variable, ...]


Expand All @@ -107,7 +110,24 @@ def convert_dims(dims: Dims | None) -> StrongDims | None:
else:
raise ValueError(f"The `dims` parameter must be a tuple, str or list. Actual: {type(dims)}")

return dims
return dims # type: ignore[return-value]


def convert_dims_with_ellipsis(dims: DimsWithEllipsis | None) -> StrongDimsWithEllipsis | None:
"""Process a user-provided dims variable into None or a valid dims tuple with ellipsis."""
if dims is None:
return None

if isinstance(dims, str | EllipsisType):
dims = (dims,)
elif isinstance(dims, list | tuple):
dims = tuple(dims)
else:
raise ValueError(
f"The `dims` parameter must be a tuple, list, str or Ellipsis. Actual: {type(dims)}"
)

return dims # type: ignore[return-value]


def convert_shape(shape: Shape) -> StrongShape | None:
Expand Down
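`convert_dims_with_ellipsis` mirrors `convert_dims` but lets `...` pass through, e.g.:

from pymc.distributions.shape_utils import convert_dims_with_ellipsis

convert_dims_with_ellipsis("city")         # ("city",)
convert_dims_with_ellipsis(["city", ...])  # ("city", Ellipsis)
convert_dims_with_ellipsis(None)           # None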
32 changes: 30 additions & 2 deletions pymc/gp/cov.py
Expand Up @@ -617,13 +617,27 @@ def full_from_distance(self, dist: TensorLike, squared: bool = False) -> TensorV

class Matern52(Stationary):
r"""
The Matern kernel with nu = 5/2.
The Matérn kernel with :math:`\nu = \frac{5}{2}`.
.. math::
k(x, x') = \left(1 + \frac{\sqrt{5(x - x')^2}}{\ell} +
\frac{5(x-x')^2}{3\ell^2}\right)
\mathrm{exp}\left[ - \frac{\sqrt{5(x - x')^2}}{\ell} \right]
Read more `here <https://en.wikipedia.org/wiki/Mat%C3%A9rn_covariance_function>`_.
Parameters
----------
input_dim : int
The number of input dimensions
ls : scalar or array, optional
Lengthscale parameter :math:`\ell`; if `input_dim` > 1, a list or array of scalars.
If `input_dim` == 1, a scalar.
ls_inv : scalar or array, optional
Inverse lengthscale :math:`1 / \ell`. One of `ls` or `ls_inv` must be provided.
active_dims : list of int, optional
The dimension(s) the covariance function operates on.
"""

def full_from_distance(self, dist: TensorLike, squared: bool = False) -> TensorVariable:
Expand Down Expand Up @@ -657,12 +671,26 @@ def power_spectral_density(self, omega: TensorLike) -> TensorVariable:

class Matern32(Stationary):
r"""
The Matern kernel with nu = 3/2.
The Matérn kernel with :math:`\nu = \frac{3}{2}`.
.. math::
k(x, x') = \left(1 + \frac{\sqrt{3(x - x')^2}}{\ell}\right)
\mathrm{exp}\left[ - \frac{\sqrt{3(x - x')^2}}{\ell} \right]
Read more `here <https://en.wikipedia.org/wiki/Mat%C3%A9rn_covariance_function>`_.
Parameters
----------
input_dim : int
The number of input dimensions
ls : scalar or array, optional
Lengthscale parameter :math:`\ell`; if `input_dim` > 1, a list or array of scalars.
If `input_dim` == 1, a scalar.
ls_inv : scalar or array, optional
Inverse lengthscale :math:`1 / \ell`. One of `ls` or `ls_inv` must be provided.
active_dims : list of int, optional
The dimension(s) the covariance function operates on.
"""

def full_from_distance(self, dist: TensorLike, squared: bool = False) -> TensorVariable:
Expand Down
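The docstring additions above are documentation-only; for context, a short usage sketch of the kernel (illustrative values):

import numpy as np
import pymc as pm

X = np.linspace(0, 10, 50)[:, None]
cov_func = pm.gp.cov.Matern52(input_dim=1, ls=2.0)
K = cov_func(X).eval()  # 50 x 50 covariance matrix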
100 changes: 77 additions & 23 deletions pymc/initial_point.py
Expand Up @@ -20,8 +20,11 @@
import pytensor
import pytensor.tensor as pt

from pytensor.graph.basic import Variable
from pytensor import graph_replace
from pytensor.compile.ops import TypeCastingOp
from pytensor.graph.basic import Apply, Variable, ancestors, walk
from pytensor.graph.fg import FunctionGraph
from pytensor.graph.rewriting.db import RewriteDatabaseQuery, SequenceDB
from pytensor.tensor.variable import TensorVariable

from pymc.logprob.transforms import Transform
Expand All @@ -37,6 +40,8 @@

StartDict = dict[Variable | str, np.ndarray | Variable | str]
PointType = dict[str, np.ndarray]
initial_point_rewrites_db = SequenceDB()
initial_point_basic_query = RewriteDatabaseQuery(include=["basic"])


def convert_str_to_rv_dict(
Expand Down Expand Up @@ -192,6 +197,25 @@ def inner(seed, *args, **kwargs):
return make_seeded_function(func)


class InitialPoint(TypeCastingOp):
def make_node(self, var):
return Apply(self, [var], [var.type()])


def non_support_point_ancestors(value):
def expand(r: Variable):
node = r.owner
if node is not None and not isinstance(node.op, InitialPoint):
# Stop graph traversal at InitialPoint ops
return node.inputs
return None

yield from walk([value], expand, bfs=False)


initial_point_op = InitialPoint()


def make_initial_point_expression(
*,
free_rvs: Sequence[TensorVariable],
Expand Down Expand Up @@ -230,11 +254,25 @@ def make_initial_point_expression(
if jitter_rvs is None:
jitter_rvs = set()

# Clone free_rvs so we don't modify the original graph
initial_point_fgraph = FunctionGraph(outputs=free_rvs, clone=True)
# Wrap each rv in an initial_point Operation to avoid losing dependency between the RVs
replacements = tuple((rv, initial_point_op(rv)) for rv in initial_point_fgraph.outputs)
toposort_replace(initial_point_fgraph, replacements, reverse=True)

# Apply any rewrites necessary to compute the initial points.
initial_point_rewriter = initial_point_rewrites_db.query(initial_point_basic_query)
if initial_point_rewriter:
initial_point_rewriter.rewrite(initial_point_fgraph)

ip_variables = initial_point_fgraph.outputs.copy()
free_rvs_clone = [ip.owner.inputs[0] for ip in ip_variables]
n_rvs = len(free_rvs_clone)

initial_values = []
initial_values_transformed = []

for variable in free_rvs:
strategy = initval_strategies.get(variable, None)
for original_variable, variable in zip(free_rvs, free_rvs_clone):
strategy = initval_strategies.get(original_variable)

if strategy is None:
strategy = default_strategy
Expand All @@ -243,9 +281,23 @@ def make_initial_point_expression(
if strategy == "support_point":
try:
value = support_point(variable)

# If a support point expression depends on other free_RVs that are not
# wrapped in InitialPoint, we need to replace them with their wrapped versions
# This can only happen for multi-output distributions, where the initial point
# of some outputs depends on the initial point of other outputs from the same node.
other_free_rvs = set(free_rvs_clone) - {variable}
support_point_replacements = {
ancestor: ip_variables[free_rvs_clone.index(ancestor)]
for ancestor in non_support_point_ancestors(value)
if ancestor in other_free_rvs
}
if support_point_replacements:
value = graph_replace(value, support_point_replacements)

except NotImplementedError:
warnings.warn(
f"Moment not defined for variable {variable} of type "
f"Support point not defined for variable {variable} of type "
f"{variable.owner.op.__class__.__name__}, defaulting to "
f"a draw from the prior. This can lead to difficulties "
f"during tuning. You can manually define an initval or "
Expand All @@ -261,16 +313,23 @@ def make_initial_point_expression(
f'Invalid string strategy: {strategy}. It must be one of ["support_point", "prior"]'
)
else:
value = pt.as_tensor(strategy, dtype=variable.dtype).astype(variable.dtype)
if isinstance(strategy, Variable) and (set(free_rvs) & set(ancestors([strategy]))):
raise ValueError(
f"Initial value of {original_variable} depends on other random variables. This is not supported anymore."
)
value = pt.as_tensor(strategy, variable.dtype).astype(variable.dtype)

transform = rvs_to_transforms.get(variable, None)
transform = rvs_to_transforms.get(original_variable, None)

if transform is not None:
value = transform.forward(value, *variable.owner.inputs)

if variable in jitter_rvs:
if original_variable in jitter_rvs:
jitter = pt.random.uniform(-1, 1, size=value.shape)
# Hack to allow xtensor value to be added to tensor jitter
jitter = value.type.filter_variable(jitter)
jitter.name = f"{variable.name}_jitter"
# Hack to allow xtensor value to be added to tensor jitter
value = value + jitter

value = value.astype(variable.dtype)
Expand All @@ -281,28 +340,23 @@ def make_initial_point_expression(

initial_values.append(value)

all_outputs: list[TensorVariable] = []
all_outputs.extend(free_rvs)
all_outputs.extend(initial_values)
all_outputs.extend(initial_values_transformed)

copy_graph = FunctionGraph(outputs=all_outputs, clone=True)

n_variables = len(free_rvs)
free_rvs_clone = copy_graph.outputs[:n_variables]
initial_values_clone = copy_graph.outputs[n_variables:-n_variables]
initial_values_transformed_clone = copy_graph.outputs[-n_variables:]
for initial_value in initial_values:
# FIXME: This is a hack so that interdependent replacements that can't
# be sorted topologically from the initial point graph come out correctly.
# This happens for multi-output nodes where the replacements depend on each other.
# From the original graph perspective, their ordering is equivalent.
initial_point_fgraph.add_output(initial_value, import_missing=True)

# We now replace all rvs by the respective initial_point expressions
# in the constrained (untransformed) space. We do this in reverse topological
# order, so that later nodes do not reintroduce expressions with earlier
# rvs that would need to once again be replaced by their initial_points
graph = FunctionGraph(outputs=free_rvs_clone, clone=False)
toposort_replace(graph, tuple(zip(free_rvs_clone, initial_values_clone)), reverse=True)
toposort_replace(initial_point_fgraph, tuple(zip(ip_variables, initial_values)), reverse=True)

if not return_transformed:
return graph.outputs
return initial_point_fgraph.outputs[:n_rvs]

# Because the unconstrained (transformed) expressions are a subgraph of the
# constrained initial point, they were also automatically updated in place
# by the replacements above, so we don't need to do anything else
return initial_values_transformed_clone
return initial_values_transformed
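One user-visible consequence of the rewrite above: an `initval` that depends on other random variables now raises. Constants and the string strategies remain supported, e.g. (illustrative):

import pymc as pm

with pm.Model() as m:
    x = pm.HalfNormal("x", initval="support_point")
    y = pm.Normal("y", mu=x, initval=2.5)  # constant initvals are still fine

m.initial_point(random_seed=1)  # dict of starting values, transformed where applicable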
33 changes: 19 additions & 14 deletions pymc/logprob/basic.py
Expand Up @@ -46,6 +46,7 @@
Constant,
Variable,
ancestors,
walk,
)
from pytensor.graph.rewriting.basic import GraphRewriter, NodeRewriter
from pytensor.tensor.variable import TensorVariable
Expand All @@ -60,8 +61,8 @@
from pymc.logprob.rewriting import cleanup_ir, construct_ir_fgraph
from pymc.logprob.transform_value import TransformValuesRewrite
from pymc.logprob.transforms import Transform
from pymc.logprob.utils import get_related_valued_nodes, rvs_in_graph
from pymc.pytensorf import replace_vars_in_graphs
from pymc.logprob.utils import get_related_valued_nodes
from pymc.pytensorf import expand_inner_graph, replace_vars_in_graphs

TensorLike: TypeAlias = Variable | float | np.ndarray

Expand All @@ -71,9 +72,13 @@ def _find_unallowed_rvs_in_graph(graph):
from pymc.distributions.simulator import SimulatorRV

return {
rv
for rv in rvs_in_graph(graph)
if not isinstance(rv.owner.op, SimulatorRV | MinibatchIndexRV)
var
for var in walk(graph, expand_inner_graph, False)
if (
var.owner
and isinstance(var.owner.op, MeasurableOp)
and not isinstance(var.owner.op, SimulatorRV | MinibatchIndexRV)
)
}


Expand Down Expand Up @@ -192,9 +197,9 @@ def normal_logp(value, mu, sigma):
[ir_valued_var] = fgraph.outputs
[ir_rv, ir_value] = ir_valued_var.owner.inputs
expr = _logprob_helper(ir_rv, ir_value, **kwargs)
cleanup_ir([expr])
[expr] = cleanup_ir([expr])
if warn_rvs:
_warn_rvs_in_inferred_graph(expr)
_warn_rvs_in_inferred_graph([expr])
return expr


Expand Down Expand Up @@ -292,9 +297,9 @@ def normal_logcdf(value, mu, sigma):
[ir_valued_rv] = fgraph.outputs
[ir_rv, ir_value] = ir_valued_rv.owner.inputs
expr = _logcdf_helper(ir_rv, ir_value, **kwargs)
cleanup_ir([expr])
[expr] = cleanup_ir([expr])
if warn_rvs:
_warn_rvs_in_inferred_graph(expr)
_warn_rvs_in_inferred_graph([expr])
return expr


Expand Down Expand Up @@ -374,9 +379,9 @@ def icdf(rv: TensorVariable, value: TensorLike, warn_rvs=True, **kwargs) -> Tens
[ir_valued_rv] = fgraph.outputs
[ir_rv, ir_value] = ir_valued_rv.owner.inputs
expr = _icdf_helper(ir_rv, ir_value, **kwargs)
cleanup_ir([expr])
[expr] = cleanup_ir([expr])
if warn_rvs:
_warn_rvs_in_inferred_graph(expr)
_warn_rvs_in_inferred_graph([expr])
return expr


Expand Down Expand Up @@ -528,8 +533,8 @@ def conditional_logp(
f"The logprob terms of the following value variables could not be derived: {missing_value_terms}"
)

logprobs = list(values_to_logprobs.values())
cleanup_ir(logprobs)
values, logprobs = zip(*values_to_logprobs.items())
logprobs = cleanup_ir(logprobs)

if warn_rvs:
rvs_in_logp_expressions = _find_unallowed_rvs_in_graph(logprobs)
Expand All @@ -540,7 +545,7 @@ def conditional_logp(
UserWarning,
)

return values_to_logprobs
return dict(zip(values, logprobs))


def transformed_conditional_logp(
Expand Down
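`cleanup_ir` now returns the cleaned outputs instead of only rewriting in place, and the public helpers use the returned expressions. Their call signatures are unchanged, e.g. (illustrative):

import pytensor.tensor as pt
import pymc as pm

x = pm.Normal.dist(mu=0.0, sigma=1.0)
value = pt.scalar("value")
logp_x = pm.logp(x, value)   # goes through the updated cleanup_ir path
logp_x.eval({value: 0.5})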
33 changes: 26 additions & 7 deletions pymc/logprob/rewriting.py
Expand Up @@ -132,6 +132,9 @@ def remove_DiracDelta(fgraph, node):
return [dd_val]


logprob_rewrites_basic_query = RewriteDatabaseQuery(include=["basic"])
logprob_rewrites_cleanup_query = RewriteDatabaseQuery(include=["cleanup"])

logprob_rewrites_db = SequenceDB()
logprob_rewrites_db.name = "logprob_rewrites_db"

Expand All @@ -146,16 +149,21 @@ def remove_DiracDelta(fgraph, node):
failure_callback=None,
),
"basic",
position=0,
)

# Introduce sigmoid. We do it before canonicalization so that useless mul are removed next
logprob_rewrites_db.register(
"local_exp_over_1_plus_exp", out2in(local_exp_over_1_plus_exp), "basic"
"local_exp_over_1_plus_exp",
out2in(local_exp_over_1_plus_exp),
"basic",
position=0.9,
)
logprob_rewrites_db.register(
"pre-canonicalize",
optdb.query("+canonicalize", "-local_eager_useless_unbatched_blockwise"),
"basic",
position=1,
)

# These rewrites convert un-measurable variables into their measurable forms,
Expand All @@ -164,25 +172,34 @@ def remove_DiracDelta(fgraph, node):
measurable_ir_rewrites_db = EquilibriumDB()
measurable_ir_rewrites_db.name = "measurable_ir_rewrites_db"

logprob_rewrites_db.register("measurable_ir_rewrites", measurable_ir_rewrites_db, "basic")
logprob_rewrites_db.register(
"measurable_ir_rewrites",
measurable_ir_rewrites_db,
"basic",
position=2,
)

# These rewrites push random/measurable variables "down", making them closer to
# (or eventually) the graph outputs. Often this is done by lifting other `Op`s
# "up" through the random/measurable variables and into their inputs.
measurable_ir_rewrites_db.register("subtensor_lift", local_subtensor_rv_lift, "basic")

# These rewrites are used to introduce specalized operations with better logprob graphs
# These rewrites are used to introduce specialized operations with better logprob graphs
specialization_ir_rewrites_db = EquilibriumDB()
specialization_ir_rewrites_db.name = "specialization_ir_rewrites_db"
logprob_rewrites_db.register(
"specialization_ir_rewrites_db", specialization_ir_rewrites_db, "basic"
"specialization_ir_rewrites_db",
specialization_ir_rewrites_db,
"basic",
position=3,
)


logprob_rewrites_db.register(
"post-canonicalize",
optdb.query("+canonicalize", "-local_eager_useless_unbatched_blockwise"),
"basic",
position=4,
)

# Rewrites that remove IR Ops
Expand All @@ -192,6 +209,7 @@ def remove_DiracDelta(fgraph, node):
"cleanup_ir_rewrites",
TopoDB(cleanup_ir_rewrites_db, order="out_to_in", ignore_newtrees=True, failure_callback=None),
"cleanup",
position=5,
)

cleanup_ir_rewrites_db.register("remove_DiracDelta", remove_DiracDelta, "cleanup")
Expand Down Expand Up @@ -250,7 +268,7 @@ def construct_ir_fgraph(
toposort_replace(fgraph, replacements, reverse=True)

if ir_rewriter is None:
ir_rewriter = logprob_rewrites_db.query(RewriteDatabaseQuery(include=["basic"]))
ir_rewriter = logprob_rewrites_db.query(logprob_rewrites_basic_query)
ir_rewriter.rewrite(fgraph)

# Reintroduce original value variables
Expand All @@ -260,10 +278,11 @@ def construct_ir_fgraph(
return fgraph


def cleanup_ir(vars: Sequence[Variable]) -> None:
def cleanup_ir(vars: Sequence[Variable]) -> Sequence[Variable]:
fgraph = FunctionGraph(outputs=vars, clone=False)
ir_rewriter = logprob_rewrites_db.query(RewriteDatabaseQuery(include=["cleanup"]))
ir_rewriter = logprob_rewrites_db.query(logprob_rewrites_cleanup_query)
ir_rewriter.rewrite(fgraph)
return fgraph.outputs


def assume_valued_outputs(outputs: Sequence[TensorVariable]) -> Sequence[TensorVariable]:
Expand Down
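As a rough illustration of what the measurable IR rewrites registered above enable, here is a sketch (not from the diff, assuming `subtensor_lift` applies to plain indexing of a vector-valued random variable):

```python
# Sketch: subtensor_lift is what makes an indexed random variable measurable,
# so its logp can be derived.
import numpy as np

import pymc as pm

x = pm.Normal.dist(mu=np.arange(3, dtype="float64"), sigma=1.0)
y = x[0]  # a Subtensor of a measurable variable

# After IR rewriting this becomes the logp of Normal(mu[0], sigma), i.e. Normal(0, 1)
print(pm.logp(y, 0.0).eval())  # ≈ -0.9189
```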
38 changes: 14 additions & 24 deletions pymc/logprob/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,21 +43,18 @@

from pytensor import tensor as pt
from pytensor.graph import Apply, Op, node_rewriter
from pytensor.graph.basic import Constant, Variable, clone_get_equiv, graph_inputs, walk
from pytensor.graph.basic import Constant, clone_get_equiv, graph_inputs, walk
from pytensor.graph.fg import FunctionGraph
from pytensor.graph.op import HasInnerGraph
from pytensor.link.c.type import CType
from pytensor.raise_op import CheckAndRaise
from pytensor.scalar.basic import Mul
from pytensor.tensor.basic import get_underlying_scalar_constant_value
from pytensor.tensor.elemwise import Elemwise
from pytensor.tensor.exceptions import NotScalarConstantError
from pytensor.tensor.random.op import RandomVariable
from pytensor.tensor.variable import TensorVariable

from pymc.logprob.abstract import MeasurableOp, ValuedRV, _logprob
from pymc.pytensorf import replace_vars_in_graphs
from pymc.util import makeiter

if typing.TYPE_CHECKING:
from pymc.logprob.transforms import Transform
Expand Down Expand Up @@ -130,26 +127,6 @@ def populate_replacements(var):
return replace_vars_in_graphs(graphs, replacements)


def rvs_in_graph(vars: Variable | Sequence[Variable]) -> set[Variable]:
"""Assert that there are no `MeasurableOp` nodes in a graph."""

def expand(r):
owner = r.owner
if owner:
inputs = list(reversed(owner.inputs))

if isinstance(owner.op, HasInnerGraph):
inputs += owner.op.inner_outputs

return inputs

return {
node
for node in walk(makeiter(vars), expand, False)
if node.owner and isinstance(node.owner.op, RandomVariable | MeasurableOp)
}


def convert_indices(indices, entry):
if indices and isinstance(entry, CType):
rval = indices.pop(0)
Expand Down Expand Up @@ -334,3 +311,16 @@ def get_related_valued_nodes(fgraph: FunctionGraph, node: Apply) -> list[Apply]:
for client, _ in clients[out]
if isinstance(client.op, ValuedRV)
]


def __getattr__(name):
if name == "rvs_in_graph":
warnings.warn(
f"{name} has been moved to `pymc.pytensorf`. Importing from `pymc.logprob.utils` will fail in a future release.",
FutureWarning,
)
from pymc.pytensorf import rvs_in_graph

return rvs_in_graph

raise AttributeError(f"module {__name__} has no attribute {name}")
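A small sketch of how the module-level `__getattr__` shim is expected to behave for downstream imports, assuming the checked name matches the public helper `rvs_in_graph`:

```python
# Sketch of the deprecation shim's behaviour for code still using the old path.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from pymc.logprob.utils import rvs_in_graph  # resolved via module __getattr__

print([w.category.__name__ for w in caught])  # expected to include "FutureWarning"
```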
4 changes: 4 additions & 0 deletions pymc/math.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
arcsinh,
arctan,
arctanh,
as_tensor,
broadcast_to,
ceil,
clip,
Expand All @@ -42,6 +43,7 @@
cosh,
cumprod,
cumsum,
diff,
dot,
eq,
erf,
Expand Down Expand Up @@ -103,6 +105,7 @@
"arcsinh",
"arctan",
"arctanh",
"as_tensor",
"batched_diag",
"block_diagonal",
"broadcast_to",
Expand All @@ -115,6 +118,7 @@
"cosh",
"cumprod",
"cumsum",
"diff",
"dot",
"eq",
"erf",
Expand Down
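A minimal sketch of the two new `pymc.math` re-exports (values are illustrative):

```python
# Minimal sketch of the newly re-exported helpers.
import numpy as np

import pymc as pm

x = pm.math.as_tensor(np.array([1.0, 3.0, 6.0]))
print(pm.math.diff(x).eval())  # [2. 3.]
```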
14 changes: 10 additions & 4 deletions pymc/model/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,8 @@
from pytensor.compile import DeepCopyOp, Function, ProfileStats, get_mode
from pytensor.compile.sharedvalue import SharedVariable
from pytensor.graph.basic import Constant, Variable, ancestors, graph_inputs
from pytensor.tensor import as_tensor
from pytensor.tensor.math import variadic_add
from pytensor.tensor.random.op import RandomVariable
from pytensor.tensor.random.type import RandomType
from pytensor.tensor.variable import TensorConstant, TensorVariable
Expand Down Expand Up @@ -231,7 +233,9 @@ def __init__(
grads = pytensor.grad(cost, grad_vars, disconnected_inputs="ignore")
for grad_wrt, var in zip(grads, grad_vars):
grad_wrt.name = f"{var.name}_grad"
grads = pt.join(0, *[pt.atleast_1d(grad.ravel()) for grad in grads])
grads = pt.join(
0, *[as_tensor(grad, allow_xtensor_conversion=True).ravel() for grad in grads]
)
outputs = [cost, grads]
else:
outputs = [cost]
Expand Down Expand Up @@ -708,7 +712,9 @@ def logp(
if not sum:
return logp_factors

logp_scalar = pt.sum([pt.sum(factor) for factor in logp_factors])
logp_scalar = variadic_add(
*(as_tensor(factor, allow_xtensor_conversion=True).sum() for factor in logp_factors)
)
logp_scalar_name = "__logp" if jacobian else "__logp_nojac"
if self.name:
logp_scalar_name = f"{logp_scalar_name}_{self.name}"
Expand Down Expand Up @@ -1328,7 +1334,7 @@ def make_obs_var(
else:
if sps.issparse(data):
data = sparse.basic.as_sparse(data, name=name)
else:
elif not isinstance(data, Variable):
data = pt.as_tensor_variable(data, name=name)

if total_size:
Expand Down Expand Up @@ -1781,7 +1787,7 @@ def point_logps(self, point=None, round_vals=2, **kwargs):
point = self.initial_point()

factors = self.basic_RVs + self.potentials
factor_logps_fn = [pt.sum(factor) for factor in self.logp(factors, sum=False)]
factor_logps_fn = [factor.sum() for factor in self.logp(factors, sum=False)]
return {
factor.name: np.round(np.asarray(factor_logp), round_vals)
for factor, factor_logp in zip(
Expand Down
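The `point_logps` change above is internal; its user-facing behaviour should stay as in this sketch (log-probabilities evaluated at the initial point, rounded to two decimals):

```python
# Sketch of the unchanged user-facing behaviour of point_logps.
import pymc as pm

with pm.Model() as m:
    x = pm.Normal("x")
    y = pm.Normal("y", mu=x, sigma=1.0, observed=1.0)

print(m.point_logps())  # e.g. {'x': -0.92, 'y': -1.42}
```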
3 changes: 1 addition & 2 deletions pymc/model/transform/conditioning.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@
from pytensor.tensor import TensorVariable

from pymc.logprob.transforms import Transform
from pymc.logprob.utils import rvs_in_graph
from pymc.model.core import Model
from pymc.model.fgraph import (
ModelDeterministic,
Expand All @@ -41,7 +40,7 @@
parse_vars,
prune_vars_detached_from_observed,
)
from pymc.pytensorf import replace_vars_in_graphs, toposort_replace
from pymc.pytensorf import replace_vars_in_graphs, rvs_in_graph, toposort_replace
from pymc.util import get_transformed_name, get_untransformed_name


Expand Down
34 changes: 18 additions & 16 deletions pymc/model_graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,11 @@
from typing import Any, cast

from pytensor import function
from pytensor.graph.basic import ancestors, walk
from pytensor.graph.basic import Variable, ancestors, walk
from pytensor.tensor.shape import Shape
from pytensor.tensor.variable import TensorVariable

from pymc.model.core import modelcontext
from pymc.pytensorf import _cheap_eval_mode
from pymc.util import VarName, get_default_varnames, get_var_name

__all__ = (
Expand Down Expand Up @@ -73,7 +73,7 @@ def create_plate_label_with_dim_length(


def fast_eval(var):
return function([], var, mode="FAST_COMPILE")()
return function([], var, mode=_cheap_eval_mode)()


class NodeType(str, Enum):
Expand All @@ -88,7 +88,7 @@ class NodeType(str, Enum):

@dataclass
class NodeInfo:
var: TensorVariable
var: Variable
node_type: NodeType

def __hash__(self):
Expand All @@ -108,10 +108,10 @@ def __eq__(self, other) -> bool:


GraphvizNodeKwargs = dict[str, Any]
NodeFormatter = Callable[[TensorVariable], GraphvizNodeKwargs]
NodeFormatter = Callable[[Variable], GraphvizNodeKwargs]


def default_potential(var: TensorVariable) -> GraphvizNodeKwargs:
def default_potential(var: Variable) -> GraphvizNodeKwargs:
"""Return default data for potential in the graph."""
return {
"shape": "octagon",
Expand All @@ -120,17 +120,19 @@ def default_potential(var: TensorVariable) -> GraphvizNodeKwargs:
}


def random_variable_symbol(var: TensorVariable) -> str:
def random_variable_symbol(var: Variable) -> str:
"""Get the symbol of the random variable."""
symbol = var.owner.op.__class__.__name__
op = var.owner.op

if symbol.endswith("RV"):
symbol = symbol[:-2]
if name := getattr(op, "name", None):
symbol = name[0].upper() + name[1:]
else:
symbol = op.__class__.__name__.removesuffix("RV")

return symbol


def default_free_rv(var: TensorVariable) -> GraphvizNodeKwargs:
def default_free_rv(var: Variable) -> GraphvizNodeKwargs:
"""Return default data for free RV in the graph."""
symbol = random_variable_symbol(var)

Expand All @@ -141,7 +143,7 @@ def default_free_rv(var: TensorVariable) -> GraphvizNodeKwargs:
}


def default_observed_rv(var: TensorVariable) -> GraphvizNodeKwargs:
def default_observed_rv(var: Variable) -> GraphvizNodeKwargs:
"""Return default data for observed RV in the graph."""
symbol = random_variable_symbol(var)

Expand All @@ -152,7 +154,7 @@ def default_observed_rv(var: TensorVariable) -> GraphvizNodeKwargs:
}


def default_deterministic(var: TensorVariable) -> GraphvizNodeKwargs:
def default_deterministic(var: Variable) -> GraphvizNodeKwargs:
"""Return default data for the deterministic in the graph."""
return {
"shape": "box",
Expand All @@ -161,7 +163,7 @@ def default_deterministic(var: TensorVariable) -> GraphvizNodeKwargs:
}


def default_data(var: TensorVariable) -> GraphvizNodeKwargs:
def default_data(var: Variable) -> GraphvizNodeKwargs:
"""Return default data for the data in the graph."""
return {
"shape": "box",
Expand Down Expand Up @@ -239,7 +241,7 @@ def __init__(self, model):
self._all_vars = {model[var_name] for var_name in self._all_var_names}
self.var_list = self.model.named_vars.values()

def get_parent_names(self, var: TensorVariable) -> set[VarName]:
def get_parent_names(self, var: Variable) -> set[VarName]:
if var.owner is None:
return set()

Expand Down Expand Up @@ -345,7 +347,7 @@ def get_plates(
dim_name: fast_eval(value).item() for dim_name, value in self.model.dim_lengths.items()
}
var_shapes: dict[str, tuple[int, ...]] = {
var_name: tuple(fast_eval(self.model[var_name].shape))
var_name: tuple(map(int, fast_eval(self.model[var_name].shape)))
for var_name in self.vars_to_plot(var_names)
}

Expand Down
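A short sketch of the revised symbol lookup in `random_variable_symbol`, assuming the default PyTensor op names:

```python
# Sketch: the symbol now comes from op.name when available.
import pymc as pm
from pymc.model_graph import random_variable_symbol

x = pm.Normal.dist()
print(random_variable_symbol(x))  # "Normal", capitalized from op.name
```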
174 changes: 86 additions & 88 deletions pymc/pytensorf.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,18 +33,20 @@
clone_get_equiv,
equal_computations,
graph_inputs,
walk,
)
from pytensor.graph.fg import FunctionGraph, Output
from pytensor.graph.op import HasInnerGraph
from pytensor.scalar.basic import Cast
from pytensor.scan.op import Scan
from pytensor.tensor.basic import _as_tensor_variable
from pytensor.tensor.elemwise import Elemwise
from pytensor.tensor.random.op import RandomVariable
from pytensor.tensor.random.op import RandomVariable, RNGConsumerOp
from pytensor.tensor.random.type import RandomType
from pytensor.tensor.random.var import RandomGeneratorSharedVariable
from pytensor.tensor.rewriting.basic import topo_unconditional_constant_folding
from pytensor.tensor.rewriting.shape import ShapeFeature
from pytensor.tensor.sharedvar import SharedVariable, TensorSharedVariable
from pytensor.tensor.sharedvar import SharedVariable
from pytensor.tensor.subtensor import AdvancedIncSubtensor, AdvancedIncSubtensor1
from pytensor.tensor.variable import TensorVariable

Expand Down Expand Up @@ -133,6 +135,9 @@ def dataframe_to_tensor_variable(df: pd.DataFrame, *args, **kwargs) -> TensorVar
return pt.as_tensor_variable(df.to_numpy(), *args, **kwargs)


_cheap_eval_mode = Mode(linker="py", optimizer="minimum_compile")


def extract_obs_data(x: TensorVariable) -> np.ndarray:
"""Extract data from observed symbolic variables.
Expand Down Expand Up @@ -161,15 +166,31 @@ def extract_obs_data(x: TensorVariable) -> np.ndarray:
mask[mask_idx] = 1
return np.ma.MaskedArray(array_data, mask)

from pymc.logprob.utils import rvs_in_graph

if not inputvars(x) and not rvs_in_graph(x):
cheap_eval_mode = Mode(linker="py", optimizer=None)
return x.eval(mode=cheap_eval_mode)
return x.eval(mode=_cheap_eval_mode)

raise TypeError(f"Data cannot be extracted from {x}")


def expand_inner_graph(r):
if (node := r.owner) is not None:
inputs = list(reversed(node.inputs))

if isinstance(node.op, HasInnerGraph):
inputs += node.op.inner_outputs

return inputs


def rvs_in_graph(vars: Variable | Sequence[Variable], rv_ops=None) -> set[Variable]:
"""Assert that there are no random nodes in a graph."""
return {
var
for var in walk(makeiter(vars), expand_inner_graph, False)
if (var.owner and isinstance(var.owner.op, RNGConsumerOp))
}


def replace_vars_in_graphs(
graphs: Iterable[Variable],
replacements: dict[Variable, Variable],
Expand Down Expand Up @@ -279,7 +300,9 @@ def smarttypeX(x):

def gradient1(f, v):
"""Flat gradient of f wrt v."""
return pt.flatten(grad(f, v, disconnected_inputs="warn"))
return pt.as_tensor(
grad(f, v, disconnected_inputs="warn"), allow_xtensor_conversion=True
).ravel()


empty_gradient = pt.zeros(0, dtype="float32")
Expand Down Expand Up @@ -398,11 +421,11 @@ def make_shared_replacements(point, vars, model):

def join_nonshared_inputs(
point: dict[str, np.ndarray],
outputs: list[TensorVariable],
inputs: list[TensorVariable],
shared_inputs: dict[TensorVariable, TensorSharedVariable] | None = None,
outputs: Sequence[Variable],
inputs: Sequence[Variable],
shared_inputs: dict[Variable, Variable] | None = None,
make_inputs_shared: bool = False,
) -> tuple[list[TensorVariable], TensorVariable]:
) -> tuple[Sequence[Variable], TensorVariable]:
"""
Create new outputs and input TensorVariables where the non-shared inputs are joined in a single raveled vector input.
Expand Down Expand Up @@ -527,7 +550,9 @@ def join_nonshared_inputs(
if not inputs:
raise ValueError("Empty list of input variables.")

raveled_inputs = pt.concatenate([var.ravel() for var in inputs])
raveled_inputs = pt.concatenate(
[pt.as_tensor(var, allow_xtensor_conversion=True).ravel() for var in inputs]
)

if not make_inputs_shared:
tensor_type = raveled_inputs.type
Expand All @@ -539,12 +564,15 @@ def join_nonshared_inputs(
if pytensor.config.compute_test_value != "off":
joined_inputs.tag.test_value = raveled_inputs.tag.test_value

replace: dict[TensorVariable, TensorVariable] = {}
replace: dict[Variable, Variable] = {}
last_idx = 0
for var in inputs:
shape = point[var.name].shape
arr_len = np.prod(shape, dtype=int)
replace[var] = joined_inputs[last_idx : last_idx + arr_len].reshape(shape).astype(var.dtype)
replacement_var = (
joined_inputs[last_idx : last_idx + arr_len].reshape(shape).astype(var.dtype)
)
replace[var] = var.type.filter_variable(replacement_var)
last_idx += arr_len

if shared_inputs is not None:
Expand Down Expand Up @@ -718,8 +746,6 @@ def scan_step(xtm1):
xs_draws = pm.draw(xs, draws=10)
"""
# Avoid circular import
from pymc.distributions.distribution import SymbolicRandomVariable

def find_default_update(clients, rng: Variable) -> None | Variable:
rng_clients = clients.get(rng, None)
Expand Down Expand Up @@ -762,48 +788,47 @@ def find_default_update(clients, rng: Variable) -> None | Variable:
[client, _] = rng_clients[0]

# RNG is an output of the function, this is not a problem
if isinstance(client.op, Output):
return None
client_op = client.op

# RNG is used by another operator, which should output an update for the RNG
if isinstance(client.op, RandomVariable):
# RandomVariable first output is always the update of the input RNG
next_rng = client.outputs[0]

elif isinstance(client.op, SymbolicRandomVariable):
# SymbolicRandomVariable have an explicit method that returns an
# update mapping for their RNG(s)
next_rng = client.op.update(client).get(rng)
if next_rng is None:
raise ValueError(
f"No update found for at least one RNG used in SymbolicRandomVariable Op {client.op}"
)
elif isinstance(client.op, Scan):
# Check if any shared output corresponds to the RNG
rng_idx = client.inputs.index(rng)
io_map = client.op.get_oinp_iinp_iout_oout_mappings()["outer_out_from_outer_inp"]
out_idx = io_map.get(rng_idx, -1)
if out_idx != -1:
next_rng = client.outputs[out_idx]
else: # No break
raise ValueError(
f"No update found for at least one RNG used in Scan Op {client.op}.\n"
"You can use `pytensorf.collect_default_updates` inside the Scan function to return updates automatically."
)
elif isinstance(client.op, OpFromGraph):
try:
next_rng = collect_default_updates_inner_fgraph(client).get(rng)
match client_op:
case Output():
return None
# Otherwise, RNG is used by another operator, which should output an update for the RNG
case RandomVariable():
# RandomVariable first output is always the update of the input RNG
next_rng = client.outputs[0]
case RNGConsumerOp():
# RNGConsumerOps have an explicit method that returns an update mapping for their RNG(s)
# RandomVariable is a subclass of RNGConsumerOp, but we specialize above for speedup
next_rng = client_op.update(client).get(rng)
if next_rng is None:
# OFG either does not make use of this RNG or inconsistent use that will have emitted a warning
return None
except ValueError as exc:
raise ValueError(
f"No update found for at least one RNG used in OpFromGraph Op {client.op}.\n"
"You can use `pytensorf.collect_default_updates` and include those updates as outputs."
) from exc
else:
# We don't know how this RNG should be updated. The user should provide an update manually
return None
raise ValueError(f"No update found for at least one RNG used in {client_op}")
case Scan():
# Check if any shared output corresponds to the RNG
rng_idx = client.inputs.index(rng)
io_map = client_op.get_oinp_iinp_iout_oout_mappings()["outer_out_from_outer_inp"]
out_idx = io_map.get(rng_idx, -1)
if out_idx != -1:
next_rng = client.outputs[out_idx]
else: # No break
raise ValueError(
f"No update found for at least one RNG used in Scan Op {client_op}.\n"
"You can use `pytensorf.collect_default_updates` inside the Scan function to return updates automatically."
)
case OpFromGraph():
try:
next_rng = collect_default_updates_inner_fgraph(client).get(rng)
if next_rng is None:
# OFG either does not make use of this RNG or inconsistent use that will have emitted a warning
return None
except ValueError as exc:
raise ValueError(
f"No update found for at least one RNG used in OpFromGraph Op {client_op}.\n"
"You can use `pytensorf.collect_default_updates` and include those updates as outputs."
) from exc
case _:
# We don't know how this RNG should be updated. The user should provide an update manually
return None

# Recurse until we find final update for RNG
nested_next_rng = find_default_update(clients, next_rng)
Expand Down Expand Up @@ -992,43 +1017,16 @@ def as_symbolic_string(x, **kwargs):


def toposort_replace(
fgraph: FunctionGraph, replacements: Sequence[tuple[Variable, Variable]], reverse: bool = False
fgraph: FunctionGraph,
replacements: Sequence[tuple[Variable, Variable]],
reverse: bool = False,
) -> None:
"""Replace multiple variables in place in topological order."""
fgraph_toposort = {node: i for i, node in enumerate(fgraph.toposort())}
_inner_fgraph_toposorts = {} # Cache inner toposorts

def _nested_toposort_index(var, fgraph_toposort) -> tuple[int]:
"""Compute position of variable in fgraph toposort.
When a variable is an OpFromGraph output, extend output with the toposort index of the inner graph(s).
This allows ordering variables that come from the same OpFromGraph.
"""
if not var.owner:
return (-1,)

index = fgraph_toposort[var.owner]

# Recurse into OpFromGraphs
# TODO: Could also recurse into Scans
if isinstance(var.owner.op, OpFromGraph):
inner_fgraph = var.owner.op.fgraph

if inner_fgraph not in _inner_fgraph_toposorts:
_inner_fgraph_toposorts[inner_fgraph] = {
node: i for i, node in enumerate(inner_fgraph.toposort())
}

inner_fgraph_toposort = _inner_fgraph_toposorts[inner_fgraph]
inner_var = inner_fgraph.outputs[var.owner.outputs.index(var)]
return (index, *_nested_toposort_index(inner_var, inner_fgraph_toposort))
else:
return (index,)

fgraph_toposort[None] = -1 # Variables without owner are not in the toposort
sorted_replacements = sorted(
replacements,
key=lambda pair: _nested_toposort_index(pair[0], fgraph_toposort),
key=lambda pair: fgraph_toposort[pair[0].owner],
reverse=reverse,
)
fgraph.replace_all(sorted_replacements, import_missing=True)
Expand Down
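A minimal sketch of the relocated `rvs_in_graph` helper (illustrative graph):

```python
# Sketch of the helper now living in pymc.pytensorf.
import pymc as pm
from pymc.pytensorf import rvs_in_graph

x = pm.Normal.dist()
y = x + 1.0
print(rvs_in_graph([y]))  # a one-element set containing the Normal draw that feeds y
```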
3 changes: 1 addition & 2 deletions pymc/sampling/forward.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,10 +55,9 @@
from pymc.backends.base import MultiTrace
from pymc.blocking import PointType
from pymc.distributions.shape_utils import change_dist_size
from pymc.logprob.utils import rvs_in_graph
from pymc.model import Model, modelcontext
from pymc.progress_bar import CustomProgress, default_progress_theme
from pymc.pytensorf import compile
from pymc.pytensorf import compile, rvs_in_graph
from pymc.util import (
RandomState,
_get_seeds_per_chain,
Expand Down
8 changes: 6 additions & 2 deletions pymc/step_methods/metropolis.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
# limitations under the License.
from collections.abc import Callable
from dataclasses import field
from typing import Any
from typing import Any, cast

import numpy as np
import numpy.random as nr
Expand All @@ -22,6 +22,7 @@
import scipy.special

from pytensor import tensor as pt
from pytensor.graph.basic import Variable
from pytensor.graph.fg import MissingInputError
from pytensor.tensor.random.basic import BernoulliRV, CategoricalRV
from rich.progress import TextColumn
Expand Down Expand Up @@ -1253,7 +1254,10 @@ def delta_logp(
compile_kwargs: dict | None,
) -> pytensor.compile.Function:
[logp0], inarray0 = join_nonshared_inputs(
point=point, outputs=[logp], inputs=vars, shared_inputs=shared
point=point,
outputs=[logp],
inputs=vars,
shared_inputs=cast(dict[Variable, Variable], shared),
)

tensor_type = inarray0.type
Expand Down
35 changes: 30 additions & 5 deletions pymc/testing.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,11 +24,13 @@
from arviz import InferenceData
from numpy import random as nr
from numpy import testing as npt
from pytensor.compile import SharedVariable
from pytensor.compile.mode import Mode
from pytensor.graph.basic import Variable
from pytensor.graph.basic import Constant, Variable, equal_computations, graph_inputs
from pytensor.graph.rewriting.basic import in2out
from pytensor.tensor import TensorVariable
from pytensor.tensor.random.op import RandomVariable
from pytensor.tensor.random.type import RandomType
from scipy import special as sp
from scipy import stats as st

Expand All @@ -41,9 +43,8 @@
from pymc.logprob.utils import (
ParameterValueError,
local_check_parameter_to_ninf_switch,
rvs_in_graph,
)
from pymc.pytensorf import compile, floatX, inputvars
from pymc.pytensorf import compile, floatX, inputvars, rvs_in_graph

# This mode can be used for tests where model compilations takes the bulk of the runtime
# AND where we don't care about posterior numerical or sampling stability (e.g., when
Expand Down Expand Up @@ -971,8 +972,7 @@ def seeded_numpy_distribution_builder(dist_name: str) -> Callable:

def assert_no_rvs(vars: Sequence[Variable]) -> None:
"""Assert that there are no `MeasurableOp` nodes in a graph."""
rvs = rvs_in_graph(vars)
if rvs:
if rvs := rvs_in_graph(vars):
raise AssertionError(f"RV found in graph: {rvs}")


Expand Down Expand Up @@ -1086,3 +1086,28 @@ def test_model_inference(mock_pymc_sample):
pm.sample = original_sample
pm.Flat = original_flat
pm.HalfFlat = original_half_flat


def equal_computations_up_to_root(
xs: Sequence[Variable], ys: Sequence[Variable], ignore_rng_values=True
) -> bool:
# Check if graphs are equivalent even if root variables have distinct identities

x_graph_inputs = [var for var in graph_inputs(xs) if not isinstance(var, Constant)]
y_graph_inputs = [var for var in graph_inputs(ys) if not isinstance(var, Constant)]
if len(x_graph_inputs) != len(y_graph_inputs):
return False
for x, y in zip(x_graph_inputs, y_graph_inputs):
if x.type != y.type:
return False
if x.name != y.name:
return False
if isinstance(x, SharedVariable):
if not isinstance(y, SharedVariable):
return False
if isinstance(x.type, RandomType) and ignore_rng_values:
continue
if not x.type.values_eq(x.get_value(), y.get_value()):
return False

return equal_computations(xs, ys, in_xs=x_graph_inputs, in_ys=y_graph_inputs) # type: ignore[arg-type]
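A sketch of the intended use of the new `equal_computations_up_to_root` helper on toy graphs:

```python
# Sketch: graphs compare equal even when their root variables are distinct objects,
# as long as the roots share type and name.
import pytensor.tensor as pt

from pymc.testing import equal_computations_up_to_root

x1 = pt.vector("x")
x2 = pt.vector("x")  # a distinct root variable with the same type and name
assert equal_computations_up_to_root([x1 + 1.0], [x2 + 1.0])
assert not equal_computations_up_to_root([x1 + 1.0], [x2 * 2.0])
```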
12 changes: 12 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,9 @@ ignore = [
"D101", # Missing docstring in public class
"D102", # Missing docstring in public method
"D103", # Missing docstring in public function
"D104", # Missing docstring in public package
"D105", # Missing docstring in magic method
"D401", # Ignore Umbridge level of control
]

[tool.ruff.lint.pydocstyle]
Expand All @@ -66,6 +68,13 @@ lines-between-types = 1
"pymc/__init__.py" = [
"E402", # Module level import not at top of file
]
"pymc/dims/__init__.py" = [
"E402", # Module level import not at top of file
]
"pymc/dims/math.py" = [
"F401", # Module imported but unused
"F403", # 'from module import *' used; unable to detect undefined names
]
"pymc/stats/__init__.py" = [
"E402", # Module level import not at top of file
]
Expand All @@ -76,6 +85,9 @@ lines-between-types = 1
"scripts/run_mypy.py" = [
"T201", # No print statements
]
"scripts/publish_release_notes_to_discourse.py" = [
"T201", # No print statements
]
"*.ipynb" = [
"T201", # No print statements
]
Expand Down
2 changes: 1 addition & 1 deletion requirements-dev.txt
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ pandas>=0.24.0
polyagamma
pre-commit>=2.8.0
pymc-sphinx-theme>=0.16.0
pytensor>=2.31.2,<2.32
pytensor>=2.31.7,<2.32
pytest-cov>=2.5
pytest>=3.0
rich>=13.7.1
Expand Down
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ cachetools>=4.2.1
cloudpickle
numpy>=1.25.0
pandas>=0.24.0
pytensor>=2.31.2,<2.32
pytensor>=2.31.7,<2.32
rich>=13.7.1
scipy>=1.4.1
threadpoolctl>=3.1.0,<4.0.0
Expand Down
135 changes: 135 additions & 0 deletions scripts/publish_release_notes_to_discourse.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,135 @@
#!/usr/bin/env python3

import os

import requests


def load_config() -> dict[str, str]:
env_config = {
"DISCOURSE_URL": os.getenv("DISCOURSE_URL"),
"DISCOURSE_API_KEY": os.getenv("DISCOURSE_API_KEY"),
"DISCOURSE_USERNAME": os.getenv("DISCOURSE_USERNAME"),
"DISCOURSE_CATEGORY": os.getenv("DISCOURSE_CATEGORY"),
# Release information from GitHub
"RELEASE_TAG": os.getenv("RELEASE_TAG"),
"RELEASE_BODY": os.getenv("RELEASE_BODY"),
"RELEASE_URL": os.getenv("RELEASE_URL"),
"REPO_NAME": os.getenv("REPO_NAME"),
}

missing_env_values = {key: value for key, value in env_config.items() if value is None}
if missing_env_values:
raise RuntimeError(
f"Missing required environment variables: {', '.join(missing_env_values.keys())}"
)
return env_config


def find_category_id(config: dict[str, str]) -> int:
headers = {
"Api-Key": config["DISCOURSE_API_KEY"],
"Api-Username": config["DISCOURSE_USERNAME"],
"Content-Type": "application/json",
}

category_to_find = config["DISCOURSE_CATEGORY"].lower()
url = f"{config['DISCOURSE_URL']}/categories.json"
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
data = response.json()
except Exception as e:
print("Error fetching categories")
raise

if data.get("category_list") and data["category_list"].get("categories"):
categories = data["category_list"]["categories"]

for category in categories:
cat_id = category.get("id")
cat_name = category.get("name")
if cat_name.lower() == category_to_find:
return int(cat_id)

raise ValueError(f"Category '{category_to_find}' not found")


def format_release_content(config: dict[str, str]) -> tuple[str, str]:
title = f"🚀 Release {config['RELEASE_TAG']}"
repo_name = config["REPO_NAME"].split("/")[1]
content = f"""A new release of **{repo_name}** is now available!
## 📦 Release Information
- **Version:** `{config["RELEASE_TAG"]}`
- **Repository:** [{config["REPO_NAME"]}](https://github.com/{config["REPO_NAME"]})
- **Release Page:** [View on GitHub]({config["RELEASE_URL"]})
- Note: It may take some time for the release to appear on PyPI and conda-forge.
## đź“‹ Release Notes
{config["RELEASE_BODY"]}
---
*This post was automatically generated from the GitHub release.*
"""

return title, content


def publish_release_to_discourse(config: dict[str, str]) -> bool:
print("🎯 GitHub Release to Discourse Publisher")
print(f"Release: {config['RELEASE_TAG']}")
print(f"Repository: {config['REPO_NAME']}")
print(f"Target Forum: {config['DISCOURSE_URL']}")
print(f"Target Category: {config['DISCOURSE_CATEGORY']}")
print("-" * 50)

category_id = find_category_id(config)
print(f"Publishing to category: {config['DISCOURSE_CATEGORY']} (ID: {category_id})")

# Format the release content
title, content = format_release_content(config)

# Create the topic data
topic_data = {"title": title, "raw": content, "category": category_id}

# Post to Discourse
headers = {
"Api-Key": config["DISCOURSE_API_KEY"],
"Api-Username": config["DISCOURSE_USERNAME"],
"Content-Type": "application/json",
}
url = f"{config['DISCOURSE_URL']}/posts.json"

try:
response = requests.post(url, headers=headers, json=topic_data)
response.raise_for_status()

data = response.json()
topic_id = data.get("topic_id")
post_id = data.get("id")

print("âś… Release published successfully!")
print(f"Topic ID: {topic_id}")
print(f"Post ID: {post_id}")
print(f"URL: {config['DISCOURSE_URL']}/t/{topic_id}")
return True

except requests.exceptions.RequestException as e:
print(f"❌ Error publishing release: {e}")
if hasattr(e, "response") and e.response is not None:
print(f"Response status: {e.response.status_code}")
try:
error_data = e.response.json()
print(f"Error details: {error_data}")
except Exception:
print(f"Response content: {e.response.text}")
raise


if __name__ == "__main__":
config = load_config()
publish_release_to_discourse(config)
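A hypothetical local dry run of the script's helpers, assuming they are importable from the module; every value below is a placeholder, not a real credential or category:

```python
# Hypothetical dry run: populate the required env vars, then exercise the helpers.
import os

os.environ.update(
    {
        "DISCOURSE_URL": "https://discourse.pymc.io",
        "DISCOURSE_API_KEY": "<api-key>",
        "DISCOURSE_USERNAME": "pymc-bot",
        "DISCOURSE_CATEGORY": "Announcements",
        "RELEASE_TAG": "vX.Y.Z",
        "RELEASE_BODY": "## What's new\n- ...",
        "RELEASE_URL": "https://github.com/pymc-devs/pymc/releases/tag/vX.Y.Z",
        "REPO_NAME": "pymc-devs/pymc",
    }
)

config = load_config()  # raises if any of the variables above were missing
title, body = format_release_content(config)
print(title)  # "🚀 Release vX.Y.Z"
```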
25 changes: 25 additions & 0 deletions tests/backends/test_arviz.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import warnings

import numpy as np
Expand Down Expand Up @@ -848,3 +849,27 @@ def test_zero_size(self):
assert tuple(pl[0]) == ("x",)
assert pl[0]["x"].shape == (0, 5)
assert pl[0]["x"].dtype == np.float64


def test_incompatible_coordinate_lengths():
with pm.Model(coords={"a": [-1, -2, -3]}) as m:
x = pm.Normal("x", dims="a")
y = pm.Deterministic("y", x[1:], dims=("a",))

with pytest.warns(
UserWarning,
match=re.escape(
"Incompatible coordinate length of 3 for dimension 'a' of variable 'y'"
),
):
prior = pm.sample_prior_predictive(draws=1).prior.squeeze(("chain", "draw"))
assert prior.x.dims == prior.y.dims == ("a",)
assert prior.x.shape == prior.y.shape == (3,)
assert np.isnan(prior.y.values[-1])
assert list(prior.coords["a"]) == [0, 1, 2]

pm.backends.arviz.RAISE_ON_INCOMPATIBLE_COORD_LENGTHS = True
with pytest.raises(ValueError):
pm.sample_prior_predictive(draws=1)

pm.backends.arviz.RAISE_ON_INCOMPATIBLE_COORD_LENGTHS = False
13 changes: 13 additions & 0 deletions tests/dims/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Copyright 2025 - present The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
13 changes: 13 additions & 0 deletions tests/dims/distributions/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Copyright 2025 - present The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.