| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,74 @@ | ||
| # Copyright 2025 - present The PyMC Developers | ||
| # | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. | ||
|
|
||
|
|
||
def __init__():
    """Make PyMC aware of the xtensor functionality.

    This should be done eagerly once development matures.

    Note: this is a module-level initializer (called once below, then deleted),
    not a class ``__init__``. All imports are local so that the xtensor machinery
    is only pulled in when ``pymc.dims`` is imported.
    """
    import datetime
    import warnings

    from pytensor.compile import optdb

    from pymc.initial_point import initial_point_rewrites_db
    from pymc.logprob.abstract import MeasurableOp
    from pymc.logprob.rewriting import logprob_rewrites_db

    # Filter PyTensor xtensor warning, we emit our own warning
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        import pytensor.xtensor

    from pytensor.xtensor.vectorization import XRV

    # Make PyMC aware of xtensor functionality
    MeasurableOp.register(XRV)
    # Register the xtensor lowering rewrites both early ("basic") and late
    # ("cleanup") in the logprob pipeline, and once for initial-point creation.
    logprob_rewrites_db.register(
        "pre_lower_xtensor", optdb.query("+lower_xtensor"), "basic", position=0.1
    )
    logprob_rewrites_db.register(
        "post_lower_xtensor", optdb.query("+lower_xtensor"), "cleanup", position=5.1
    )
    initial_point_rewrites_db.register(
        "lower_xtensor", optdb.query("+lower_xtensor"), "basic", position=0.1
    )

    # TODO: Better model of probability of bugs
    # Heuristic: assume bugs become less likely the longer the module has been
    # bug-free relative to its initial buggy period; warn while p is non-negligible.
    day_of_conception = datetime.date(2025, 6, 17)
    day_of_last_bug = datetime.date(2025, 6, 30)
    today = datetime.date.today()
    days_with_bugs = (day_of_last_bug - day_of_conception).days
    days_without_bugs = (today - day_of_last_bug).days
    p = 1 - (days_without_bugs / (days_without_bugs + days_with_bugs + 10))
    if p > 0.05:
        warnings.warn(
            f"The `pymc.dims` module is experimental and may contain critical bugs (p={p:.3f}).\n"
            "Please report any issues you encounter at https://github.com/pymc-devs/pymc/issues.\n"
            "API changes are expected in future releases.\n",
            UserWarning,
            stacklevel=2,
        )


# Run the one-shot initializer and remove it from the module namespace.
__init__()
del __init__
|
|
||
| from pytensor.xtensor import as_xtensor, broadcast, concat, dot, full_like, ones_like, zeros_like | ||
| from pytensor.xtensor.basic import tensor_from_xtensor | ||
|
|
||
| from pymc.dims import math | ||
| from pymc.dims.distributions import * | ||
| from pymc.dims.model import Data, Deterministic, Potential |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,15 @@ | ||
| # Copyright 2025 - present The PyMC Developers | ||
| # | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. | ||
| from pymc.dims.distributions.scalar import * | ||
| from pymc.dims.distributions.vector import * |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,284 @@ | ||
| # Copyright 2025 - present The PyMC Developers | ||
| # | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. | ||
| from collections.abc import Callable, Sequence | ||
| from itertools import chain | ||
| from typing import cast | ||
|
|
||
| import numpy as np | ||
|
|
||
| from pytensor.graph import node_rewriter | ||
| from pytensor.graph.basic import Variable | ||
| from pytensor.tensor.elemwise import DimShuffle | ||
| from pytensor.tensor.random.op import RandomVariable | ||
| from pytensor.xtensor import as_xtensor | ||
| from pytensor.xtensor.basic import XTensorFromTensor, xtensor_from_tensor | ||
| from pytensor.xtensor.type import XTensorVariable | ||
|
|
||
| from pymc import SymbolicRandomVariable, modelcontext | ||
| from pymc.dims.distributions.transforms import DimTransform, log_odds_transform, log_transform | ||
| from pymc.distributions.distribution import _support_point, support_point | ||
| from pymc.distributions.shape_utils import DimsWithEllipsis, convert_dims_with_ellipsis | ||
| from pymc.logprob.abstract import MeasurableOp, _logprob | ||
| from pymc.logprob.rewriting import measurable_ir_rewrites_db | ||
| from pymc.logprob.tensor import MeasurableDimShuffle | ||
| from pymc.logprob.utils import filter_measurable_variables | ||
| from pymc.util import UNSET | ||
|
|
||
|
|
||
@_support_point.register(DimShuffle)
def dimshuffle_support_point(ds_op, _, rv):
    """Support point of a DimShuffle: transpose the support point of the inner RV.

    We implement support point for DimShuffle because
    DimDistribution can register a transposed version of a variable.
    """
    return ds_op(support_point(rv))
|
|
||
|
|
||
@_support_point.register(XTensorFromTensor)
def xtensor_from_tensor_support_point(xtensor_op, _, rv):
    """Support point of XTensorFromTensor: wrap the inner RV's support point."""
    # We remove the xtensor_from_tensor operation, so initial_point doesn't have to do a further lowering
    return xtensor_op(support_point(rv))
|
|
||
|
|
||
class MeasurableXTensorFromTensor(MeasurableOp, XTensorFromTensor):
    """XTensorFromTensor tagged as measurable so logprob inference can see through it.

    ``core_dims`` records which of ``dims`` correspond to the core (support)
    dimensions of the wrapped RV, or ``None`` when they must be inferred
    positionally (inner core dims assumed rightmost).
    """

    __props__ = ("dims", "core_dims")  # type: ignore[assignment]

    def __init__(self, dims, core_dims):
        super().__init__(dims=dims)
        # Normalize to a tuple so the op hashes/compares consistently via __props__.
        self.core_dims = tuple(core_dims) if core_dims is not None else None
|
|
||
|
|
||
@node_rewriter([XTensorFromTensor])
def find_measurable_xtensor_from_tensor(fgraph, node) -> list[XTensorVariable] | None:
    """Replace an XTensorFromTensor of a measurable variable by a measurable version.

    Two cases are handled:

    1. The input is already measurable: wrap it directly, deferring core-dim
       inference (``core_dims=None``) to the logprob implementation.
    2. The input is a transpose (DimShuffle) of a raw RV: make the transpose
       measurable ourselves and use the transpose permutation to recover which
       named dims are the RV's core dims.
    """
    if isinstance(node.op, MeasurableXTensorFromTensor):
        # Already rewritten; nothing to do.
        return None

    xs = filter_measurable_variables(node.inputs)

    if not xs:
        # Check if we have a transposition instead
        # The rewrite that introduces measurable transposes refuses to apply to multivariate RVs
        # So we have a chance of inferring the core dims!
        [ds] = node.inputs
        ds_node = ds.owner
        if not (
            ds_node is not None
            and isinstance(ds_node.op, DimShuffle)
            and ds_node.op.is_transpose
            and filter_measurable_variables(ds_node.inputs)
        ):
            return None
        [x] = ds_node.inputs
        if not (
            x.owner is not None and isinstance(x.owner.op, RandomVariable | SymbolicRandomVariable)
        ):
            return None

        # Re-create the transpose as a measurable op so logprob can trace through it.
        measurable_x = MeasurableDimShuffle(**ds_node.op._props_dict())(x)  # type: ignore[attr-defined]

        ndim_supp = x.owner.op.ndim_supp
        if ndim_supp:
            # Undo the transpose to find which named dims sat on the RV's
            # rightmost (core/support) axes before shuffling.
            inverse_transpose = np.argsort(ds_node.op.shuffle)
            dims = node.op.dims
            dims_before_transpose = tuple(dims[i] for i in inverse_transpose)
            core_dims = dims_before_transpose[-ndim_supp:]
        else:
            core_dims = ()

        new_out = MeasurableXTensorFromTensor(dims=node.op.dims, core_dims=core_dims)(measurable_x)
    else:
        # If this happens we know there's no measurable transpose in between and we can
        # safely infer the core_dims positionally when the inner logp is returned
        new_out = MeasurableXTensorFromTensor(dims=node.op.dims, core_dims=None)(*node.inputs)
    return [cast(XTensorVariable, new_out)]
|
|
||
|
|
||
@_logprob.register(MeasurableXTensorFromTensor)
def measurable_xtensor_from_tensor(op, values, rv, **kwargs):
    """Compute the logp of the inner tensor RV and re-wrap it with named dims."""
    # Dispatch to the inner measurable op's logprob, unwrapping the xtensor
    # value variables to their underlying tensors.
    rv_logp = _logprob(rv.owner.op, tuple(v.values for v in values), *rv.owner.inputs, **kwargs)
    if op.core_dims is None:
        # The core_dims of the inner rv are on the right
        dims = op.dims[: rv_logp.ndim]
    else:
        # We inferred where the core_dims are!
        dims = [d for d in op.dims if d not in op.core_dims]
    return xtensor_from_tensor(rv_logp, dims=dims)
|
|
||
|
|
||
# Register the rewrite so logprob IR inference can see through XTensorFromTensor.
measurable_ir_rewrites_db.register(
    "measurable_xtensor_from_tensor", find_measurable_xtensor_from_tensor, "basic", "xtensor"
)
|
|
||
|
|
||
class DimDistribution:
    """Base class for PyMC distribution that wrap pytensor.xtensor.random operations, and follow xarray-like semantics.

    Subclasses set ``xrv_op`` (the xtensor-aware random op) and optionally a
    ``default_transform``. Calling the class registers a model variable;
    calling ``.dist`` builds a standalone variable.
    """

    # The xtensor random op (or callable building one) used by `dist`.
    xrv_op: Callable
    # Transform applied to unobserved variables unless overridden per-call.
    default_transform: DimTransform | None = None

    @staticmethod
    def _as_xtensor(x):
        """Convert ``x`` to an XTensorVariable, raising a helpful error if it has no dims."""
        try:
            return as_xtensor(x)
        except TypeError:
            raise ValueError(
                f"Variable {x} must have dims associated with it.\n"
                "To avoid subtle bugs, PyMC does not make any assumptions about the dims of parameters.\n"
                "Use `as_xtensor` with the `dims` keyword argument to specify the dims explicitly."
            )

    def __new__(
        cls,
        name: str,
        *dist_params,
        dims: DimsWithEllipsis | None = None,
        initval=None,
        observed=None,
        total_size=None,
        transform=UNSET,
        default_transform=UNSET,
        model=None,
        **kwargs,
    ):
        """Create the distribution and register it in the current model context.

        Parameters
        ----------
        name : str
            Name of the model variable.
        dims : sequence of str and/or Ellipsis, optional
            Desired output dims; may use ``...`` to stand for remaining dims.
        observed : xtensor_like, optional
            Observed data; must carry dims.
        transform, default_transform
            Must be ``DimTransform`` instances (or UNSET/None).

        Returns
        -------
        XTensorVariable
        """
        try:
            model = modelcontext(model)
        except TypeError:
            raise TypeError(
                "No model on context stack, which is needed to instantiate distributions. "
                "Add variable inside a 'with model:' block, or use the '.dist' syntax for a standalone distribution."
            )

        if not isinstance(name, str):
            raise TypeError(f"Name needs to be a string but got: {name}")

        dims = convert_dims_with_ellipsis(dims)
        if dims is None:
            dim_lengths = {}
        else:
            try:
                dim_lengths = {dim: model.dim_lengths[dim] for dim in dims if dim is not Ellipsis}
            except KeyError:
                raise ValueError(
                    f"Not all dims {dims} are part of the model coords. "
                    f"Add them at initialization time or use `model.add_coord` before defining the distribution."
                )

        if observed is not None:
            observed = cls._as_xtensor(observed)

            # Propagate observed dims to dim_lengths
            for observed_dim in observed.type.dims:
                if observed_dim not in dim_lengths:
                    dim_lengths[observed_dim] = model.dim_lengths[observed_dim]

        rv = cls.dist(*dist_params, dim_lengths=dim_lengths, **kwargs)

        # User provided dims must specify all dims or use ellipsis
        if dims is not None:
            if (... not in dims) and (set(dims) != set(rv.type.dims)):
                raise ValueError(
                    f"Provided dims {dims} do not match the distribution's output dims {rv.type.dims}. "
                    "Use ellipsis to specify all other dimensions."
                )
            # Use provided dims to transpose the output to the desired order
            rv = rv.transpose(*dims)

        rv_dims = rv.type.dims
        if observed is None:
            if default_transform is UNSET:
                default_transform = cls.default_transform
        else:
            # Align observed dims with those of the RV
            # TODO: If this fails give a more informative error message
            observed = observed.transpose(*rv_dims)

        # Check user didn't pass regular transforms
        if transform not in (UNSET, None):
            if not isinstance(transform, DimTransform):
                raise TypeError(
                    f"Transform must be a DimTransform, from pymc.dims.transforms, but got {type(transform)}."
                )
        if default_transform not in (UNSET, None):
            if not isinstance(default_transform, DimTransform):
                raise TypeError(
                    f"default_transform must be a DimTransform, from pymc.dims.transforms, but got {type(default_transform)}."
                )

        rv = model.register_rv(
            rv,
            name=name,
            observed=observed,
            total_size=total_size,
            dims=rv_dims,
            transform=transform,
            default_transform=default_transform,
            initval=initval,
        )

        # register_rv returns a tensor variable; re-attach the named dims.
        return as_xtensor(rv, dims=rv_dims)

    @classmethod
    def dist(
        cls,
        dist_params,
        *,
        dim_lengths: dict[str, Variable | int] | None = None,
        core_dims: str | Sequence[str] | None = None,
        **kwargs,
    ) -> XTensorVariable:
        """Build a standalone variable (not registered in any model).

        ``dim_lengths`` maps extra (batch) dim names to lengths; dims already
        implied by the parameters or ``core_dims`` are excluded automatically.
        """
        for invalid_kwarg in ("size", "shape", "dims"):
            if invalid_kwarg in kwargs:
                raise TypeError(f"DimDistribution does not accept {invalid_kwarg} argument.")

        # XRV requires only extra_dims, not dims
        dist_params = [cls._as_xtensor(param) for param in dist_params]

        if dim_lengths is None:
            extra_dims = None
        else:
            # Exclude dims that are implied by the parameters or core_dims
            implied_dims = set(chain.from_iterable(param.type.dims for param in dist_params))
            if core_dims is not None:
                if isinstance(core_dims, str):
                    implied_dims.add(core_dims)
                else:
                    implied_dims.update(core_dims)

            extra_dims = {
                dim: length for dim, length in dim_lengths.items() if dim not in implied_dims
            }
        return cls.xrv_op(*dist_params, extra_dims=extra_dims, core_dims=core_dims, **kwargs)
|
|
||
|
|
||
class VectorDimDistribution(DimDistribution):
    """DimDistribution with non-scalar inputs or outputs; ``core_dims`` is mandatory."""

    @classmethod
    def dist(self, *args, core_dims: str | Sequence[str] | None = None, **kwargs):
        # Add a helpful error message if core_dims is not provided
        if core_dims is None:
            # Fixed: the two message fragments previously concatenated without a
            # separator ("...outputs.Check...").
            raise ValueError(
                f"{self.__name__} requires core_dims to be specified, as it involves non-scalar inputs or outputs.\n"
                "Check the documentation of the distribution for details."
            )
        return super().dist(*args, core_dims=core_dims, **kwargs)
|
|
||
|
|
||
class PositiveDimDistribution(DimDistribution):
    """Base class for positive continuous distributions."""

    # Unconstrain to the real line via log by default.
    default_transform = log_transform
|
|
||
|
|
||
class UnitDimDistribution(DimDistribution):
    """Base class for unit-valued distributions."""

    # Unconstrain the (0, 1) interval via log-odds by default.
    default_transform = log_odds_transform
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,191 @@ | ||
| # Copyright 2025 - present The PyMC Developers | ||
| # | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. | ||
| import pytensor.xtensor as ptx | ||
| import pytensor.xtensor.random as pxr | ||
|
|
||
| from pytensor.xtensor import as_xtensor | ||
|
|
||
| from pymc.dims.distributions.core import ( | ||
| DimDistribution, | ||
| PositiveDimDistribution, | ||
| UnitDimDistribution, | ||
| ) | ||
| from pymc.distributions.continuous import Beta as RegularBeta | ||
| from pymc.distributions.continuous import Gamma as RegularGamma | ||
| from pymc.distributions.continuous import HalfStudentTRV, flat, halfflat | ||
|
|
||
|
|
||
| def _get_sigma_from_either_sigma_or_tau(*, sigma, tau): | ||
| if sigma is not None and tau is not None: | ||
| raise ValueError("Can't pass both tau and sigma") | ||
|
|
||
| if sigma is None and tau is None: | ||
| return 1.0 | ||
|
|
||
| if sigma is not None: | ||
| return sigma | ||
|
|
||
| return ptx.math.reciprocal(ptx.math.sqrt(tau)) | ||
|
|
||
|
|
||
class Flat(DimDistribution):
    """Improper flat distribution over the whole real line (zero logp everywhere)."""

    # Wrap the tensor-level `flat` RV as an xtensor random op.
    xrv_op = pxr.as_xrv(flat)

    @classmethod
    def dist(cls, **kwargs):
        # Flat takes no distribution parameters.
        return super().dist([], **kwargs)
|
|
||
|
|
||
| class HalfFlat(PositiveDimDistribution): | ||
| xrv_op = pxr.as_xrv(halfflat, [], ()) | ||
|
|
||
| @classmethod | ||
| def dist(cls, **kwargs): | ||
| return super().dist([], **kwargs) | ||
|
|
||
|
|
||
class Normal(DimDistribution):
    """Normal distribution with named dims, parametrized by mu and sigma (or tau)."""

    xrv_op = pxr.normal

    @classmethod
    def dist(cls, mu=0, sigma=None, *, tau=None, **kwargs):
        # Resolve the scale from whichever of sigma/tau was provided (default 1.0).
        scale = _get_sigma_from_either_sigma_or_tau(sigma=sigma, tau=tau)
        return super().dist([mu, scale], **kwargs)
|
|
||
|
|
||
class HalfNormal(PositiveDimDistribution):
    """Half-normal distribution, parametrized by sigma (or tau)."""

    xrv_op = pxr.halfnormal

    @classmethod
    def dist(cls, sigma=None, *, tau=None, **kwargs):
        sigma = _get_sigma_from_either_sigma_or_tau(sigma=sigma, tau=tau)
        # Location is fixed at 0.
        return super().dist([0.0, sigma], **kwargs)
|
|
||
|
|
||
class LogNormal(PositiveDimDistribution):
    """Log-normal distribution, parametrized by mu and sigma (or tau) of the underlying normal."""

    xrv_op = pxr.lognormal

    @classmethod
    def dist(cls, mu=0, sigma=None, *, tau=None, **kwargs):
        sigma = _get_sigma_from_either_sigma_or_tau(sigma=sigma, tau=tau)
        return super().dist([mu, sigma], **kwargs)
|
|
||
|
|
||
class StudentT(DimDistribution):
    """Student's t distribution, parametrized by nu, mu and sigma (or precision lam)."""

    xrv_op = pxr.t

    @classmethod
    def dist(cls, nu, mu=0, sigma=None, *, lam=None, **kwargs):
        # lam plays the role of tau (inverse squared scale) here.
        sigma = _get_sigma_from_either_sigma_or_tau(sigma=sigma, tau=lam)
        return super().dist([nu, mu, sigma], **kwargs)
|
|
||
|
|
||
class HalfStudentT(PositiveDimDistribution):
    """Half-Student's t distribution, parametrized by nu and sigma (or lam)."""

    @classmethod
    def dist(cls, nu, sigma=None, *, lam=None, **kwargs):
        sigma = _get_sigma_from_either_sigma_or_tau(sigma=sigma, tau=lam)
        return super().dist([nu, sigma], **kwargs)

    @classmethod
    def xrv_op(self, nu, sigma, core_dims=None, extra_dims=None, rng=None):
        # Unlike other scalar distributions, there is no ready-made xtensor op;
        # build one on the fly from the tensor-level HalfStudentTRV.
        nu = as_xtensor(nu)
        sigma = as_xtensor(sigma)
        core_rv = HalfStudentTRV.rv_op(nu=nu.values, sigma=sigma.values).owner.op
        xop = pxr.as_xrv(core_rv)
        return xop(nu, sigma, core_dims=core_dims, extra_dims=extra_dims, rng=rng)
|
|
||
|
|
||
class Cauchy(DimDistribution):
    """Cauchy distribution, parametrized by location alpha and scale beta."""

    xrv_op = pxr.cauchy

    @classmethod
    def dist(cls, alpha, beta, **kwargs):
        return super().dist([alpha, beta], **kwargs)
|
|
||
|
|
||
class HalfCauchy(PositiveDimDistribution):
    """Half-Cauchy distribution, parametrized by scale beta."""

    xrv_op = pxr.halfcauchy

    @classmethod
    def dist(cls, beta, **kwargs):
        # Location is fixed at 0.
        return super().dist([0.0, beta], **kwargs)
|
|
||
|
|
||
class Beta(UnitDimDistribution):
    """Beta distribution, parametrized by (alpha, beta), (mu, sigma) or (mu, nu)."""

    xrv_op = pxr.beta

    @classmethod
    def dist(cls, alpha=None, beta=None, *, mu=None, sigma=None, nu=None, **kwargs):
        # Delegate the alternative-parametrization conversion to the regular Beta.
        alpha, beta = RegularBeta.get_alpha_beta(alpha=alpha, beta=beta, mu=mu, sigma=sigma, nu=nu)
        return super().dist([alpha, beta], **kwargs)
|
|
||
|
|
||
class Laplace(DimDistribution):
    """Laplace distribution, parametrized by location mu and scale b."""

    xrv_op = pxr.laplace

    @classmethod
    def dist(cls, mu=0, b=1, **kwargs):
        return super().dist([mu, b], **kwargs)
|
|
||
|
|
||
class Exponential(PositiveDimDistribution):
    """Exponential distribution, parametrized by rate ``lam`` or ``scale`` (= 1/lam)."""

    xrv_op = pxr.exponential

    @classmethod
    def dist(cls, lam=None, *, scale=None, **kwargs):
        if lam is not None:
            if scale is not None:
                raise ValueError("Cannot pass both 'lam' and 'scale'. Use one of them.")
            # The underlying op is scale-parametrized.
            scale = 1 / lam
        elif scale is None:
            # Default to a unit scale when neither parameter is given.
            scale = 1.0
        return super().dist([scale], **kwargs)
|
|
||
|
|
||
class Gamma(PositiveDimDistribution):
    """Gamma distribution, parametrized by (alpha, beta) or (mu, sigma).

    The underlying xtensor op is scale-parametrized, so the reciprocal of
    beta is passed to it.
    """

    xrv_op = pxr.gamma

    @classmethod
    def dist(cls, alpha=None, beta=None, *, mu=None, sigma=None, **kwargs):
        if (alpha is not None) and (beta is not None):
            pass
        elif (mu is not None) and (sigma is not None):
            # Use sign of sigma to not let negative sigma fly by
            alpha = (mu**2 / sigma**2) * ptx.math.sign(sigma)
            beta = mu / sigma**2
        else:
            raise ValueError(
                "Incompatible parameterization. Either use alpha and beta, or mu and sigma."
            )
        # NOTE: a subsequent RegularGamma.get_alpha_beta(...) call was removed;
        # alpha and beta are always both set by this point, so it was a no-op
        # pass-through.
        return super().dist([alpha, ptx.math.reciprocal(beta)], **kwargs)
|
|
||
|
|
||
class InverseGamma(PositiveDimDistribution):
    """Inverse-gamma distribution, parametrized by alpha (and optional beta) or (mu, sigma)."""

    xrv_op = pxr.invgamma

    @classmethod
    def dist(cls, alpha=None, beta=None, *, mu=None, sigma=None, **kwargs):
        if alpha is not None:
            # beta defaults to 1 when only alpha is given.
            beta = 1.0 if beta is None else beta
        elif (mu is not None) and (sigma is not None):
            # Use sign of sigma to not let negative sigma fly by
            alpha = ((2 * sigma**2 + mu**2) / sigma**2) * ptx.math.sign(sigma)
            beta = mu * (mu**2 + sigma**2) / sigma**2
        else:
            raise ValueError(
                "Incompatible parameterization. Either use alpha and (optionally) beta, or mu and sigma"
            )
        return super().dist([alpha, beta], **kwargs)
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,95 @@ | ||
| # Copyright 2025 - present The PyMC Developers | ||
| # | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. | ||
| import pytensor.tensor as pt | ||
| import pytensor.xtensor as ptx | ||
|
|
||
| from pymc.logprob.transforms import Transform | ||
|
|
||
|
|
||
class DimTransform(Transform):
    """Base class for transforms that are applied to dim distributions."""
|
|
||
|
|
||
class LogTransform(DimTransform):
    """Log transform for positive-valued dim distributions."""

    name = "log"

    def forward(self, value, *inputs):
        return ptx.math.log(value)

    def backward(self, value, *inputs):
        return ptx.math.exp(value)

    def log_jac_det(self, value, *inputs):
        # log|d/dv exp(v)| = v
        return value


# Shared singleton used as the default transform of positive distributions.
log_transform = LogTransform()
|
|
||
|
|
||
class LogOddsTransform(DimTransform):
    """Log-odds (logit) transform for unit-interval-valued dim distributions."""

    name = "logodds"

    def backward(self, value, *inputs):
        return ptx.math.expit(value)

    def forward(self, value, *inputs):
        return ptx.math.log(value / (1 - value))

    def log_jac_det(self, value, *inputs):
        # log|d/dv expit(v)| = log(sigmoid(v)) + log(1 - sigmoid(v))
        sigmoid_value = ptx.math.sigmoid(value)
        return ptx.math.log(sigmoid_value) + ptx.math.log1p(-sigmoid_value)


# Shared singleton used as the default transform of unit-valued distributions.
log_odds_transform = LogOddsTransform()
|
|
||
|
|
||
class ZeroSumTransform(DimTransform):
    """Transform between a zero-sum constrained space and an unconstrained one.

    ``forward`` drops one entry per constrained dim (n -> n-1 values);
    ``backward`` reconstructs the dropped entry so each constrained dim sums
    to zero. Appears to mirror the tensor-level ZeroSumTransform in
    pymc.distributions.transforms, but on named dims — TODO confirm.
    """

    name = "zerosum"

    def __init__(self, dims: tuple[str, ...]):
        # Named dims along which the zero-sum constraint applies.
        self.dims = dims

    @staticmethod
    def extend_dim(array, dim):
        # n is the length of `dim` after extension.
        n = (array.sizes[dim] + 1).astype("floatX")
        sum_vals = array.sum(dim)
        norm = sum_vals / (pt.sqrt(n) + n)
        fill_val = norm - sum_vals / pt.sqrt(n)

        # Append the reconstructed entry, then shift so the dim sums to zero.
        out = ptx.concat([array, fill_val], dim=dim)
        return out - norm

    @staticmethod
    def reduce_dim(array, dim):
        n = array.sizes[dim].astype("floatX")
        last = array.isel({dim: -1})

        # Invert extend_dim: recover the shift from the dropped last entry.
        sum_vals = -last * pt.sqrt(n)
        norm = sum_vals / (pt.sqrt(n) + n)
        return array.isel({dim: slice(None, -1)}) + norm

    def forward(self, value, *rv_inputs):
        # Unconstrain: drop one entry per constrained dim.
        for dim in self.dims:
            value = self.reduce_dim(value, dim=dim)
        return value

    def backward(self, value, *rv_inputs):
        # Constrain: rebuild each dim so it sums to zero.
        for dim in self.dims:
            value = self.extend_dim(value, dim=dim)
        return value

    def log_jac_det(self, value, *rv_inputs):
        # The Jacobian determinant is constant (orthonormal map), hence zero —
        # expressed as a zero tensor with the reduced dims.
        # Use following once broadcast_like is implemented
        # as_xtensor(0).broadcast_like(value, exclude=self.dims)`
        return value.sum(self.dims) * 0
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,198 @@ | ||
| # Copyright 2025 - present The PyMC Developers | ||
| # | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. | ||
| import pytensor.xtensor as ptx | ||
| import pytensor.xtensor.random as ptxr | ||
|
|
||
| from pytensor.tensor import as_tensor | ||
| from pytensor.xtensor import as_xtensor | ||
| from pytensor.xtensor import random as pxr | ||
|
|
||
| from pymc.dims.distributions.core import VectorDimDistribution | ||
| from pymc.dims.distributions.transforms import ZeroSumTransform | ||
| from pymc.distributions.multivariate import ZeroSumNormalRV | ||
| from pymc.util import UNSET | ||
|
|
||
|
|
||
class Categorical(VectorDimDistribution):
    """Categorical distribution over named dims.

    Parameters
    ----------
    p : xtensor_like, optional
        Probabilities of each category. Must sum to 1 along the core dimension.
        Must be provided if `logit_p` is not specified.
    logit_p : xtensor_like, optional
        Alternative parametrization using logits. Must be provided if `p` is not specified.
    core_dims : str
        The core dimension of the distribution, which represents the categories.
        The dimension must be present in `p` or `logit_p`.
    **kwargs
        Other keyword arguments used to define the distribution.

    Returns
    -------
    XTensorVariable
        An xtensor variable representing the categorical distribution.
        The output does not contain the core dimension, as it is absorbed into the distribution.
    """

    xrv_op = ptxr.categorical

    @classmethod
    def dist(cls, p=None, *, logit_p=None, core_dims=None, **kwargs):
        # Exactly one of p / logit_p must be given.
        if (p is None) == (logit_p is None):
            if p is None:
                raise ValueError("Incompatible parametrization. Must specify either p or logit_p.")
            raise ValueError("Incompatible parametrization. Can't specify both p and logit_p.")

        if p is None:
            # Convert logits to probabilities along the category dim.
            p = ptx.math.softmax(logit_p, dim=core_dims)
        return super().dist([p], core_dims=core_dims, **kwargs)
|
|
||
|
|
||
class MvNormal(VectorDimDistribution):
    """Multivariate Normal distribution.

    Parameters
    ----------
    mu : xtensor_like
        Mean vector of the distribution.
    cov : xtensor_like, optional
        Covariance matrix of the distribution. Only one of `cov` or `chol` must be provided.
    chol : xtensor_like, optional
        Cholesky decomposition of the covariance matrix. only one of `cov` or `chol` must be provided.
    lower : bool, default True
        If True, the Cholesky decomposition is assumed to be lower triangular.
        If False, it is assumed to be upper triangular.
    core_dims: Sequence of string
        Sequence of two strings representing the core dimensions of the distribution.
        The two dimensions must be present in `cov` or `chol`, and exactly one must also be present in `mu`.
    **kwargs
        Additional keyword arguments used to define the distribution.

    Returns
    -------
    XTensorVariable
        An xtensor variable representing the multivariate normal distribution.
        The output contains the core dimension that is shared between `mu` and `cov` or `chol`.
    """

    xrv_op = pxr.multivariate_normal

    @classmethod
    def dist(cls, mu, cov=None, *, chol=None, lower=True, core_dims=None, **kwargs):
        if "tau" in kwargs:
            raise NotImplementedError("MvNormal does not support 'tau' parameter.")

        if not (isinstance(core_dims, tuple | list) and len(core_dims) == 2):
            raise ValueError("MvNormal requires 2 core_dims")

        if cov is None and chol is None:
            raise ValueError("Either 'cov' or 'chol' must be provided.")

        if chol is not None:
            d0, d1 = core_dims
            if not lower:
                # By logical symmetry this must be the only correct way to implement lower
                # We refuse to test it because it is not useful
                d1, d0 = d0, d1

            chol = cls._as_xtensor(chol)
            # chol @ chol.T in xarray semantics requires a rename
            # Pick a dim name guaranteed not to clash with chol's existing dims:
            # a run of underscores strictly longer than the longest existing name.
            safe_name = "_"
            if "_" in chol.type.dims:
                safe_name *= max(map(len, chol.type.dims)) + 1
            cov = chol.dot(chol.rename({d0: safe_name}), dim=d1).rename({safe_name: d1})

        return super().dist([mu, cov], core_dims=core_dims, **kwargs)
|
|
||
|
|
||
class ZeroSumNormal(VectorDimDistribution):
    """Zero-sum multivariate normal distribution.

    Parameters
    ----------
    sigma : xtensor_like, optional
        The standard deviation of the underlying unconstrained normal distribution.
        Defaults to 1.0. It cannot have core dimensions.
    core_dims : Sequence of str, optional
        The axes along which the zero-sum constraint is applied.
    **kwargs
        Additional keyword arguments used to define the distribution.

    Returns
    -------
    XTensorVariable
        An xtensor variable representing the zero-sum multivariate normal distribution.
    """

    @classmethod
    def __new__(
        cls, *args, core_dims=None, dims=None, default_transform=UNSET, observed=None, **kwargs
    ):
        # NOTE(review): because __new__ is wrapped in @classmethod, the class
        # object also arrives as the first element of *args when the class is
        # called, and the base __new__ consumes it — confirm before refactoring.
        if core_dims is not None:
            if isinstance(core_dims, str):
                core_dims = (core_dims,)

            # Create default_transform
            if observed is None and default_transform is UNSET:
                default_transform = ZeroSumTransform(dims=core_dims)

        # If the user didn't specify dims, take it from core_dims
        # We need them to be forwarded to dist in the `dim_lengths` argument
        if dims is None and core_dims is not None:
            dims = (..., *core_dims)

        return super().__new__(
            *args,
            core_dims=core_dims,
            dims=dims,
            default_transform=default_transform,
            observed=observed,
            **kwargs,
        )

    @classmethod
    def dist(cls, sigma=1.0, *, core_dims=None, dim_lengths, **kwargs):
        if isinstance(core_dims, str):
            core_dims = (core_dims,)
        if core_dims is None or len(core_dims) == 0:
            # Fixed message typo: "atleast" -> "at least".
            raise ValueError("ZeroSumNormal requires at least 1 core_dims")

        # Lengths of the constrained dims, packed in a vector with a dummy "_" dim.
        support_dims = as_xtensor(
            as_tensor([dim_lengths[core_dim] for core_dim in core_dims]), dims=("_",)
        )
        sigma = cls._as_xtensor(sigma)

        return super().dist(
            [sigma, support_dims], core_dims=core_dims, dim_lengths=dim_lengths, **kwargs
        )

    @classmethod
    def xrv_op(self, sigma, support_dims, core_dims, extra_dims=None, rng=None):
        # Build an xtensor-aware RV from the tensor-level ZeroSumNormalRV op.
        sigma = as_xtensor(sigma)
        support_dims = as_xtensor(support_dims, dims=("_",))
        support_shape = support_dims.values
        core_rv = ZeroSumNormalRV.rv_op(sigma=sigma.values, support_shape=support_shape).owner.op
        xop = pxr.as_xrv(
            core_rv,
            core_inps_dims_map=[(), (0,)],
            core_out_dims_map=tuple(range(1, len(core_dims) + 1)),
        )
        # Dummy "_" core dim to absorb the support_shape vector
        # If ZeroSumNormal expected a scalar per support dim, this wouldn't be needed
        return xop(sigma, support_dims, core_dims=("_", *core_dims), extra_dims=extra_dims, rng=rng)
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,15 @@ | ||
| # Copyright 2025 - present The PyMC Developers | ||
| # | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. | ||
| from pytensor.xtensor import linalg | ||
| from pytensor.xtensor.math import * |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,95 @@ | ||
| # Copyright 2025 - present The PyMC Developers | ||
| # | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. | ||
| from collections.abc import Callable | ||
|
|
||
| from pytensor.tensor import TensorVariable | ||
| from pytensor.xtensor import as_xtensor | ||
| from pytensor.xtensor.type import XTensorVariable | ||
|
|
||
| from pymc.data import Data as RegularData | ||
| from pymc.distributions.shape_utils import ( | ||
| Dims, | ||
| DimsWithEllipsis, | ||
| convert_dims, | ||
| convert_dims_with_ellipsis, | ||
| ) | ||
| from pymc.model.core import Deterministic as RegularDeterministic | ||
| from pymc.model.core import Model, modelcontext | ||
| from pymc.model.core import Potential as RegularPotential | ||
|
|
||
|
|
||
def Data(
    name: str, value, dims: Dims = None, model: Model | None = None, **kwargs
) -> XTensorVariable:
    """Wrapper around pymc.Data that returns an XtensorVariable.

    Dimensions are required if the input is not a scalar.
    These are always forwarded to the model object.
    """
    model = modelcontext(model)
    dims = convert_dims(dims)  # type: ignore[assignment]

    # Register through the regular (non-xtensor) pm.Data inside the model context.
    with model:
        value = RegularData(name, value, dims=dims, **kwargs)  # type: ignore[arg-type]

    # Read the dims back from the model; RegularData may have resolved/registered them.
    dims = model.named_vars_to_dims[value.name]
    if dims is None and value.ndim > 0:
        raise ValueError("pymc.dims.Data requires dims to be specified for non-scalar data.")

    # Wrap the registered tensor as an xtensor carrying the model dims.
    return as_xtensor(value, dims=dims, name=name)  # type: ignore[arg-type]
|
|
||
|
|
||
def _register_and_return_xtensor_variable(
    name: str,
    value: TensorVariable | XTensorVariable,
    dims: DimsWithEllipsis | None,
    model: Model | None,
    registration_func: Callable,
) -> XTensorVariable:
    """Coerce ``value`` to an XTensorVariable and register it with ``registration_func``.

    Plain tensors are converted via ``as_xtensor`` (dims required unless scalar).
    XTensor inputs are optionally transposed to match user-provided dims, and their
    resulting dims are forwarded to the model.
    """
    if not isinstance(value, XTensorVariable):
        # Plain tensor: conversion itself consumes the user-provided dims.
        value = as_xtensor(value, dims=dims, name=name)  # type: ignore[arg-type]
        return registration_func(name, value, dims=dims, model=model)

    requested_dims = convert_dims_with_ellipsis(dims)
    if requested_dims is not None:
        # Align the variable with the ordering the user asked for.
        value = value.transpose(*requested_dims)
    # Regardless of whether dims were given, the variable now knows its own.
    return registration_func(name, value, dims=value.type.dims, model=model)
|
|
||
|
|
||
def Deterministic(
    name: str, value, dims: DimsWithEllipsis | None = None, model: Model | None = None
) -> XTensorVariable:
    """Wrapper around pymc.Deterministic that returns an XtensorVariable.

    If the input is already an XTensorVariable, dims are optional. If dims are
    provided, the variable is aligned with them with a transpose.

    If the input is not an XTensorVariable, it is converted to one using
    `as_xtensor`. Dims are required if the input is not a scalar.

    The dimensions of the resulting XTensorVariable are always forwarded to the
    model object.
    """
    return _register_and_return_xtensor_variable(name, value, dims, model, RegularDeterministic)
|
|
||
|
|
||
def Potential(
    name: str, value, dims: DimsWithEllipsis | None = None, model: Model | None = None
) -> XTensorVariable:
    """Wrapper around pymc.Potential that returns an XtensorVariable.

    If the input is already an XTensorVariable, dims are optional. If dims are
    provided, the variable is aligned with them with a transpose.

    If the input is not an XTensorVariable, it is converted to one using
    `as_xtensor`. Dims are required if the input is not a scalar.

    The dimensions of the resulting XTensorVariable are always forwarded to the
    model object.
    """
    return _register_and_return_xtensor_variable(name, value, dims, model, RegularPotential)
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,135 @@ | ||
| #!/usr/bin/env python3 | ||
|
|
||
| import os | ||
|
|
||
| import requests | ||
|
|
||
|
|
||
def load_config() -> dict[str, str]:
    """Read all required Discourse/GitHub settings from the environment.

    Raises
    ------
    RuntimeError
        If any required environment variable is unset.
    """
    required_keys = (
        "DISCOURSE_URL",
        "DISCOURSE_API_KEY",
        "DISCOURSE_USERNAME",
        "DISCOURSE_CATEGORY",
        # Release information from GitHub
        "RELEASE_TAG",
        "RELEASE_BODY",
        "RELEASE_URL",
        "REPO_NAME",
    )
    env_config = {key: os.getenv(key) for key in required_keys}

    missing = [key for key, value in env_config.items() if value is None]
    if missing:
        raise RuntimeError(f"Missing required environment variables: {', '.join(missing)}")
    return env_config
|
|
||
|
|
||
def find_category_id(config: dict[str, str]) -> int:
    """Look up the Discourse category ID matching ``DISCOURSE_CATEGORY``.

    The comparison is case-insensitive on the category name.

    Raises
    ------
    requests.exceptions.RequestException
        If the categories endpoint cannot be fetched.
    ValueError
        If no category with the configured name exists.
    """
    headers = {
        "Api-Key": config["DISCOURSE_API_KEY"],
        "Api-Username": config["DISCOURSE_USERNAME"],
        "Content-Type": "application/json",
    }

    category_to_find = config["DISCOURSE_CATEGORY"].lower()
    url = f"{config['DISCOURSE_URL']}/categories.json"
    try:
        # Timeout so a hung forum can't stall the release workflow indefinitely.
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()
        data = response.json()
    except Exception as e:
        # Include the cause — the previous bare message made failures hard to debug.
        print(f"Error fetching categories: {e}")
        raise

    categories = (data.get("category_list") or {}).get("categories") or []
    for category in categories:
        cat_id = category.get("id")
        cat_name = category.get("name")
        # Skip malformed entries with no name instead of crashing on None.lower().
        if cat_name is not None and cat_name.lower() == category_to_find:
            return int(cat_id)

    raise ValueError(f"Category '{category_to_find}' not found")
|
|
||
|
|
||
def format_release_content(config: dict[str, str]) -> tuple[str, str]:
    """Build the Discourse topic title and Markdown body for a release.

    Returns
    -------
    tuple[str, str]
        ``(title, content)`` ready to be posted to Discourse.
    """
    title = f"🚀 Release {config['RELEASE_TAG']}"
    # Take the last path component so a bare repo name (no "owner/") doesn't crash.
    repo_name = config["REPO_NAME"].rsplit("/", 1)[-1]
    # NOTE: the "Release Notes" heading emoji was mojibake ("đź“‹") — fixed to 📋.
    content = f"""A new release of **{repo_name}** is now available!
## 📦 Release Information
- **Version:** `{config["RELEASE_TAG"]}`
- **Repository:** [{config["REPO_NAME"]}](https://github.com/{config["REPO_NAME"]})
- **Release Page:** [View on GitHub]({config["RELEASE_URL"]})
- Note: It may take some time for the release to appear on PyPI and conda-forge.
## 📋 Release Notes
{config["RELEASE_BODY"]}
---
*This post was automatically generated from the GitHub release.*
"""

    return title, content
|
|
||
|
|
||
def publish_release_to_discourse(config: dict[str, str]) -> bool:
    """Create a new Discourse topic announcing the GitHub release.

    Returns True on success; re-raises after logging on any request failure.
    """
    print("🎯 GitHub Release to Discourse Publisher")
    print(f"Release: {config['RELEASE_TAG']}")
    print(f"Repository: {config['REPO_NAME']}")
    print(f"Target Forum: {config['DISCOURSE_URL']}")
    print(f"Target Category: {config['DISCOURSE_CATEGORY']}")
    print("-" * 50)

    category_id = find_category_id(config)
    print(f"Publishing to category: {config['DISCOURSE_CATEGORY']} (ID: {category_id})")

    # Format the release content
    title, content = format_release_content(config)

    # Create the topic data
    topic_data = {"title": title, "raw": content, "category": category_id}

    # Post to Discourse
    headers = {
        "Api-Key": config["DISCOURSE_API_KEY"],
        "Api-Username": config["DISCOURSE_USERNAME"],
        "Content-Type": "application/json",
    }
    url = f"{config['DISCOURSE_URL']}/posts.json"

    try:
        # BUG FIX: the payload must be sent as JSON to match the declared
        # Content-Type; `data=topic_data` form-encoded the body, which the
        # server would then fail to parse as JSON.
        response = requests.post(url, headers=headers, json=topic_data, timeout=30)
        response.raise_for_status()

        data = response.json()
        topic_id = data.get("topic_id")
        post_id = data.get("id")

        # (Mojibake "âś…"/"❌" in the status messages fixed to ✅/❌.)
        print("✅ Release published successfully!")
        print(f"Topic ID: {topic_id}")
        print(f"Post ID: {post_id}")
        print(f"URL: {config['DISCOURSE_URL']}/t/{topic_id}")
        return True

    except requests.exceptions.RequestException as e:
        print(f"❌ Error publishing release: {e}")
        # RequestException always carries a `response` attribute (may be None).
        if e.response is not None:
            print(f"Response status: {e.response.status_code}")
            try:
                error_data = e.response.json()
                print(f"Error details: {error_data}")
            except Exception:
                print(f"Response content: {e.response.text}")
        raise
|
|
||
|
|
||
# Entry point: read settings from the environment and publish the release topic.
if __name__ == "__main__":
    config = load_config()
    publish_release_to_discourse(config)
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,13 @@ | ||
| # Copyright 2025 - present The PyMC Developers | ||
| # | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,13 @@ | ||
| # Copyright 2025 - present The PyMC Developers | ||
| # | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. |