diff --git a/src/torchjd/autogram/_jacobian_computer.py b/src/torchjd/autogram/_jacobian_computer.py
index a93bd5aa..adc90b06 100644
--- a/src/torchjd/autogram/_jacobian_computer.py
+++ b/src/torchjd/autogram/_jacobian_computer.py
@@ -181,7 +181,7 @@ def vmap(
         jac_outputs: tuple[Tensor, ...],
         args: tuple[PyTree, ...],
         kwargs: dict[str, PyTree],
-    ) -> tuple[Tensor, None]:  # type: ignore[reportIncompatibleMethodOverride]
+    ) -> tuple[Tensor, None]:  # ty: ignore[invalid-method-override]
         # There is a non-batched dimension
         # We do not vmap over the args, kwargs, or rg_outputs for the non-batched dimension
         generalized_jacobian = torch.vmap(compute_jacobian_fn, in_dims=in_dims[1:])(
diff --git a/src/torchjd/autogram/_module_hook_manager.py b/src/torchjd/autogram/_module_hook_manager.py
index 39ebf609..f85693da 100644
--- a/src/torchjd/autogram/_module_hook_manager.py
+++ b/src/torchjd/autogram/_module_hook_manager.py
@@ -174,7 +174,7 @@ def setup_context(
         ctx: Any,
         inputs: tuple,
         _,
-    ) -> None:  # type: ignore[reportIncompatibleMethodOverride]
+    ) -> None:  # ty: ignore[invalid-method-override]
         ctx.gramian_accumulation_phase = inputs[0]
         ctx.gramian_computer = inputs[1]
         ctx.args = inputs[2]
diff --git a/src/torchjd/autojac/_utils.py b/src/torchjd/autojac/_utils.py
index 0a0c9dfe..56d67ff8 100644
--- a/src/torchjd/autojac/_utils.py
+++ b/src/torchjd/autojac/_utils.py
@@ -179,7 +179,7 @@ def get_leaf_tensors(tensors: Iterable[Tensor], excluded: Iterable[Tensor]) -> O
 
     # accumulate_grads contains instances of AccumulateGrad, which contain a `variable` field.
     # They cannot be typed as such because AccumulateGrad is not public.
-    leaves = OrderedSet([g.variable for g in accumulate_grads])  # type: ignore[attr-defined]
+    leaves = OrderedSet([g.variable for g in accumulate_grads])  # ty: ignore[unresolved-attribute]
 
     return leaves
 
diff --git a/tests/unit/aggregation/test_aligned_mtl.py b/tests/unit/aggregation/test_aligned_mtl.py
index db89207b..d48e8855 100644
--- a/tests/unit/aggregation/test_aligned_mtl.py
+++ b/tests/unit/aggregation/test_aligned_mtl.py
@@ -39,7 +39,7 @@ def test_representations() -> None:
 
 
 def test_invalid_scale_mode() -> None:
-    aggregator = AlignedMTL(scale_mode="test")  # type: ignore[arg-type]
+    aggregator = AlignedMTL(scale_mode="test")  # ty: ignore[invalid-argument-type]
     matrix = ones_(3, 4)
     with raises(ValueError, match=r"Invalid scale_mode=.*Expected"):
         aggregator(matrix)
diff --git a/tests/unit/autojac/test_backward.py b/tests/unit/autojac/test_backward.py
index be42ebec..893ad1a3 100644
--- a/tests/unit/autojac/test_backward.py
+++ b/tests/unit/autojac/test_backward.py
@@ -317,7 +317,7 @@ def test_input_retaining_grad_fails() -> None:
 
     with raises(RuntimeError):
         # Using such a BatchedTensor should result in an error
-        _ = -b.grad  # type: ignore[unsupported-operator]
+        _ = -b.grad  # ty: ignore[unsupported-operator]
 
 
 def test_non_input_retaining_grad_fails() -> None:
@@ -336,7 +336,7 @@ def test_non_input_retaining_grad_fails() -> None:
 
     with raises(RuntimeError):
         # Using such a BatchedTensor should result in an error
-        _ = -b.grad  # type: ignore[unsupported-operator]
+        _ = -b.grad  # ty: ignore[unsupported-operator]
 
 
 @mark.parametrize("chunk_size", [1, 3, None])
diff --git a/tests/unit/autojac/test_jac.py b/tests/unit/autojac/test_jac.py
index a13164fb..a366c0c5 100644
--- a/tests/unit/autojac/test_jac.py
+++ b/tests/unit/autojac/test_jac.py
@@ -315,7 +315,7 @@ def test_input_retaining_grad_fails() -> None:
 
     with raises(RuntimeError):
         # Using such a BatchedTensor should result in an error
-        _ = -b.grad  # type: ignore[unsupported-operator]
+        _ = -b.grad  # ty: ignore[unsupported-operator]
 
 
 def test_non_input_retaining_grad_fails() -> None:
@@ -334,7 +334,7 @@ def test_non_input_retaining_grad_fails() -> None:
 
     with raises(RuntimeError):
         # Using such a BatchedTensor should result in an error
-        _ = -b.grad  # type: ignore[unsupported-operator]
+        _ = -b.grad  # ty: ignore[unsupported-operator]
 
 
 @mark.parametrize("chunk_size", [1, 3, None])
diff --git a/tests/unit/autojac/test_mtl_backward.py b/tests/unit/autojac/test_mtl_backward.py
index 5ca13882..2e8ec2f5 100644
--- a/tests/unit/autojac/test_mtl_backward.py
+++ b/tests/unit/autojac/test_mtl_backward.py
@@ -448,7 +448,7 @@ def test_shared_param_retaining_grad_fails() -> None:
 
     with raises(RuntimeError):
         # Using such a BatchedTensor should result in an error
-        _ = -a.grad  # type: ignore[unsupported-operator]
+        _ = -a.grad  # ty: ignore[unsupported-operator]
 
 
 def test_shared_activation_retaining_grad_fails() -> None:
@@ -477,7 +477,7 @@ def test_shared_activation_retaining_grad_fails() -> None:
 
     with raises(RuntimeError):
         # Using such a BatchedTensor should result in an error
-        _ = -a.grad  # type: ignore[unsupported-operator]
+        _ = -a.grad  # ty: ignore[unsupported-operator]
 
 
 def test_tasks_params_overlap() -> None:
diff --git a/tests/utils/forward_backwards.py b/tests/utils/forward_backwards.py
index 008fbad4..d44c3803 100644
--- a/tests/utils/forward_backwards.py
+++ b/tests/utils/forward_backwards.py
@@ -139,9 +139,10 @@ def get_vjp(grad_outputs: Tensor) -> list[Tensor]:
 
     jacobians = vmap(get_vjp)(torch.diag(torch.ones_like(output)))
     jacobian_matrices = [jacobian.reshape([jacobian.shape[0], -1]) for jacobian in jacobians]
-    gramian = sum([jacobian @ jacobian.T for jacobian in jacobian_matrices])
+    products = [jacobian @ jacobian.T for jacobian in jacobian_matrices]
+    gramian = torch.stack(products).sum(dim=0)
 
-    return gramian
+    return PSDTensor(gramian)
 
 
 class CloneParams: