 # Copyright (c) Meta Platforms, Inc. and affiliates
 # implement matrix related ops for distributed tensor
-import itertools
-from typing import cast, List
 
 import torch
-from torch.distributed._tensor.op_schema import (
-    OpSchema,
-    OpStrategy,
-    PlacementStrategy,
-    StrategyType,
-)
-from torch.distributed._tensor.ops.utils import (
-    generate_redistribute_costs,
-    is_tensor_shardable,
-    register_op_strategy,
-)
+from torch.distributed._tensor.op_schema import OpSchema, OutputSharding
+from torch.distributed._tensor.ops.utils import register_prop_rule
 
 from torch.distributed._tensor.placement_types import (
     _Partial,
     DTensorSpec,
-    Placement,
     Replicate,
     Shard,
 )
 
-from torch.distributed.device_mesh import DeviceMesh
-
 aten = torch.ops.aten
 
 
-@register_op_strategy(aten.embedding.default)
-def embedding_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
-    """
-    This strategy handles embedding op. We have two possible embedding shardings:
-    rowwise and colwise
-    # TODO: implement rowwise sharding
-    """
-    weight_strategy = cast(OpStrategy, op_schema.args_schema[0])
-    indices_strategy = cast(OpStrategy, op_schema.args_schema[1])
-
-    weight_shape = weight_strategy.output_shape
-    indices_shape = indices_strategy.output_shape
-    output_emd_dim = len(indices_shape)
-
-    # guard rowwise sharding not implemented for now
-    weight_spec = weight_strategy.strategies[0].output_spec
+# TODO: Enable BWD for embedding op.
+@register_prop_rule(aten.embedding.default)
+def embedding_rules(op_schema: OpSchema) -> OutputSharding:
+    weight_spec, inp_spec = op_schema.args_spec
     if any(placement.is_shard(0) for placement in weight_spec.placements):
         raise NotImplementedError(
             "DTensor does not support row-wise sharded embedding operation yet!"
         )
 
-    all_mesh_dim_strategies = []
-
-    for mesh_dim in range(mesh.ndim):
-        single_mesh_dim_strategies = []
-
-        # placement list stores placements of [output, weight, input_indices]
-        # first we always have replicate all for inputs and output
-        all_replicate: List[Placement] = [Replicate()] * 3
-        single_mesh_dim_strategies.append(all_replicate)
-
-        # colwise sharding, output shard on last dim, weight shard on dim 1, input replicate
-        colwise_sharding = [Shard(output_emd_dim), Shard(1), Replicate()]
-        single_mesh_dim_strategies.append(colwise_sharding)
-
-        # batch dim sharding, weight replicated, input can shard on any dim, output follows input
-        for input_dim in range(len(indices_shape)):
-            batch_sharding = [Shard(input_dim), Replicate(), Shard(input_dim)]
-            single_mesh_dim_strategies.append(batch_sharding)
-
-        all_mesh_dim_strategies.append(single_mesh_dim_strategies)
-
-    strategy_combs = itertools.product(*all_mesh_dim_strategies)
-
-    all_strategies = []
-    for strategy_comb in strategy_combs:
-        spec_list = []
-        for specs in zip(*strategy_comb):
-            spec_list.append(DTensorSpec(mesh, tuple(specs)))
-
-        if is_tensor_shardable(weight_shape, spec_list[1]) and is_tensor_shardable(
-            indices_shape, spec_list[2]
-        ):
-            # only add to the strategy list when both weight and indices are shardable
-            weight_spec, indices_spec = spec_list[1:]
-            redistribute_cost = [
-                generate_redistribute_costs(weight_strategy, weight_spec),
-                generate_redistribute_costs(indices_strategy, indices_spec),
-            ]
-            strat = PlacementStrategy(
-                output_specs=spec_list[0],
-                input_specs=spec_list[1:],
-                redistribute_cost=redistribute_cost,
-            )
-            all_strategies.append(strat)
-
-    return OpStrategy(all_strategies)
-
-
-@register_op_strategy(aten.embedding_dense_backward.default)
-def embedding_dense_backward_strategy(
-    mesh: DeviceMesh, op_schema: OpSchema
-) -> StrategyType:
-    """
-    This strategy handles embedding op. We have two possible embedding shardings:
-    rowwise and colwise
-    # TODO: implement rowwise sharding backward
-    """
-    grad_out_strategy = cast(OpStrategy, op_schema.args_schema[0])
-    indices_strategy = cast(OpStrategy, op_schema.args_schema[1])
-
-    grad_out_shape = grad_out_strategy.output_shape
-    indices_shape = indices_strategy.output_shape
-    grad_out_ndim = len(grad_out_shape)
-
-    all_mesh_dim_strategies = []
-
-    for mesh_dim in range(mesh.ndim):
-        single_mesh_dim_strategies = []
-
-        # placement list stores placements of [output, weight, input_indices]
-        # first we always have replicate all for inputs and output
-        all_replicate: List[Placement] = [Replicate()] * 3
-        single_mesh_dim_strategies.append(all_replicate)
-
-        # colwise sharding backward, grad_out shard on last dim, input replicate,
-        # weight grad shard colwise
-        colwise_sharding = [Shard(1), Shard(grad_out_ndim - 1), Replicate()]
-        single_mesh_dim_strategies.append(colwise_sharding)
-
-        # batch dim sharding, weight replicated, grad_out/input have same sharding
-        # that can shard on any dim, weight grad partial
-        for input_dim in range(len(indices_shape)):
-            batch_sharding = [_Partial(), Shard(input_dim), Shard(input_dim)]
-            single_mesh_dim_strategies.append(batch_sharding)
-
-        # grad_out partial, input replicate, weight grad keep partial
-        partial_sharding = [_Partial(), _Partial(), Replicate()]
-        single_mesh_dim_strategies.append(partial_sharding)
-
-        all_mesh_dim_strategies.append(single_mesh_dim_strategies)
-
-    strategy_combs = itertools.product(*all_mesh_dim_strategies)
+    if weight_spec.is_replicated() and inp_spec.placements == [Shard(0)]:
+        # Embedding table is replicated, input ids are sharded along batch
+        # dimension. Output lookups should match input sharding spec in this case.
+        return OutputSharding(
+            output_spec=DTensorSpec(mesh=inp_spec.mesh, placements=inp_spec.placements)
+        )
 
-    all_strategies = []
-    for strategy_comb in strategy_combs:
-        spec_list = []
-        for specs in zip(*strategy_comb):
-            spec_list.append(DTensorSpec(mesh, tuple(specs)))
+    if inp_spec.is_replicated():
+        weight_dim_map = weight_spec.dim_map
+        output_dim_map = inp_spec.dim_map
+        output_dim_map.append(weight_dim_map[1])
+        return OutputSharding(
+            output_spec=DTensorSpec.from_dim_map(inp_spec.mesh, output_dim_map, [])
+        )
 
-        if is_tensor_shardable(grad_out_shape, spec_list[1]) and is_tensor_shardable(
-            indices_shape, spec_list[2]
-        ):
-            # only add to the strategy list when both grad_out and indices are shardable
-            grad_out_spec, indices_spec = spec_list[1:]
-            redistribute_cost = [
-                generate_redistribute_costs(grad_out_strategy, grad_out_spec),
-                generate_redistribute_costs(indices_strategy, indices_spec),
-            ]
-            strat = PlacementStrategy(
-                output_specs=spec_list[0],
-                input_specs=spec_list[1:],
-                redistribute_cost=redistribute_cost,
+    return OutputSharding(
+        output_spec=None,
+        schema_suggestions=[
+            OpSchema(
+                op=op_schema.op,
+                args_schema=(
+                    weight_spec,
+                    DTensorSpec(
+                        mesh=inp_spec.mesh,
+                        placements=tuple([Replicate()] * len(inp_spec.placements)),
+                        tensor_meta=inp_spec.tensor_meta,
+                    ),
+                ),
+                kwargs_schema=op_schema.kwargs_schema,
             )
-            all_strategies.append(strat)
-
-    return OpStrategy(all_strategies)
+        ],
+    )
+
+
+
+@register_prop_rule(aten.embedding_renorm_.default)
+def embedding_renorm_rules(op_schema: OpSchema) -> OutputSharding:
+    raise NotImplementedError(
+        "DTensor does not support sharded embedding operation with max_norm yet!"
+    )
+
+
+@register_prop_rule(aten.embedding_dense_backward.default)
+def embedding_dense_backward_rules(op_schema: OpSchema) -> OutputSharding:
+    grad_output, indices = op_schema.args_schema[:2]
+    assert isinstance(grad_output, DTensorSpec)
+    assert isinstance(indices, DTensorSpec)
+    if grad_output.placements == indices.placements:
+        # The embedding table is replicated, and input/output activations are
+        # sharded. In this case, gradients for the embedding table should be
+        # Partial.
+        return OutputSharding(
+            output_spec=DTensorSpec(mesh=indices.mesh, placements=(_Partial(),))
+        )
+    elif grad_output.placements == [_Partial()] and indices.placements == [Replicate()]:
+        # The embedding table is replicated and the indices are also replicated
+        # (local is a more precise term). This is positional embedding. In this
+        # case, gradients for the embedding table should be Partial.
+        return OutputSharding(
+            output_spec=DTensorSpec(mesh=indices.mesh, placements=(_Partial(),))
+        )
+    elif all(placement.is_replicate() for placement in indices.placements):
+        # BWD for colwise sharding case
+        return OutputSharding(
+            output_spec=DTensorSpec(mesh=indices.mesh, placements=(Shard(1),))
+        )
+    else:
+        raise NotImplementedError(
+            "Unsupported embedding dense backward schema:\n"
+            f"grad_output - {grad_output}\n"
+            f"indices - {indices}"
+        )
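
For context, here is a minimal usage sketch of what the new forward rule propagates. It is not part of this diff: the 4-rank CPU/gloo mesh, the torchrun launch, and the tensor shapes are assumptions, and only the public `DeviceMesh`, `distribute_tensor`, `Shard`, and `Replicate` APIs are used. With a column-wise sharded embedding table and replicated indices, `embedding_rules` takes the `inp_spec.is_replicated()` branch and shards the output along the last (embedding) dimension; the batch-sharded path (replicated weight, `Shard(0)` indices) analogously returns an output that follows the input placements.

# Minimal sketch, not part of this diff. Assumes a 4-rank job, e.g.
# `torchrun --nproc-per-node=4 example.py`, and a CPU/gloo device mesh.
import torch
import torch.nn.functional as F
from torch.distributed._tensor import DeviceMesh, Replicate, Shard, distribute_tensor

mesh = DeviceMesh("cpu", list(range(4)))

# Column-wise sharded table: Shard(1) splits the embedding dim across the 4 ranks.
weight = distribute_tensor(torch.randn(1000, 128), mesh, [Shard(1)])
# Replicated indices (distribute_tensor broadcasts from rank 0).
indices = distribute_tensor(torch.randint(0, 1000, (8, 16)), mesh, [Replicate()])

# Dispatches aten.embedding.default through the rule above; with a replicated
# input and a Shard(1) weight, the output dim_map becomes [-1, -1, 0], i.e. the
# result is sharded along the last (embedding) dimension.
out = F.embedding(indices, weight)
print(out.placements)  # expected: (Shard(dim=2),)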