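"""All-gather matmul benchmark operator.

Benchmarks backends that all-gather a sharded A matrix and multiply the
result with B: a NCCL all-gather followed by torch.matmul (the "nccl"
baseline), the fused torch.ops.symm_mem.fused_all_gather_matmul op, and
optional helion / kraken kernels.
"""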
from __future__ import annotations

import argparse

import torch
import torch.distributed as dist
import torch.distributed._symmetric_memory as symm_mem

from .experiment_util import BenchmarkOperator
from .experiment_util import ExperimentConfig

# Square (M, N, K) shapes from 256 to 4096 in steps of 128.
BUILTIN_SHAPES = [
    (256, 256, 256),
    (384, 384, 384),
    (512, 512, 512),
    (640, 640, 640),
    (768, 768, 768),
    (896, 896, 896),
    (1024, 1024, 1024),
    (1152, 1152, 1152),
    (1280, 1280, 1280),
    (1408, 1408, 1408),
    (1536, 1536, 1536),
    (1664, 1664, 1664),
    (1792, 1792, 1792),
    (1920, 1920, 1920),
    (2048, 2048, 2048),
    (2176, 2176, 2176),
    (2304, 2304, 2304),
    (2432, 2432, 2432),
    (2560, 2560, 2560),
    (2688, 2688, 2688),
    (2816, 2816, 2816),
    (2944, 2944, 2944),
    (3072, 3072, 3072),
    (3200, 3200, 3200),
    (3328, 3328, 3328),
    (3456, 3456, 3456),
    (3584, 3584, 3584),
    (3712, 3712, 3712),
    (3840, 3840, 3840),
    (3968, 3968, 3968),
    (4096, 4096, 4096),
]

class AGMatmulBench(BenchmarkOperator):
    def gen_configs(self, args: argparse.Namespace) -> list[ExperimentConfig]:
        all_configs = []
        for sz in args.shape:
            all_configs.append(
                ExperimentConfig(
                    shape=sz,
                    dtype=args.dtype,
                    backends=args.backend,
                    device=self.device,
                )
            )

        return all_configs

    def gen_inputs(self, config: ExperimentConfig) -> tuple:
        M, N, K = config.shape
        # A is the local shard; allocate it in symmetric memory so fused
        # backends can access it directly. symm_mem.empty returns
        # uninitialized memory, so fill it to keep results well-defined.
        a = symm_mem.empty(
            (M, K),
            dtype=config.dtype,
            device=config.device,
        ).normal_()
        # Make b column-major while keeping its logical (K, N) shape:
        # transpose, force contiguity, transpose back.
        b = (
            torch.randn((K, N), device=config.device, dtype=config.dtype)
            .T.contiguous()
            .T
        )
        assert dist.group.WORLD is not None
        symm_mem.rendezvous(a, dist.group.WORLD.group_name)
        return (a, b)

    def additional_parser_args(
        self, parser: argparse.ArgumentParser
    ) -> argparse.ArgumentParser:
        def matmul_shape_type(s: str) -> tuple[int, int, int]:
            try:
                M, N, K = map(int, s.split(","))
                return M, N, K
            except Exception as e:
                raise argparse.ArgumentTypeError(
                    "Matmul shape must be comma-separated M,N,K, "
                    "where (M, K) @ (K, N) -> (M, N)"
                ) from e

        parser.add_argument(
            "--shape",
            type=matmul_shape_type,
            nargs="+",
            default=BUILTIN_SHAPES,
            help="matmul shapes as comma-separated M,N,K: (M, K) @ (K, N) -> (M, N)",
        )
        return parser

    def __init__(self) -> None:
        self.op_name = "ag_matmul"
        self.baseline = "nccl"
        super().__init__()

        def nccl_mem_ag_mm(
            a_shared: torch.Tensor, b: torch.Tensor
        ) -> tuple[torch.Tensor, torch.Tensor]:
            from torch.distributed._functional_collectives import all_gather_tensor

            # "0" is the group name c10d assigns to the default process group.
            a_gathered = all_gather_tensor(a_shared, 0, "0")
            return a_gathered, torch.matmul(a_gathered, b)

        def torch_symm_mem_ag_mm(
            a_shared: torch.Tensor, b: torch.Tensor
        ) -> tuple[torch.Tensor, torch.Tensor]:
            assert dist.group.WORLD is not None
            # The fused op gathers a_shared along gather_dim and multiplies
            # the result with each B in the list, returning
            # (gathered A, [products]).
            a_gathered, c = torch.ops.symm_mem.fused_all_gather_matmul(
                a_shared, [b], gather_dim=0, group_name=dist.group.WORLD.group_name
            )
            return a_gathered, c[0]

        assert dist.group.WORLD is not None

        AG_MATMUL_DICT = {
            # Callables run in-process; (module, function-name) pairs point
            # at optional external backends.
            "nccl": nccl_mem_ag_mm,
            "torch_symm_mem": torch_symm_mem_ag_mm,
            "helion": ("examples.all_gather_matmul", "helion_all_gather_matmul"),
            "kraken": ("kraken.all_gather", "all_gather_matmul"),
        }
        self.backend_dict = AG_MATMUL_DICT
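
# Note: the distributed process group must already be initialized (for
# example under torchrun) before this operator runs; both gen_inputs and
# __init__ assert that dist.group.WORLD exists.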