File: test_SDDMM.py

# RUN: env SUPPORT_LIB=%mlir_c_runner_utils \
# RUN:   %PYTHON %s | FileCheck %s
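# The RUN lines above drive LLVM's lit harness: lit executes this script with
# SUPPORT_LIB pointing at the MLIR C runner utils shared library and pipes its
# stdout through FileCheck against the CHECK lines below.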

import ctypes
import errno
import numpy as np
import os
import sys

from mlir import ir
from mlir import runtime as rt

from mlir.dialects import sparse_tensor as st
from mlir.dialects import builtin
from mlir.dialects import func
from mlir.dialects.linalg.opdsl import lang as dsl

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)
from tools import sparse_compiler


@dsl.linalg_structured_op
def sddmm_dsl(
    A=dsl.TensorDef(dsl.T, dsl.S.M, dsl.S.K),
    B=dsl.TensorDef(dsl.T, dsl.S.K, dsl.S.N),
    S=dsl.TensorDef(dsl.T, dsl.S.M, dsl.S.N),
    C=dsl.TensorDef(dsl.T, dsl.S.M, dsl.S.N, output=True),
):
    C[dsl.D.m, dsl.D.n] += (
        S[dsl.D.m, dsl.D.n] * A[dsl.D.m, dsl.D.k] * B[dsl.D.k, dsl.D.n]
    )
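

# For reference only (this helper is not used by the test): SDDMM scales each
# entry of the sampling matrix S by the matching entry of the dense product
# A @ B, so a minimal NumPy model of the kernel is simply:
def _sddmm_reference(a, b, s):
    """Dense NumPy sketch of C = S * (A @ B), elementwise in S."""
    return s * np.matmul(a, b)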


def build_SDDMM(attr: st.EncodingAttr):
    """Build SDDMM kernel.

    This method generates a linalg op for a sampled dense-dense matrix
    multiplication using just the Python API. Effectively, a generic linalg
    op is constructed that computes C(i,j) += S(i,j) * SUM_k A(i,k) * B(k,j)
    for a sparse sampling matrix S.
    """
    module = ir.Module.create()
    f64 = ir.F64Type.get()
    a = ir.RankedTensorType.get([8, 8], f64)
    b = ir.RankedTensorType.get([8, 8], f64)
    c = ir.RankedTensorType.get([8, 8], f64)
    s = ir.RankedTensorType.get([8, 8], f64, attr)
    arguments = [a, b, s, c]
    with ir.InsertionPoint(module.body):

        @func.FuncOp.from_py_func(*arguments)
        def sddmm(*args):
            return sddmm_dsl(args[0], args[1], args[2], outs=[args[3]])

    return module


def boilerplate(attr: st.EncodingAttr):
    """Returns boilerplate code for main driver."""
    return f"""
func.func @main(%a: tensor<8x8xf64>,
           %b: tensor<8x8xf64>,
           %c: tensor<8x8xf64>) -> tensor<8x8xf64> attributes {{ llvm.emit_c_interface }} {{
  %t = arith.constant sparse<[[0,0], [0,2], [4,1]], [1.0, 2.0, 3.0]> : tensor<8x8xf64>
  %s = sparse_tensor.convert %t : tensor<8x8xf64> to tensor<8x8xf64, {attr}>
  %0 = call @sddmm(%a, %b, %s, %c) : (tensor<8x8xf64>,
                                      tensor<8x8xf64>,
                                      tensor<8x8xf64, {attr}>,
                                      tensor<8x8xf64>) -> tensor<8x8xf64>
  return %0 : tensor<8x8xf64>
}}
"""


def build_compile_and_run_SDDMM(attr: st.EncodingAttr, compiler):
    # Build.
    module = build_SDDMM(attr)
    # Stringify the generated @sddmm function and re-parse it together with
    # the main driver (func_text avoids shadowing the imported func dialect).
    func_text = str(module.operation.regions[0].blocks[0].operations[0].operation)
    module = ir.Module.parse(func_text + boilerplate(attr))

    # Compile.
    engine = compiler.compile_and_jit(module)

    # Set up numpy input and buffer for output.
    a = np.array(
        [
            [1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 8.1],
            [1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2, 8.2],
            [1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3, 8.3],
            [1.4, 2.4, 3.4, 4.4, 5.4, 6.4, 7.4, 8.4],
            [1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5],
            [1.6, 2.6, 3.6, 4.6, 5.6, 6.6, 7.6, 8.6],
            [1.7, 2.7, 3.7, 4.7, 5.7, 6.7, 7.7, 8.7],
            [1.8, 2.8, 3.8, 4.8, 5.8, 6.8, 7.8, 8.8],
        ],
        np.float64,
    )
    b = np.ones((8, 8), np.float64)
    c = np.zeros((8, 8), np.float64)

    mem_a = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(a)))
    mem_b = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(b)))
    mem_c = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(c)))
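    # The double ctypes.pointer above reflects the calling convention:
    # engine.invoke packs its arguments as an array of void*, so each memref
    # is passed as a pointer to a pointer to its ranked memref descriptor.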

    # Allocate a MemRefDescriptor to receive the output tensor.
    # The buffer itself is allocated inside the MLIR code generation.
    ref_out = rt.make_nd_memref_descriptor(2, ctypes.c_double)()
    mem_out = ctypes.pointer(ctypes.pointer(ref_out))

    # Invoke the kernel and get numpy output.
    # Built-in bufferization uses in-out buffers.
    # TODO: replace with inplace comprehensive bufferization.
    engine.invoke("main", mem_out, mem_a, mem_b, mem_c)

    # Sanity check on computed result. Only a few elements
    # are sampled from the full dense matrix multiplication.
    full_matmul = np.matmul(a, b)
    expected = np.zeros((8, 8), np.float64)
    expected[0, 0] = 1.0 * full_matmul[0, 0]
    expected[0, 2] = 2.0 * full_matmul[0, 2]
    expected[4, 1] = 3.0 * full_matmul[4, 1]
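    # These are exactly the nonzeros of the sampling matrix %t in the
    # boilerplate ([0,0]=1.0, [0,2]=2.0, [4,1]=3.0); every other entry of the
    # SDDMM result must remain zero.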
    c = rt.ranked_memref_to_numpy(mem_out[0])
    if not np.allclose(c, expected):
        quit("FAILURE")


def main():
    support_lib = os.getenv("SUPPORT_LIB")
    assert support_lib is not None, "SUPPORT_LIB is undefined"
    if not os.path.exists(support_lib):
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), support_lib)

    # CHECK-LABEL: TEST: testSDDMM
    print("\nTEST: testSDDMM")
    with ir.Context() as ctx, ir.Location.unknown():
        count = 0
        # Loop over various ways to compile and annotate the SDDMM kernel with
        # a *single* sparse tensor. Note that we deliberately do not exhaustively
        # search the full state space to reduce runtime of the test. It is
        # straightforward to adapt the code below to explore more combinations.
        levels = [
            [st.DimLevelType.dense, st.DimLevelType.dense],
            [st.DimLevelType.dense, st.DimLevelType.compressed],
            [st.DimLevelType.compressed, st.DimLevelType.dense],
            [st.DimLevelType.compressed, st.DimLevelType.compressed],
        ]
        orderings = [
            ir.AffineMap.get_permutation([0, 1]),
            ir.AffineMap.get_permutation([1, 0]),
        ]
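        # 4 level formats x 2 orderings (x 1 choice each for pwidth, iwidth,
        # and e) = 8 configurations, matching the final CHECK count below.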
        for level in levels:
            for ordering in orderings:
                for pwidth in [32]:
                    for iwidth in [32]:
                        for e in [True]:
                            attr = st.EncodingAttr.get(
                                level, ordering, pwidth, iwidth
                            )
                            opt = f"parallelization-strategy=none"
                            compiler = sparse_compiler.SparseCompiler(
                                options=opt, opt_level=0, shared_libs=[support_lib]
                            )
                            build_compile_and_run_SDDMM(attr, compiler)
                            count += 1
    # CHECK: Passed 8 tests
    print("Passed ", count, "tests")


if __name__ == "__main__":
    main()