Skip to content

WGSL printer

wgsl_printer

WGSLPrinter dataclass

Bases: BasePrinter

Source code in xdsl/backend/wgsl/wgsl_printer.py
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
class WGSLPrinter(BasePrinter):
    """Emits WGSL compute-shader source for a ``gpu`` dialect module.

    Each supported operation registers a ``print`` overload via
    ``singledispatchmethod``; any operation without an overload raises
    ``NotImplementedError``.
    """

    # Maps each SSA value to the WGSL identifier chosen for it, plus the
    # counter used to mint fresh ``v<n>`` names.  These class-level defaults
    # are replaced by per-instance copies in ``wgsl_name`` (see below).
    name_dict: dict[SSAValue, str] = dict()
    count = 0

    def wgsl_name(self, v: SSAValue) -> str:
        """Return a stable WGSL identifier for SSA value ``v``."""
        # Bug fix: ``name_dict`` is a mutable *class* attribute, so every
        # printer instance would otherwise share (and pollute) one name map.
        # Give this instance its own dict/counter on first use.  (``count``
        # alone was already safe — ``self.count += 1`` shadows the class
        # attribute — but both are reset together for clarity.)
        if "name_dict" not in self.__dict__:
            self.name_dict = {}
            self.count = 0
        if v not in self.name_dict:
            if v.name_hint is not None:
                # Prefer the IR name hint, prefixed so it is a valid WGSL id.
                self.name_dict[v] = f"v{v.name_hint}"
            else:
                self.name_dict[v] = f"v{self.count}"
                self.count += 1
        return self.name_dict[v]

    @singledispatchmethod
    def print(self, op: Operation) -> None:
        """Fallback overload: reject operations without a registered printer."""
        raise NotImplementedError(
            f"Printing of '{op.name}' to WGSL is not implemented yet."
        )

    @print.register
    def _(self, op: gpu.ModuleOp):
        # Only gpu.func ops are emitted; other module-level ops are skipped.
        for o in op.body.ops:
            if isinstance(o, gpu.FuncOp):
                self.print(o)

    @print.register
    def _(self, op: gpu.FuncOp):
        # Workgroup size defaults to (1,) unless the op carries a known
        # block size.
        workgroup_size = (1,)
        if op.known_block_size:
            workgroup_size = op.known_block_size.get_values()
        # Each kernel argument becomes a storage binding in @group(0), bound
        # at the argument's position.
        for arg in op.body.block.args:
            # Any argument written by a memref.store needs read_write access.
            auth = "read"
            arg_type = ""
            for use in arg.uses:
                if isinstance(use.operation, memref.StoreOp):
                    auth = "read_write"
            if arg.type == builtin.f32:
                arg_type = "f32"
            elif arg.type == builtin.IndexType():
                # WGSL has no index type; index values are emitted as u32.
                arg_type = "u32"
            elif isa(arg.type, MemRefType):
                if arg.type.element_type == builtin.IndexType():
                    arg_type = "u32"
                else:
                    arg_type = arg.type.element_type
                arg_type = f"array<{arg_type}>"
            # NOTE(review): any other argument type falls through with an
            # empty arg_type and produces invalid WGSL — confirm upstream.
            arguments = f"""
    @group(0) @binding({arg.index})
    var<storage,{auth}> {self.wgsl_name(arg)}: {arg_type};
"""
            self.print_string(arguments)

        self.print_string(
            f"""
    @compute
    @workgroup_size({",".join(str(i) for i in workgroup_size)})
    fn {op.sym_name.data}(@builtin(global_invocation_id) global_invocation_id : vec3<u32>,
    @builtin(workgroup_id) workgroup_id : vec3<u32>,
    @builtin(local_invocation_id) local_invocation_id : vec3<u32>,
    @builtin(num_workgroups) num_workgroups : vec3<u32>) {{
"""
        )
        for operation in op.body.ops:
            self.print(operation)
        self.print_string(
            """
            }
            """
        )

    @print.register
    def _(self, op: gpu.ReturnOp):
        # The kernel body is a plain WGSL function body; return is implicit.
        pass

    @print.register
    def _(self, op: gpu.BlockIdOp):
        # gpu.block_id maps to the WGSL workgroup_id builtin.
        dim = str(op.dimension.data).strip('"')
        name_hint = self.wgsl_name(op.result)
        self.print_string("\n")
        with self.indented(2):
            self.print_string(f"let {name_hint}: u32 = workgroup_id.{dim};")

    @print.register
    def _(self, op: gpu.ThreadIdOp):
        # gpu.thread_id maps to the WGSL local_invocation_id builtin.
        dim = str(op.dimension.data).strip('"')
        name_hint = self.wgsl_name(op.result)
        self.print_string("\n")
        with self.indented(2):
            self.print_string(f"let {name_hint}: u32 = local_invocation_id.{dim};")

    @print.register
    def _(self, op: gpu.GridDimOp):
        # gpu.grid_dim maps to the WGSL num_workgroups builtin.
        dim = str(op.dimension.data).strip('"')
        name_hint = self.wgsl_name(op.result)
        self.print_string("\n")
        with self.indented(2):
            self.print_string(f"let {name_hint}: u32 = num_workgroups.{dim};")

    @print.register
    def _(self, op: gpu.BlockDimOp):
        # NOTE(review): gpu.block_dim is the *size* of a block, but this emits
        # local_invocation_id (a per-thread id) — looks wrong; WGSL exposes
        # the size only via the workgroup_size constant.  TODO confirm the
        # intended mapping before relying on this.
        dim = str(op.dimension.data).strip('"')
        name_hint = self.wgsl_name(op.result)
        self.print_string("\n")
        with self.indented(2):
            self.print_string(f"let {name_hint}: u32 = local_invocation_id.{dim};")

    @print.register
    def _(self, op: gpu.GlobalIdOp):
        # gpu.global_id maps to the WGSL global_invocation_id builtin.
        dim = str(op.dimension.data).strip('"')
        name_hint = self.wgsl_name(op.result)
        self.print_string("\n")
        with self.indented(2):
            self.print_string(f"let {name_hint}: u32 = global_invocation_id.{dim};")

    @print.register
    def _(self, op: memref.LoadOp):
        # Loads become a flat indexed read of the bound storage array.
        load_ref = self.wgsl_name(op.memref)
        name_hint = self.wgsl_name(op.res)
        indices = [self.wgsl_name(i) for i in op.indices]
        index_value = self.calculate_index(op, indices)
        self.print_string("\n")
        with self.indented(2):
            self.print_string(f"let {name_hint} = {load_ref}[{index_value}];")

    @print.register
    def _(self, op: memref.StoreOp):
        # Stores become a flat indexed write of the bound storage array.
        value = self.wgsl_name(op.value)
        store_ref = self.wgsl_name(op.memref)
        indices = [self.wgsl_name(i) for i in op.indices]
        index_value = self.calculate_index(op, indices)
        self.print_string("\n")
        with self.indented(2):
            self.print_string(f"{store_ref}[{index_value}] = {value};")

    def calculate_index(
        self, op: memref.StoreOp | memref.LoadOp, indices: list[str]
    ) -> str:
        """Linearize a multi-dimensional memref access into one WGSL index.

        Uses row-major strides computed from the memref's static shape.
        Raises ``NotImplementedError`` for dynamic (-1) dimensions.
        """
        memref_type = cast(MemRefType, op.memref.type)
        memref_dimension = memref_type.get_num_dims()
        memref_size = memref_type.get_shape()
        for size in memref_size:
            if size == -1:
                raise NotImplementedError(
                    "The WGSL translation only works with known sizes at the moment."
                )
        index_values: list[str] = []
        for i in range(memref_dimension):
            # Stride of dimension i = product of all trailing dimensions.
            product_of_dims = 1
            for dim in memref_size[i + 1 :]:
                product_of_dims *= dim
            index_values.append(f"{product_of_dims}u * {indices[i]}")
        return " + ".join(index_values)

    @print.register
    def _(self, op: arith.ConstantOp):
        # NOTE(review): parses the constant from its textual form with int(),
        # so only integer/index constants are supported; float constants
        # would raise ValueError here.  TODO confirm upstream intent.
        value = int(str(op.value).split()[0])
        cons_type = op.result.type
        if isinstance(op.result.type, builtin.IndexType):
            cons_type = "u32"
        name_hint = self.wgsl_name(op.result)
        self.print_string("\n")
        with self.indented(2):
            if cons_type == "u32":
                # WGSL u32 literals cannot be negative: map negative values
                # to the two's-complement unsigned equivalent (mod 2**32).
                if value < 0:
                    value = 4294967296 + value
                self.print_string(f"let {name_hint} : {cons_type} = {value}u;")
            else:
                self.print_string(f"let {name_hint} : {cons_type} = {value};")

    @print.register
    def _(self, op: arith.AddiOp):
        # Integer addition: let r = lhs + rhs;
        op_name_hint = self.wgsl_name(op.result)
        lhs = self.wgsl_name(op.lhs)
        rhs = self.wgsl_name(op.rhs)
        self.print_string("\n")
        with self.indented(2):
            self.print_string(f"let {op_name_hint} = {lhs} + {rhs};")

    @print.register
    def _(self, op: arith.MuliOp):
        # Integer multiplication: let r = lhs * rhs;
        op_name_hint = self.wgsl_name(op.result)
        lhs = self.wgsl_name(op.lhs)
        rhs = self.wgsl_name(op.rhs)
        self.print_string("\n")
        with self.indented(2):
            self.print_string(f"let {op_name_hint} = {lhs} * {rhs};")

    @print.register
    def _(self, op: arith.SubiOp):
        # Integer subtraction: let r = lhs - rhs;
        op_name_hint = self.wgsl_name(op.result)
        lhs = self.wgsl_name(op.lhs)
        rhs = self.wgsl_name(op.rhs)
        self.print_string("\n")
        with self.indented(2):
            self.print_string(f"let {op_name_hint} = {lhs} - {rhs};")

    @print.register
    def _(self, op: arith.MulfOp):
        # Float multiplication: let r = lhs * rhs;
        op_name_hint = self.wgsl_name(op.result)
        lhs = self.wgsl_name(op.lhs)
        rhs = self.wgsl_name(op.rhs)
        self.print_string("\n")
        with self.indented(2):
            self.print_string(f"let {op_name_hint} = {lhs} * {rhs};")

    @print.register
    def _(self, op: arith.AddfOp):
        # Float addition: let r = lhs + rhs;
        op_name_hint = self.wgsl_name(op.result)
        lhs = self.wgsl_name(op.lhs)
        rhs = self.wgsl_name(op.rhs)
        self.print_string("\n")
        with self.indented(2):
            self.print_string(f"let {op_name_hint} = {lhs} + {rhs};")

    @print.register
    def _(self, op: arith.SubfOp):
        # Float subtraction: let r = lhs - rhs;
        op_name_hint = self.wgsl_name(op.result)
        lhs = self.wgsl_name(op.lhs)
        rhs = self.wgsl_name(op.rhs)
        self.print_string("\n")
        with self.indented(2):
            self.print_string(f"let {op_name_hint} = {lhs} - {rhs};")

name_dict: dict[SSAValue, str] = dict() class-attribute instance-attribute

count = 0 class-attribute instance-attribute

wgsl_name(v: SSAValue)

Source code in xdsl/backend/wgsl/wgsl_printer.py
17
18
19
20
21
22
23
24
def wgsl_name(self, v: SSAValue) -> str:
    """Return a stable WGSL identifier for SSA value ``v``, caching it."""
    if v not in self.name_dict:
        if v.name_hint is not None:
            # Prefer the IR-provided name hint, prefixed so it stays a
            # valid WGSL identifier.
            self.name_dict[v] = f"v{v.name_hint}"
        else:
            # Otherwise mint a fresh sequential name: v0, v1, ...
            self.name_dict[v] = f"v{self.count}"
            self.count += 1
    return self.name_dict[v]

print(op: Operation) -> None

Source code in xdsl/backend/wgsl/wgsl_printer.py
26
27
28
29
30
@singledispatchmethod
def print(self, op: Operation) -> None:
    """Fallback overload: reject any op without a registered printer."""
    raise NotImplementedError(
        f"Printing of '{op.name}' to WGSL is not implemented yet."
    )

calculate_index(op: memref.StoreOp | memref.LoadOp, indices: list[str])

Linearizes accesses to memrefs with statically known sizes into a single flat index expression.

Source code in xdsl/backend/wgsl/wgsl_printer.py
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
def calculate_index(self, op: memref.StoreOp | memref.LoadOp, indices: list[str]) -> str:
    """
    It is used for linearizing known sizes memref accesses.

    Builds a flat WGSL index expression from ``indices`` using row-major
    strides derived from the memref's static shape; a dynamic dimension
    (-1) raises ``NotImplementedError``.
    """
    memref_type = cast(MemRefType, op.memref.type)
    memref_dimension = memref_type.get_num_dims()
    memref_size = memref_type.get_shape()
    for size in memref_size:
        if size == -1:
            raise NotImplementedError(
                "The WGSL translation only works with known sizes at the moment."
            )
    index_values: list[str] = []
    for i in range(memref_dimension):
        # Stride of dimension i = product of all trailing dimensions.
        product_of_dims = 1
        for dim in memref_size[i + 1 :]:
            product_of_dims *= dim
        index_values.append(f"{product_of_dims}u * {indices[i]}")
    return " + ".join(index_values)

_(op: arith.SubfOp)

Source code in xdsl/backend/wgsl/wgsl_printer.py
228
229
230
231
232
233
234
235
@print.register
def _(self, op: arith.SubfOp):
    # Emit a float subtraction as a WGSL let-binding: let r = lhs - rhs;
    op_name_hint = self.wgsl_name(op.result)
    lhs = self.wgsl_name(op.lhs)
    rhs = self.wgsl_name(op.rhs)
    self.print_string("\n")
    with self.indented(2):
        self.print_string(f"let {op_name_hint} = {lhs} - {rhs};")