@@ -58,6 +58,15 @@ spirv.func @cooperative_matrix_load_stride_i16(%ptr : !spirv.ptr<i32, StorageBuf
  spirv.Return
}

+// CHECK-LABEL: @cooperative_matrix_load_aligned
+spirv.func @cooperative_matrix_load_aligned(%ptr : !spirv.ptr<i32, StorageBuffer>, %stride : i32) "None" {
+  // CHECK: {{%.*}} = spirv.KHR.CooperativeMatrixLoad {{%.*}}, {{%.*}}, <RowMajor>, <Aligned>, 16 :
+  // CHECK-SAME:   !spirv.ptr<i32, StorageBuffer>, i32 -> !spirv.coopmatrix<16x8xi32, Workgroup, MatrixA>
+  %0 = spirv.KHR.CooperativeMatrixLoad %ptr, %stride, <RowMajor>, <Aligned>, 16 :
+    !spirv.ptr<i32, StorageBuffer>, i32 -> !spirv.coopmatrix<16x8xi32, Workgroup, MatrixA>
+  spirv.Return
+}
+
// CHECK-LABEL: @cooperative_matrix_store
spirv.func @cooperative_matrix_store(%ptr : !spirv.ptr<i32, StorageBuffer>, %stride : i32,
                                     %m : !spirv.coopmatrix<8x16xi32, Workgroup, MatrixA>) "None" {
@@ -90,6 +99,16 @@ spirv.func @cooperative_matrix_store_stride_i16(%ptr : !spirv.ptr<i32, StorageBu
  spirv.Return
}

+// CHECK-LABEL: @cooperative_matrix_store_aligned
+spirv.func @cooperative_matrix_store_aligned(%ptr : !spirv.ptr<i32, StorageBuffer>, %stride : i32,
+                                             %m : !spirv.coopmatrix<8x16xi32, Workgroup, MatrixA>) "None" {
+  // CHECK: spirv.KHR.CooperativeMatrixStore {{%.*}}, {{%.*}}, {{%.*}}, <RowMajor>, <Aligned>, 16 :
+  // CHECK-SAME:   !spirv.ptr<i32, StorageBuffer>, !spirv.coopmatrix<8x16xi32, Workgroup, MatrixA>, i32
+  spirv.KHR.CooperativeMatrixStore %ptr, %m, %stride, <RowMajor>, <Aligned>, 16 :
+    !spirv.ptr<i32, StorageBuffer>, !spirv.coopmatrix<8x16xi32, Workgroup, MatrixA>, i32
+  spirv.Return
+}
+
// -----

spirv.func @cooperative_matrix_load_bad_ptr(%ptr : !spirv.ptr<!spirv.struct<(f32 [0])>, StorageBuffer>, %stride : i32) "None" {
@@ -120,7 +139,7 @@ spirv.func @cooperative_matrix_load_bad_operad(%ptr : !spirv.ptr<i32, StorageBuf
// -----

spirv.func @cooperative_matrix_load_aligned(%ptr : !spirv.ptr<i32, StorageBuffer>, %stride : i32) "None" {
-  // expected-error @+1 {{op has unhandled memory operand 'Aligned'}}
+  // expected-error @+1 {{missing value for the 'Aligned' memory operand}}
  %0 = spirv.KHR.CooperativeMatrixLoad %ptr, %stride, <ColumnMajor>, <Aligned> :
    !spirv.ptr<i32, StorageBuffer>, i32 -> !spirv.coopmatrix<8x16xi32, Subgroup, MatrixA>
  spirv.Return
@@ -129,7 +148,7 @@ spirv.func @cooperative_matrix_load_aligned(%ptr : !spirv.ptr<i32, StorageBuffer
// -----

spirv.func @cooperative_matrix_load_aligned(%ptr : !spirv.ptr<i32, StorageBuffer>, %stride : i32) "None" {
-  // expected-error @+1 {{op has unhandled memory operand 'Aligned'}}
+  // expected-error @+1 {{missing value for the 'Aligned' memory operand}}
  %0 = spirv.KHR.CooperativeMatrixLoad %ptr, %stride, <ColumnMajor>, <Volatile|Aligned> :
    !spirv.ptr<i32, StorageBuffer>, i32 -> !spirv.coopmatrix<8x16xi32, Subgroup, MatrixA>
  spirv.Return
@@ -179,14 +198,23 @@ spirv.func @cooperative_matrix_store_bad_operand(%ptr : !spirv.ptr<i32, StorageB

spirv.func @cooperative_matrix_store(%ptr : !spirv.ptr<i32, StorageBuffer>, %stride : i32,
                                     %m : !spirv.coopmatrix<8x16xi32, Workgroup, MatrixA>) "None" {
-  // expected-error @+1 {{op has unhandled memory operand 'Aligned'}}
+  // expected-error @+1 {{missing value for the 'Aligned' memory operand}}
  spirv.KHR.CooperativeMatrixStore %ptr, %m, %stride, <RowMajor>, <Aligned> :
    !spirv.ptr<i32, StorageBuffer>, !spirv.coopmatrix<8x16xi32, Workgroup, MatrixA>, i32
  spirv.Return
}

// -----

+spirv.func @cooperative_matrix_store_bad_operand_arg(%ptr : !spirv.ptr<i32, StorageBuffer>, %stride : i32) "None" {
+  // expected-error @+1 {{found alignment attribute for non-'Aligned' memory operand}}
+  %0 = spirv.KHR.CooperativeMatrixLoad %ptr, %stride, <RowMajor>, <MakePointerVisible>, 16 :
+    !spirv.ptr<i32, StorageBuffer>, i32 -> !spirv.coopmatrix<16x8xi32, Workgroup, MatrixA>
+  spirv.Return
+}
+
+// -----
+
spirv.func @cooperative_matrix_muladd(%a : !spirv.coopmatrix<8x16xi32, Subgroup, MatrixA>,
                                      %b : !spirv.coopmatrix<16x4xi32, Subgroup, MatrixB>,
                                      %c : !spirv.coopmatrix<8x4xi32, Subgroup, MatrixAcc>) "None" {