@@ -17,33 +17,34 @@ __device__ __forceinline__ void set_rows_1<float, float>(const float * src_f, fl
     *dst_f = *src_f;
 }
 
-// TODO: consolidate kernels from cpy.cu, get_rows etc to make this function generic
 template<typename src_t, typename dst_t>
 static __global__ void k_set_rows(
         const src_t * __restrict__ src0, const int64_t * __restrict__ src1, dst_t * __restrict__ dst,
         const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03,
         const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13,
         const size_t nb01, const size_t nb02, const size_t nb03,
         const size_t nb10, const size_t nb11, const size_t nb12,
-        const size_t nb1, const size_t nb2, const size_t nb3,
-        const size_t src_type_size, const size_t dst_type_size) {
+        const size_t nb1, const size_t nb2, const size_t nb3) {
 
-    const int i03 = blockIdx.z / ne02;
-    const int i02 = blockIdx.z % ne02;
-    const int i01 = blockDim.x * blockIdx.x + threadIdx.x;
-    const int i00 = blockIdx.y;
+    const int64_t i        = blockDim.x * blockIdx.x + threadIdx.x;
+    const int64_t ne_total = ne00 * ne01 * ne02 * ne03;
 
-    if (i01 >= ne01) {
+    if (i >= ne_total) {
         return;
     }
 
-    const int i12 = i03 % ne12;
-    const int i11 = i02 % ne11;
-    const int i10 = i01;
+    const int64_t i03 = i / (ne00 * ne01 * ne02);
+    const int64_t i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01);
+    const int64_t i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne00 * ne01) / ne00;
+    const int64_t i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne00 * ne01 - i01 * ne00;
+
+    const int64_t i12 = i03 % ne12;
+    const int64_t i11 = i02 % ne11;
+    const int64_t i10 = i01;
 
     const int64_t dst_row = *(src1 + i10*nb10 + i11*nb11 + i12*nb12);
 
-    const src_t * src0_row = (const src_t *)src0 + i01*nb01 + i02*nb02 + i03*nb03;
+    const src_t * src0_row = src0 + i01*nb01 + i02*nb02 + i03*nb03;
     dst_t * dst_row_ptr = dst + dst_row*nb1 + i02*nb2 + i03*nb3;
 
     const src_t * src_elem = src0_row + i00;
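
Note on the new indexing scheme: the kernel now launches one thread per element and flattens all four tensor dimensions into a single index i, recovering (i00, i01, i02, i03) by successive division, with ne00 the fastest-varying extent. A minimal host-side sketch of the same round trip (plain C++; the helper name unflatten is chosen here for illustration, it is not part of the patch):

    #include <cassert>
    #include <cstdint>

    // Recover (i00, i01, i02, i03) from a flat index i, mirroring the kernel's
    // arithmetic: i03 is the slowest-varying coordinate, i00 the fastest.
    static void unflatten(int64_t i, int64_t ne00, int64_t ne01, int64_t ne02,
                          int64_t & i00, int64_t & i01, int64_t & i02, int64_t & i03) {
        i03 = i / (ne00 * ne01 * ne02);
        i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01);
        i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne00 * ne01) / ne00;
        i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne00 * ne01 - i01 * ne00;
    }

    int main() {
        const int64_t ne00 = 4, ne01 = 3, ne02 = 2, ne03 = 2;
        int64_t i = 0;
        for (int64_t i03 = 0; i03 < ne03; ++i03)
        for (int64_t i02 = 0; i02 < ne02; ++i02)
        for (int64_t i01 = 0; i01 < ne01; ++i01)
        for (int64_t i00 = 0; i00 < ne00; ++i00, ++i) {
            int64_t a, b, c, d;
            unflatten(i, ne00, ne01, ne02, a, b, c, d);
            assert(a == i00 && b == i01 && c == i02 && d == i03);  // round trip holds
        }
        return 0;
    }
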
@@ -59,38 +60,32 @@ static void set_rows_cuda(
         const size_t nb01, const size_t nb02, const size_t nb03,
         const size_t nb10, const size_t nb11, const size_t nb12,
         const size_t nb1, const size_t nb2, const size_t nb3,
-        const size_t src_type_size, const size_t dst_type_size,
         cudaStream_t stream) {
 
+    const int64_t ne_total   = ne00 * ne01 * ne02 * ne03;
+    const int     num_blocks = (ne_total + CUDA_SET_ROWS_BLOCK_SIZE - 1) / CUDA_SET_ROWS_BLOCK_SIZE;
     const dim3 block_size(CUDA_SET_ROWS_BLOCK_SIZE);
-    const dim3 grid_size(
-        (ne01 + CUDA_SET_ROWS_BLOCK_SIZE - 1)/CUDA_SET_ROWS_BLOCK_SIZE,
-        ne00,
-        ne03*ne02
-    );
-
-    const int s1 = nb01 / sizeof(src_t);
-    const int s2 = nb02 / sizeof(src_t);
-    const int s3 = nb03 / sizeof(src_t);
+    const dim3 grid_size(num_blocks);
 
-    const int s10 = nb10 / sizeof(int64_t);
-    const int s11 = nb11 / sizeof(int64_t);
-    const int s12 = nb12 / sizeof(int64_t);
-
-    const int s_dst  = nb1 / sizeof(dst_t);
-    const int s_dst2 = nb2 / sizeof(dst_t);
-    const int s_dst3 = nb3 / sizeof(dst_t);
+    const int64_t s01 = nb01/sizeof(src_t);
+    const int64_t s02 = nb02/sizeof(src_t);
+    const int64_t s03 = nb03/sizeof(src_t);
+    const int64_t s10 = nb10/sizeof(int64_t);
+    const int64_t s11 = nb11/sizeof(int64_t);
+    const int64_t s12 = nb12/sizeof(int64_t);
+    const int64_t s1  = nb1/sizeof(dst_t);
+    const int64_t s2  = nb2/sizeof(dst_t);
+    const int64_t s3  = nb3/sizeof(dst_t);
 
-
-    if (ne01 > 0 && ne00 > 0) {
+    if (ne_total > 0) {
         k_set_rows<<<grid_size, block_size, 0, stream>>>(
             src0_d, src1_d, dst_d,
             ne00, ne01, ne02, ne03,
             ne10, ne11, ne12, ne13,
-            s1, s2, s3,
+            s01, s02, s03,
             s10, s11, s12,
-            s_dst, s_dst2, s_dst3,
-            src_type_size, dst_type_size);
+            s1, s2, s3);
     }
 }
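
The launch configuration follows directly: a 1D grid of ceil(ne_total / CUDA_SET_ROWS_BLOCK_SIZE) blocks replaces the old 3D grid, and all byte strides are converted to element strides up front by dividing by the element size, which the template parameters now provide at compile time. A quick check of the ceil-division, treating the block size as 256 purely as an assumed value for illustration:

    #include <cstdint>
    #include <cstdio>

    // Ceil-division: the smallest block count whose total thread count
    // covers ne_total elements. 256 is an assumed block size for this demo;
    // the real constant is CUDA_SET_ROWS_BLOCK_SIZE.
    static int num_blocks_for(int64_t ne_total, int block_size) {
        return (int)((ne_total + block_size - 1) / block_size);
    }

    int main() {
        printf("%d\n", num_blocks_for(1000, 256)); // 4: three full blocks plus a partial one
        printf("%d\n", num_blocks_for(1024, 256)); // 4: exact fit, no extra block
        printf("%d\n", num_blocks_for(1,    256)); // 1: never zero for a non-empty tensor
        return 0;
    }

The kernel's early-out guard (if (i >= ne_total)) retires the surplus threads of the final partial block.
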
@@ -109,6 +104,8 @@ void ggml_cuda_op_set_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     cudaStream_t stream = ctx.stream();
 
+
+
     if (dst->type == GGML_TYPE_F32) {
         set_rows_cuda(
             src0_d, src1_d, (float *)dst->data,
@@ -117,7 +114,6 @@ void ggml_cuda_op_set_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
             nb01, nb02, nb03,
             nb10, nb11, nb12,
             nb1, nb2, nb3,
-            sizeof(float), sizeof(float),
             stream
         );
     } else if (dst->type == GGML_TYPE_F16) {
@@ -128,7 +124,6 @@ void ggml_cuda_op_set_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
             nb01, nb02, nb03,
             nb10, nb11, nb12,
             nb1, nb2, nb3,
-            sizeof(float), sizeof(half),
             stream
         );
     } else {
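
With the element sizes implied by the src_t/dst_t template parameters, the dispatch branches no longer pass sizeof(float)/sizeof(half) at runtime; each branch simply instantiates set_rows_cuda with the right destination type, and the strides are derived inside. A minimal sketch of that pattern (illustrative function name, not from the patch; compile as CUDA for the half type):

    #include <cstdio>
    #include <cuda_fp16.h>

    // Element sizes are compile-time properties of the template parameters,
    // so no runtime type-size arguments are needed.
    template <typename src_t, typename dst_t>
    static void report_elem_sizes() {
        printf("src %zu bytes -> dst %zu bytes\n", sizeof(src_t), sizeof(dst_t));
    }

    int main() {
        report_elem_sizes<float, float>(); // mirrors the GGML_TYPE_F32 branch
        report_elem_sizes<float, half>();  // mirrors the GGML_TYPE_F16 branch
        return 0;
    }
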