
Commit a67f69c

Merge branch 'develop' into fix_logprobs

2 parents 27388dd + 0eb799a

65 files changed: +4486 -120 lines


.github/workflows/ci_xpu.yml

Lines changed: 1 addition & 1 deletion
@@ -82,5 +82,5 @@ jobs:
         ${docker_image} /bin/bash -c "
         git config --global --add safe.directory /workspace/FastDeploy
         cd FastDeploy
-        bash scripts/run_ci_xpu.sh
+        bash scripts/run_xpu_ci_pytest.sh
         "

.github/workflows/pr_build_and_test.yml

Lines changed: 2 additions & 2 deletions
@@ -51,7 +51,7 @@ jobs:
     needs: [build]
     uses: ./.github/workflows/_logprob_test_linux.yml
     with:
-      DOCKER_IMAGE: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleqa:fastdeploy-ciuse-cuda126-dailyupdate
+      DOCKER_IMAGE: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleqa:fastdeploy-ciuse-cuda126-paddle-dev
       PADDLETEST_ARCHIVE_URL: "https://xly-devops.bj.bcebos.com/PaddleTest/PaddleTest.tar.gz"
       FASTDEPLOY_WHEEL_URL: ${{ needs.build.outputs.wheel_path }}
       MODEL_CACHE_DIR: "/ssd2/actions-runner/ModelData"
@@ -71,7 +71,7 @@ jobs:
     needs: [clone,build]
     uses: ./.github/workflows/_base_test.yml
     with:
-      DOCKER_IMAGE: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleqa:fastdeploy-ciuse-cuda126-dailyupdate
+      DOCKER_IMAGE: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleqa:fastdeploy-ciuse-cuda126-paddle-dev
       FASTDEPLOY_ARCHIVE_URL: ${{ needs.clone.outputs.repo_archive_url }}
       FASTDEPLOY_WHEEL_URL: ${{ needs.build.outputs.wheel_path }}
       MODEL_CACHE_DIR: "/ssd2/actions-runner/ModelData"

custom_ops/gpu_ops/moe/ep_moe_expert_dispatch.cu

Lines changed: 10 additions & 5 deletions
@@ -430,7 +430,9 @@ __global__ void permute_x_kernel(
     }
     abs_max = phi::BlockAllReduce<MaxOp, float, Kthread>(abs_max);
     float scale = 440.f / abs_max; // use 440 so we do not have to clip
-    dequant_scale[dst_token_idx] = abs_max;
+    if (tid == 0) {
+      dequant_scale[dst_token_idx] = abs_max;
+    }
     for (int v_id = tid; v_id < hidden_size_int4; v_id += blockDim.x) {
       Load<T, vec_size>(&data_smem[v_id * vec_size], &src_vec);
 #pragma unroll
@@ -661,7 +663,7 @@ std::vector<paddle::Tensor> EPMoeExpertDispatch(

   int dequant_scale_size = 1;
   if (moe_quant_type == "w4afp8" && !up_gate_proj_in_scale) {
-    dequant_scale_size = moe_topk * num_rows;
+    dequant_scale_size = token_nums_this_rank;
   }

   auto dequant_scale =
@@ -847,8 +849,11 @@ __global__ void permute_x_fp8_kernel(
     const int start_idx = i == 0 ? 0 : token_nums_per_expert_cum[i - 1];
     const int end_idx = token_nums_per_expert_cum[i];
     if (s_token_idx >= start_idx && s_token_idx < end_idx) {
-      if ((s_token_idx - start_idx) < token_nums_per_expert[i])
+      if ((s_token_idx - start_idx) < token_nums_per_expert[i]) {
         m_indices[s_token_idx] = i;
+      } else {
+        m_indices[s_token_idx] = -1;
+      }
       break;
     }
   }
@@ -984,8 +989,8 @@ std::vector<paddle::Tensor> EPMoeExpertDispatchFP8(
       paddle::DataType::FLOAT32,
       place);

-  auto m_indices = paddle::full(
-      {token_nums_feed_to_ffn}, -1, paddle::DataType::INT32, place);
+  auto m_indices =
+      GetEmptyTensor({token_nums_feed_to_ffn}, paddle::DataType::INT32, place);
   auto token_nums_per_expert_cumsum =
       GetEmptyTensor({num_experts_per_rank}, paddle::DataType::INT64, place);
   auto token_nums_per_expert_padded_cumsum =
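
Note on these hunks: phi::BlockAllReduce is an all-reduce, so after it every thread in the block holds the same abs_max; guarding the store with tid == 0 leaves a single writer instead of having every thread redundantly hit the same dequant_scale slot. The second hunk sizes dequant_scale from token_nums_this_rank rather than the moe_topk * num_rows upper bound. The last two hunks are paired: permute_x_fp8_kernel now writes -1 into padded m_indices slots itself, so the host can allocate the buffer with GetEmptyTensor instead of initializing it with paddle::full. A minimal sketch of the single-writer pattern, using a hypothetical shared-memory reduction in place of FastDeploy's helper:

    // Block-wide abs-max; launch with exactly 256 threads per block, e.g.
    // block_abs_max<<<num_blocks, 256>>>(in, out, n).
    __global__ void block_abs_max(const float* in, float* out, int n) {
      __shared__ float smem[256];
      float v = 0.f;
      for (int i = threadIdx.x; i < n; i += blockDim.x) {
        v = fmaxf(v, fabsf(in[i]));  // per-thread partial abs-max
      }
      smem[threadIdx.x] = v;
      __syncthreads();
      for (int s = blockDim.x / 2; s > 0; s >>= 1) {  // tree reduction
        if (threadIdx.x < s) {
          smem[threadIdx.x] = fmaxf(smem[threadIdx.x], smem[threadIdx.x + s]);
        }
        __syncthreads();
      }
      if (threadIdx.x == 0) {
        out[blockIdx.x] = smem[0];  // one writer publishes the block result
      }
    }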

custom_ops/utils/auto_gen_w4afp8_gemm_kernel.py

Lines changed: 1 addition & 1 deletion
@@ -85,7 +85,7 @@
 """

 # [M, K, Number of experts, token Padding Size, weight K group size]
-gemm_case = [[256, 256, 2, 0, 128]]
+gemm_case = [[256, 256, 2, 0, 128], [512, 256, 2, 0, 128]]

 dtype = ["BF16"]

Lines changed: 106 additions & 0 deletions
@@ -0,0 +1,106 @@
+// Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/ipc.h>
+#include <sys/msg.h>
+#include <sys/types.h>
+#include "paddle/extension.h"
+
+#ifndef PD_BUILD_STATIC_OP
+#define PD_BUILD_STATIC_OP(name) PD_BUILD_OP(static_op_##name)
+#endif
+
+#define MAX_BSZ 128
+#define K 5
+
+struct msgdata {
+  long mtype;
+  int mtext[MAX_BSZ * (K + 1) + 2];  // stop_flag, bsz, tokens
+  float mtext_f[MAX_BSZ * (K + 1)];  // score
+  int mtext_ranks[MAX_BSZ];          // ranks
+};
+
+void GetOutputTopK(const paddle::Tensor& x,
+                   const paddle::Tensor& scores,
+                   const paddle::Tensor& ranks,
+                   int k,
+                   int64_t rank_id,
+                   bool wait_flag) {
+  static struct msgdata msg_rcv;
+  int msg_queue_id = 1;
+
+  if (const char* inference_msg_queue_id_env_p =
+          std::getenv("INFERENCE_MSG_QUEUE_ID")) {
+    std::string inference_msg_queue_id_env_str(inference_msg_queue_id_env_p);
+    int inference_msg_queue_id_from_env =
+        std::stoi(inference_msg_queue_id_env_str);
+#ifdef GET_OUTPUT_DEBUG
+    std::cout << "Your INFERENCE_MSG_QUEUE_ID is: "
+              << inference_msg_queue_id_from_env << std::endl;
+#endif
+    msg_queue_id = inference_msg_queue_id_from_env;
+  }
+  static key_t key = ftok("/dev/shm", msg_queue_id);
+
+  static int msgid = msgget(key, IPC_CREAT | 0666);
+#ifdef GET_OUTPUT_DEBUG
+  std::cout << "get_output_key: " << key << std::endl;
+  std::cout << "get_output msgid: " << msgid << std::endl;
+#endif
+
+  int64_t* out_data = const_cast<int64_t*>(x.data<int64_t>());
+  float* scores_data = const_cast<float*>(scores.data<float>());
+  int64_t* ranks_data = const_cast<int64_t*>(ranks.data<int64_t>());
+
+  size_t msg_len = (MAX_BSZ * (K + 1) + 2) * sizeof(int) +
+                   (MAX_BSZ * (K + 1)) * sizeof(float) + MAX_BSZ * sizeof(int);
+
+  int ret = -1;
+  if (!wait_flag) {
+    ret = msgrcv(msgid, &msg_rcv, msg_len, 0, IPC_NOWAIT);
+  } else {
+    ret = msgrcv(msgid, &msg_rcv, msg_len, 0, 0);
+  }
+
+  if (ret == -1) {
+    out_data[0] = -2;
+    out_data[1] = 0;
+    return;
+  }
+
+  int bsz = msg_rcv.mtext[1];
+  out_data[0] = (int64_t)msg_rcv.mtext[0];
+  out_data[1] = (int64_t)msg_rcv.mtext[1];
+
+  for (int i = 0; i < bsz; i++) {
+    for (int j = 0; j < k + 1; j++) {
+      const int64_t offset = i * (K + 1) + j;
+      out_data[offset + 2] = (int64_t)msg_rcv.mtext[offset + 2];
+      scores_data[offset] = msg_rcv.mtext_f[offset];
+    }
+    ranks_data[i] = (int64_t)msg_rcv.mtext_ranks[i];
+  }
+  return;
+}
+
+PD_BUILD_STATIC_OP(get_output_topk)
+    .Inputs({"x", "scores", "ranks"})
+    .Attrs({"k: int", "rank_id: int64_t", "wait_flag: bool"})
+    .Outputs({"x_out", "scores_out", "ranks_out"})
+    .SetInplaceMap({{"x", "x_out"},
+                    {"scores", "scores_out"},
+                    {"ranks", "ranks_out"}})
+    .SetKernelFn(PD_KERNEL(GetOutputTopK));
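
This op and the save_output_topk op below communicate over a System V message queue keyed by ftok("/dev/shm", msg_queue_id). Note that the msgsz argument to msgsnd/msgrcv counts only the bytes after the mandatory long mtype header, which is why msg_len above sums the three payload arrays and leaves mtype out. A self-contained sketch of that round trip, with a hypothetical toy payload in place of msgdata:

    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/msg.h>

    struct toy_msg {
      long mtype;      // required first field; excluded from msgsnd's length
      int payload[4];  // everything after mtype counts toward msgsz
    };

    int main() {
      int qid = msgget(IPC_PRIVATE, IPC_CREAT | 0666);  // demo-only queue
      struct toy_msg snd = {1, {7, 8, 9, 10}}, rcv;
      size_t len = sizeof(snd.payload);  // bytes after mtype only
      msgsnd(qid, &snd, len, 0);         // producer side
      msgrcv(qid, &rcv, len, 0, 0);      // consumer side, blocking
      printf("got %d %d %d %d\n", rcv.payload[0], rcv.payload[1],
             rcv.payload[2], rcv.payload[3]);
      msgctl(qid, IPC_RMID, NULL);       // remove the queue
      return 0;
    }
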
Lines changed: 149 additions & 0 deletions
@@ -0,0 +1,149 @@
+// Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/ipc.h>
+#include <sys/msg.h>
+#include <sys/types.h>
+#include "paddle/extension.h"
+
+#ifndef PD_BUILD_STATIC_OP
+#define PD_BUILD_STATIC_OP(name) PD_BUILD_OP(static_op_##name)
+#endif
+
+#define MAX_BSZ 128
+#define K 5
+// #define SAVE_WITH_OUTPUT_DEBUG
+
+struct msgdata {
+  long mtype;
+  int mtext[MAX_BSZ * (K + 1) + 2];  // stop_flag, bsz, tokens
+  float mtext_f[MAX_BSZ * (K + 1)];  // score
+  int mtext_ranks[MAX_BSZ];          // ranks
+};
+
+void SaveOutMmsgTopK(const paddle::Tensor& x,
+                     const paddle::Tensor& logprob_token_ids,  // [bsz, k+1]
+                     const paddle::Tensor& logprob_scores,     // [bsz, k+1]
+                     const paddle::Tensor& ranks,
+                     const paddle::Tensor& not_need_stop,
+                     int64_t rank_id) {
+  if (rank_id > 0) {
+    return;
+  }
+  auto x_cpu = x.copy_to(paddle::CPUPlace(), false);
+  auto logprob_token_ids_cpu =
+      logprob_token_ids.copy_to(paddle::CPUPlace(), false);
+  auto logprob_scores_cpu = logprob_scores.copy_to(paddle::CPUPlace(), false);
+  auto ranks_cpu = ranks.copy_to(paddle::CPUPlace(), false);
+  int64_t* x_data = x_cpu.data<int64_t>();
+  int64_t* logprob_token_ids_data = logprob_token_ids_cpu.data<int64_t>();
+  float* logprob_scores_data = logprob_scores_cpu.data<float>();
+  int64_t* ranks_data = ranks_cpu.data<int64_t>();
+  static struct msgdata msg_sed;
+  int msg_queue_id = 1;
+  if (const char* inference_msg_queue_id_env_p =
+          std::getenv("INFERENCE_MSG_QUEUE_ID")) {
+    std::string inference_msg_queue_id_env_str(inference_msg_queue_id_env_p);
+    int inference_msg_queue_id_from_env =
+        std::stoi(inference_msg_queue_id_env_str);
+    msg_queue_id = inference_msg_queue_id_from_env;
+#ifdef SAVE_WITH_OUTPUT_DEBUG
+    std::cout << "Your INFERENCE_MSG_QUEUE_ID is: "
+              << inference_msg_queue_id_from_env << std::endl;
+#endif
+  } else {
+#ifdef SAVE_WITH_OUTPUT_DEBUG
+    std::cout << "Failed to got INFERENCE_MSG_QUEUE_ID at env, use default."
+              << std::endl;
+#endif
+  }
+  int inference_msg_id_from_env = 1;
+  if (const char* inference_msg_id_env_p = std::getenv("INFERENCE_MSG_ID")) {
+    std::string inference_msg_id_env_str(inference_msg_id_env_p);
+    inference_msg_id_from_env = std::stoi(inference_msg_id_env_str);
+    if (inference_msg_id_from_env == 2) {
+      // 2 and -2 is preserve for no-output indication.
+      throw std::runtime_error(
+          " INFERENCE_MSG_ID cannot be 2, please use other number.");
+    }
+    if (inference_msg_id_from_env < 0) {
+      throw std::runtime_error(
+          " INFERENCE_MSG_ID cannot be negative, please use other "
+          "number.");
+    }
+#ifdef SAVE_WITH_OUTPUT_DEBUG
+    std::cout << "Your INFERENCE_MSG_ID is: " << inference_msg_id_from_env
+              << std::endl;
+#endif
+  } else {
+#ifdef SAVE_WITH_OUTPUT_DEBUG
+    std::cout << "Failed to got INFERENCE_MSG_ID at env, use (int)1 as default."
+              << std::endl;
+#endif
+  }
+  static key_t key = ftok("/dev/shm", msg_queue_id);
+  static int msgid = msgget(key, IPC_CREAT | 0666);
+#ifdef SAVE_WITH_OUTPUT_DEBUG
+  std::cout << "save_output_key: " << key << std::endl;
+  std::cout << "save msgid: " << msgid << std::endl;
+#endif
+  msg_sed.mtype = 1;
+  bool not_need_stop_data = not_need_stop.data<bool>()[0];
+  msg_sed.mtext[0] = not_need_stop_data ? inference_msg_id_from_env
+                                        : -inference_msg_id_from_env;
+  int bsz = x.shape()[0];
+  int max_num_logprobs = logprob_token_ids.shape()[1];
+  msg_sed.mtext[1] = bsz;
+  for (int i = 0; i < bsz; i++) {
+    for (int j = 0; j < K + 1; j++) {
+      const int64_t offset = i * (K + 1) + j;
+      if (j == 0) {
+        msg_sed.mtext[offset + 2] = (int)x_data[i];
+        msg_sed.mtext_f[offset] = logprob_scores_data[i * max_num_logprobs + j];
+      } else if (j < max_num_logprobs) {
+        msg_sed.mtext[offset + 2] =
+            (int)logprob_token_ids_data[i * max_num_logprobs + j];
+        msg_sed.mtext_f[offset] = logprob_scores_data[i * max_num_logprobs + j];
+      } else {
+        msg_sed.mtext[offset + 2] = -1;
+        msg_sed.mtext_f[offset] = 0.0;
+      }
+    }
+    msg_sed.mtext_ranks[i] = (int)ranks_data[i];
+  }
+#ifdef SAVE_WITH_OUTPUT_DEBUG
+  std::cout << "msg data: ";
+  for (int i = 0; i < bsz; i++) {
+    std::cout << " " << (int)x_data[i];
+  }
+  std::cout << std::endl;
+#endif
+
+  size_t msg_len = (MAX_BSZ * (K + 1) + 2) * sizeof(int) +
+                   (MAX_BSZ * (K + 1)) * sizeof(float) + MAX_BSZ * sizeof(int);
+
+  if ((msgsnd(msgid, &msg_sed, msg_len, 0)) == -1) {
+    printf("full msg buffer\n");
+  }
+  return;
+}
+
+PD_BUILD_STATIC_OP(save_output_topk)
+    .Inputs({"x", "topk_ids", "logprob_scores", "ranks", "not_need_stop"})
+    .Attrs({"rank_id: int64_t"})
+    .Outputs({"x_out"})
+    .SetInplaceMap({{"x", "x_out"}})
+    .SetKernelFn(PD_KERNEL(SaveOutMmsgTopK));
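
The wire format the two ops share is fixed-size, bounded by the MAX_BSZ and K compile-time caps: mtext[0] carries the inference message id (negated when not_need_stop is false; ±2 is reserved for the no-output case, which is why INFERENCE_MSG_ID == 2 is rejected), mtext[1] carries the batch size, and for row i, slot j the token id sits at mtext[i * (K + 1) + j + 2] with its log-probability at mtext_f[i * (K + 1) + j]; slot 0 is the sampled token, the following slots its top-k alternatives, and unused slots are padded with -1 / 0.0f. A consumer-side decoding sketch of that layout (the helper name is hypothetical; struct msgdata and K as defined above):

    static void decode_topk_msg(const struct msgdata* m, int k) {
      const bool stop = m->mtext[0] < 0;  // negative msg id flags "stop"
      const int bsz = m->mtext[1];
      printf("stop=%d bsz=%d\n", (int)stop, bsz);
      for (int i = 0; i < bsz; i++) {
        for (int j = 0; j <= k; j++) {
          const int off = i * (K + 1) + j;
          const int token = m->mtext[off + 2];  // -1 marks a padded slot
          const float score = m->mtext_f[off];  // 0.0f for padded slots
          if (token >= 0) {
            printf("  row %d slot %d: id=%d logprob=%f\n", i, j, token, score);
          }
        }
      }
    }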
