# For disaggregated mode, set disaggregated_prefill.enabled: true and set the following parameters:
#   prefiller_host_index: the hosts indices of the nodes running the prefillers
#   decoder_host_index: the hosts indices of the nodes running the decoders
# Suppose we have **4 nodes** running a 2P1D setup (2 Prefillers + 1 Decoder):
# ┌───────────────┬───────────────┬───────────────┬───────────────┐
# │     node0     │     node1     │     node2     │     node3     │
# │  Prefiller #1 │  Prefiller #2 │    Decoder    │    Decoder    │
# └───────────────┴───────────────┴───────────────┴───────────────┘
# For the prefiller nodes, the hosts are node0 and node1.
# For the decoder, there is only one decoder instance: dp+tp+ep spans node2 and node3,
# where node3 runs in headless mode, so only node2 is listed as a decoder host.
# So prefiller_host_index is [0, 1] and decoder_host_index is [2].
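# A minimal sketch of that hypothetical 4-node 2P1D layout would look like this
# (the actual values used by this 2-node test are set further below):
#   num_nodes: 4
#   disaggregated_prefill:
#     enabled: true
#     prefiller_host_index: [0, 1]
#     decoder_host_index: [2]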
test_name: "test DeepSeek-V3 disaggregated_prefill"
model: "vllm-ascend/DeepSeek-V3-W8A8"
num_nodes: 2
npu_per_node: 16
env_common:
  VLLM_USE_MODELSCOPE: true
  OMP_PROC_BIND: false
  OMP_NUM_THREADS: 100
  HCCL_BUFFSIZE: 1024
  SERVER_PORT: 8080
  NUMEXPR_MAX_THREADS: 128
  DISAGGREGATED_PREFILL_PROXY_SCRIPT: "examples/disaggregated_prefill_v1/load_balance_proxy_server_example.py"
# For a non-Kubernetes deployment, list the IPs of all nodes used, in order, as follows:
# cluster_hosts: []
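# For example (hypothetical addresses, ordered to match the host indices used below):
# cluster_hosts: ["192.0.2.10", "192.0.2.11"]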
disaggregated_prefill:
  enabled: true
  prefiller_host_index: [0]
  decoder_host_index: [1]

deployment:
  -
    server_cmd: >
      vllm serve "vllm-ascend/DeepSeek-V3-W8A8"
      --host 0.0.0.0
      --port $SERVER_PORT
      --data-parallel-size 2
      --data-parallel-size-local 2
      --tensor-parallel-size 8
      --seed 1024
      --enforce-eager
      --enable-expert-parallel
      --max-num-seqs 16
      --max-model-len 8192
      --max-num-batched-tokens 8192
      --quantization ascend
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.9
      --kv-transfer-config
      '{"kv_connector": "MooncakeLayerwiseConnector",
       "kv_role": "kv_producer",
       "kv_port": "30000",
       "engine_id": "0",
       "kv_connector_module_path": "vllm_ascend.distributed.mooncake_layerwise_connector",
       "kv_connector_extra_config": {
         "prefill": {
             "dp_size": 2,
             "tp_size": 8
         },
         "decode": {
             "dp_size": 2,
             "tp_size": 8
         }
       }
      }'

  -
    server_cmd: >
      vllm serve "vllm-ascend/DeepSeek-V3-W8A8"
      --host 0.0.0.0
      --port $SERVER_PORT
      --data-parallel-size 2
      --data-parallel-size-local 2
      --tensor-parallel-size 8
      --seed 1024
      --quantization ascend
      --max-num-seqs 16
      --max-model-len 8192
      --max-num-batched-tokens 8192
      --enable-expert-parallel
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.9
      --additional-config '{"torchair_graph_config":{"enabled":true}}'
      --kv-transfer-config
      '{"kv_connector": "MooncakeLayerwiseConnector",
       "kv_role": "kv_consumer",
       "kv_port": "30200",
       "engine_id": "1",
       "kv_connector_module_path": "vllm_ascend.distributed.mooncake_layerwise_connector",
       "kv_connector_extra_config": {
         "prefill": {
             "dp_size": 2,
             "tp_size": 8
         },
         "decode": {
             "dp_size": 2,
             "tp_size": 8
         }
       }
      }'
benchmarks:
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/gsm8k-lite
    request_conf: vllm_api_general_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
    max_out_len: 4096
    batch_size: 512
    baseline: 95
    threshold: 5