
Commit 964be0e

[Model][MiniCPM] support MiniCPM
Signed-off-by: MengqingCao <[email protected]>
1 parent d785e78

File tree

3 files changed: +51 -0 lines


vllm_ascend/patch/__init__.py

Lines changed: 14 additions & 0 deletions
@@ -125,6 +125,20 @@
 # Future Plan:
 #   Revert it when the related PR is merged in vllm.
 #
+# ** File: worker/patch_common/patch_minicpm.py **
+#    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#   1. `vllm.model_executor.models.minicpm.MiniCPMAttention.forward`
+#    Why:
+#      The forward function of MiniCPMAttention in vLLM performs a dtype
+#      conversion (original dtype --> float32) to preserve precision on CUDA.
+#      However, float32 is not supported by the CANN rope op, so we keep this patch.
+#    How:
+#      Remove the dtype conversions from the forward function.
+#    Related PR (if no, explain why: 1. refused by vllm; 2. vllm doesn't support it; 3. preparing to submit...):
+#      No. This patch is NPU-only because of the rope op.
+#    Future Plan:
+#      Keep this patch in vllm-ascend.
+#
 # ** File: worker/patch_common/patch_multi_step_worker.py **
 #    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 #   1. `vllm.spec_decode.multi_step_worker.MultiStepWorker.sampler_output`
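
For contrast, here is a minimal sketch of what the unpatched upstream forward does, reconstructed from the Why/How notes above rather than copied from vLLM (the exact upstream code may differ); the two dtype-conversion steps marked below are what the patch deletes:

import torch

def upstream_forward_sketch(self, positions: torch.Tensor,
                            hidden_states: torch.Tensor) -> torch.Tensor:
    # Sketch only: same structure as the patched forward, plus the float32
    # round-trip around the rotary embedding that CUDA tolerates but the
    # CANN rope op does not.
    qkv, _ = self.qkv_proj(hidden_states)
    q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
    orig_dtype = q.dtype
    q, k = q.float(), k.float()                # convert: original dtype --> float32
    q, k = self.rotary_emb(positions, q, k)    # rope runs in float32 on CUDA
    q, k = q.to(orig_dtype), k.to(orig_dtype)  # convert back: float32 --> original dtype
    attn_output = self.attn(q, k, v)
    output, _ = self.o_proj(attn_output)
    return output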

vllm_ascend/patch/worker/patch_common/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -16,5 +16,6 @@
 #

 import vllm_ascend.patch.worker.patch_common.patch_metrics  # noqa
+import vllm_ascend.patch.worker.patch_common.patch_minicpm  # noqa
 import vllm_ascend.patch.worker.patch_common.patch_multi_step_worker  # noqa
 import vllm_ascend.patch.worker.patch_common.patch_spec_decode_worker  # noqa
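
Since the patch is applied purely as an import side effect, a quick way to confirm it took hold is a hedged check like the one below (assuming vllm and vllm-ascend are installed; this snippet is not part of the commit):

# Importing the package runs the patch modules listed above.
import vllm_ascend.patch.worker.patch_common  # noqa: F401

from vllm.model_executor.models.minicpm import MiniCPMAttention
from vllm_ascend.patch.worker.patch_common import patch_minicpm

# The class attribute should now be the replacement function defined in
# patch_minicpm (shown in the new file below).
assert MiniCPMAttention.forward is patch_minicpm.forward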
vllm_ascend/patch/worker/patch_common/patch_minicpm.py

Lines changed: 36 additions & 0 deletions
@@ -0,0 +1,36 @@
+#
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# This file is a part of the vllm-ascend project.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import torch
+from vllm.model_executor.models.minicpm import MiniCPMAttention
+
+
+def forward(
+    self,
+    positions: torch.Tensor,
+    hidden_states: torch.Tensor,
+) -> torch.Tensor:
+    qkv, _ = self.qkv_proj(hidden_states)
+    q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
+    q, k = self.rotary_emb(positions, q, k)
+    attn_output = self.attn(q, k, v)
+    output, _ = self.o_proj(attn_output)
+    return output
+
+
+# The dtype conversion in the forward function is removed to support the rope operator.
+MiniCPMAttention.forward = forward
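
A note on the mechanism: because Python looks up instance methods on the class at call time, this single class-level assignment reroutes forward for every MiniCPMAttention instance, including any constructed before the patch module is imported, with no subclassing or per-instance rebinding required. The trade-off is that the replacement can silently diverge if vLLM later changes MiniCPMAttention, which is why the patch is registered in patch/__init__.py with an explicit future plan.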
