From 1d71f218e4e13b764e7b4c2b33b9d7d08b5441f4 Mon Sep 17 00:00:00 2001
From: xiaobo
Date: Mon, 16 Dec 2024 18:18:06 +0800
Subject: [PATCH] fix moe-ep bug

---
 python/sglang/srt/layers/ep_moe/layer.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/python/sglang/srt/layers/ep_moe/layer.py b/python/sglang/srt/layers/ep_moe/layer.py
index eca119845a7..3c477fdc2ef 100644
--- a/python/sglang/srt/layers/ep_moe/layer.py
+++ b/python/sglang/srt/layers/ep_moe/layer.py
@@ -644,6 +644,10 @@ def process_weights_after_loading(self, layer: Module) -> None:
                         "QuantConfig has static quantization, but found "
                         "activation scales are None."
                     )
+            layer.w13_weight_scale = torch.nn.Parameter(
+                torch.max(layer.w13_weight_scale, dim=1).values,
+                requires_grad=False,
+            )
         return
 
     def apply(
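
Note on the fix: the added lines collapse the fused w1/w3 weight scales to a single
scale per expert by taking the max over dim 1 before the FP8 MoE kernel runs. The
following standalone sketch (not the sglang code itself; tensor shapes and variable
names are assumptions for illustration) shows the same reduction and why max is the
conservative choice.

    import torch

    num_experts, num_shards = 4, 2  # w1 and w3 shards fused into "w13"

    # Per-shard scales as loaded from an FP8 checkpoint: [num_experts, 2]
    w13_weight_scale = torch.rand(num_experts, num_shards) + 0.5

    # Collapse to one scale per expert. Taking the max is the safe choice:
    # requantizing both shards with the larger scale cannot overflow the
    # FP8 range; it only costs a little precision on the smaller shard.
    per_expert_scale = torch.max(w13_weight_scale, dim=1).values  # [num_experts]

    # Wrapped as a non-trainable Parameter, as in the patch.
    per_expert_scale = torch.nn.Parameter(per_expert_scale, requires_grad=False)
    print(per_expert_scale.shape)  # torch.Size([4])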