diff --git a/python/sglang/srt/layers/ep_moe/layer.py b/python/sglang/srt/layers/ep_moe/layer.py
index eca119845a7..3c477fdc2ef 100644
--- a/python/sglang/srt/layers/ep_moe/layer.py
+++ b/python/sglang/srt/layers/ep_moe/layer.py
@@ -644,6 +644,10 @@ def process_weights_after_loading(self, layer: Module) -> None:
                     "QuantConfig has static quantization, but found "
                     "activation scales are None."
                 )
+            layer.w13_weight_scale = torch.nn.Parameter(
+                torch.max(layer.w13_weight_scale, dim=1).values,
+                requires_grad=False,
+            )
             return

     def apply(
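For context, a minimal standalone sketch of what the added lines compute. It assumes, for illustration only, that w13_weight_scale holds one per-tensor scale per fused shard (e.g. shape [num_experts, 2] for the w1/w3 shards of the fused w13 projection); the exact shape in layer.py may differ. torch.max(..., dim=1).values reduces this to a single scale per expert so the fused weight can share one scale:

    import torch

    # Hypothetical example: 4 experts, 2 fused shards, each with its own
    # per-tensor weight scale.
    w13_weight_scale = torch.tensor(
        [[0.5, 0.8],
         [1.2, 0.9],
         [0.3, 0.3],
         [2.0, 1.5]]
    )

    # torch.max over dim=1 returns a (values, indices) named tuple; .values
    # keeps only the per-expert maxima. Taking the max (rather than, say, the
    # mean) is conservative: it guarantees neither shard's values overflow
    # when requantized with the shared scale.
    per_expert_scale = torch.max(w13_weight_scale, dim=1).values
    print(per_expert_scale)  # tensor([0.8000, 1.2000, 0.3000, 2.0000])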