
Commit 2161b73
fix lint
avshalomman committed Jan 6, 2025
1 parent 0bbd8c8 · commit 2161b73
Showing 1 changed file with 2 additions and 2 deletions.
vllm/model_executor/layers/fused_moe/moe_torch_iterative.py (2 additions, 2 deletions)
@@ -1,6 +1,7 @@
 import torch
 import torch.nn.functional as F

+
 def fused_moe(
     hidden_states: torch.Tensor,
     w1: torch.Tensor,
@@ -21,7 +22,6 @@ def fused_moe(
     num_tokens = hidden_states.shape[:-1].numel()
     num_experts = w1.shape[0]
     intermediate_size = w2.shape[-1]
-    device = hidden_states.device
     dtype = hidden_states.dtype

     hidden_states = hidden_states.view(num_tokens, hidden_size)
@@ -31,7 +31,7 @@
     if renormalize:
         topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
     topk_weights = topk_weights.to(dtype)

     final_hidden_states = None
     for expert_idx in range(num_experts):
         expert_w1 = w1[expert_idx]
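
The diff above is truncated before the body of the expert loop, and the single changed line in the third hunk is not recoverable from this rendering; the two visible lint fixes are the second blank line PEP 8 requires before a top-level def and the removal of the unused `device` local. For context, here is a minimal sketch of the iterative per-expert computation this file implements. Only the `renormalize` handling and the start of the expert loop are visible above; the softmax/top-k routing lines and the SwiGLU loop body (with `w1` packing gate and up projections and `w2` down-projecting) are assumptions inferred from the shapes, so read this as an illustration rather than the file's exact code.

    import torch
    import torch.nn.functional as F


    def fused_moe_sketch(
        hidden_states: torch.Tensor,   # [num_tokens, hidden_size]
        w1: torch.Tensor,              # [num_experts, 2 * intermediate_size, hidden_size]
        w2: torch.Tensor,              # [num_experts, hidden_size, intermediate_size]
        gating_output: torch.Tensor,   # [num_tokens, num_experts]
        topk: int,
        renormalize: bool,
    ) -> torch.Tensor:
        num_experts = w1.shape[0]
        intermediate_size = w2.shape[-1]
        dtype = hidden_states.dtype

        # Route each token to its top-k experts; the renormalization mirrors
        # the visible hunk, while the softmax/top-k lines are assumed.
        topk_weights = gating_output.softmax(dim=-1, dtype=torch.float)
        topk_weights, selected_experts = topk_weights.topk(topk, dim=-1)
        if renormalize:
            topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
        topk_weights = topk_weights.to(dtype)

        # Iterate over all experts; every expert's GEMMs run over every token,
        # and tokens that did not select the expert contribute zero via the mask.
        final_hidden_states = None
        for expert_idx in range(num_experts):
            expert_w1 = w1[expert_idx]
            expert_w2 = w2[expert_idx]
            expert_mask = (selected_experts == expert_idx)
            expert_weight = (topk_weights * expert_mask).sum(dim=-1, keepdim=True)
            # Assumed SwiGLU layout: the first half of w1's output gates the second.
            x = F.linear(hidden_states, expert_w1)
            gate, up = x[:, :intermediate_size], x[:, intermediate_size:]
            x = F.linear(F.silu(gate) * up, expert_w2)
            contribution = x * expert_weight
            final_hidden_states = (contribution if final_hidden_states is None
                                   else final_hidden_states + contribution)
        return final_hidden_states


    # Shape-only smoke test with hypothetical sizes:
    h = torch.randn(4, 8)            # 4 tokens, hidden_size 8
    w1 = torch.randn(2, 32, 8)       # 2 experts, intermediate_size 16
    w2 = torch.randn(2, 8, 16)
    out = fused_moe_sketch(h, w1, w2, torch.randn(4, 2), topk=2, renormalize=True)
    assert out.shape == (4, 8)

Running every expert's GEMMs over all tokens and masking out non-selected contributions keeps the loop simple and dependency-free, which is the point of a torch-only fallback path, at the cost of redundant compute relative to gather/scatter MoE kernels. It also shows why the deleted `device` local was dead code: every tensor produced here inherits its device from `hidden_states`.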
