From f6e0223b80b8d243b42b80ad1de2765d5fa1708d Mon Sep 17 00:00:00 2001
From: Kazuhiro Serizawa
Date: Wed, 1 Jan 2025 14:42:12 +0900
Subject: [PATCH] docs: fix typo

Signed-off-by: Kazuhiro Serizawa
---
 vllm/model_executor/layers/rejection_sampler.py | 2 +-
 vllm/v1/sample/ops/topk_topp_sampler.py         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/vllm/model_executor/layers/rejection_sampler.py b/vllm/model_executor/layers/rejection_sampler.py
index 97a1b0c9603bd..165e8309fee64 100644
--- a/vllm/model_executor/layers/rejection_sampler.py
+++ b/vllm/model_executor/layers/rejection_sampler.py
@@ -39,7 +39,7 @@ def __init__(self,
             strict_mode: Whether or not to perform shape/device/dtype checks
                 during sampling. This catches correctness issues but adds
                 nontrivial latency.
-            use_falshinfer: We will use this parameter to determine whether
+            use_flashinfer: We will use this parameter to determine whether
                 to use the FlashInfer rejection sampling kernel or not. If it's
                 None, we will use the default value from the environment
                 variable. This parameter is only used for testing purposes.
diff --git a/vllm/v1/sample/ops/topk_topp_sampler.py b/vllm/v1/sample/ops/topk_topp_sampler.py
index c088c3c129ca5..f2007d85c61a5 100644
--- a/vllm/v1/sample/ops/topk_topp_sampler.py
+++ b/vllm/v1/sample/ops/topk_topp_sampler.py
@@ -44,7 +44,7 @@ def __init__(self):
             logger.warning(
                 "FlashInfer is not available. Falling back to the PyTorch-"
                 "native implementation of top-p & top-k sampling. For the "
-                "best performance, please install FalshInfer.")
+                "best performance, please install FlashInfer.")
             self.forward = self.forward_native
         else:
             self.forward = self.forward_native
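
Note (not part of the patch): the warning corrected above refers to falling back to a
PyTorch-native implementation of top-p & top-k sampling when FlashInfer is not installed.
For readers unfamiliar with that fallback, below is a minimal illustrative sketch of top-k
followed by top-p (nucleus) filtering in plain PyTorch. It is not vLLM's actual
implementation; the function name, signature, and shapes are hypothetical.

    # Illustrative sketch only, assuming per-row logits of shape (batch, vocab).
    import torch

    def sample_top_k_top_p(logits: torch.Tensor, k: int, p: float) -> torch.Tensor:
        """Sample one token id per row after top-k and top-p filtering."""
        # Keep only the k highest logits per row; torch.topk returns them
        # sorted in descending order along with their vocab indices.
        topk_vals, topk_idx = torch.topk(logits, k, dim=-1)
        probs = torch.softmax(topk_vals, dim=-1)

        # Nucleus (top-p) filtering: drop tokens whose preceding cumulative
        # mass already exceeds p, always keeping the most probable token.
        cumprobs = torch.cumsum(probs, dim=-1)
        mask = cumprobs - probs > p
        probs = probs.masked_fill(mask, 0.0)
        probs = probs / probs.sum(dim=-1, keepdim=True)

        # Sample within the filtered set and map back to original vocab ids.
        choice = torch.multinomial(probs, num_samples=1)
        return torch.gather(topk_idx, -1, choice)

    # Example usage with hypothetical sizes: batch of 2, vocab of 8.
    token_ids = sample_top_k_top_p(torch.randn(2, 8), k=4, p=0.9)
    print(token_ids.shape)  # torch.Size([2, 1])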