We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent f567f30 commit 3889872 — Copy full SHA for 3889872
tensorrt_llm/llmapi/llm_args.py
@@ -2778,7 +2778,9 @@ class TorchLlmArgs(BaseLlmArgs):
2778
disable_flashinfer_sampling: bool = Field(
2779
default=True,
2780
description="Disable the use of FlashInfer.sampling.",
2781
- status="prototype")
+ status="deprecated",
2782
+ deprecated="This option is likely to be removed in the future.",
2783
+ )
2784
2785
@property
2786
def quant_config(self) -> QuantConfig:
0 commit comments