From ce242ef396fe34813ac7606e5158d563615a7391 Mon Sep 17 00:00:00 2001
From: Nathan Habib
Date: Fri, 22 Aug 2025 09:19:56 +0000
Subject: [PATCH 1/2] adds enable thinking to model args

---
 src/lighteval/models/abstract_model.py |  1 +
 src/lighteval/tasks/prompt_manager.py  | 15 ++++++++++++++-
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/src/lighteval/models/abstract_model.py b/src/lighteval/models/abstract_model.py
index ab4ce43e2..1f464ddda 100644
--- a/src/lighteval/models/abstract_model.py
+++ b/src/lighteval/models/abstract_model.py
@@ -81,6 +81,7 @@ class ModelConfig(BaseModel, extra="forbid"):
 
     generation_parameters: GenerationParameters = GenerationParameters()
     system_prompt: str | None = None
+    enable_thinking: bool | None = None
     cache_dir: str = "~/.cache/huggingface/lighteval"
 
     @classmethod
diff --git a/src/lighteval/tasks/prompt_manager.py b/src/lighteval/tasks/prompt_manager.py
index 6b7068bd8..c7653c7a7 100644
--- a/src/lighteval/tasks/prompt_manager.py
+++ b/src/lighteval/tasks/prompt_manager.py
@@ -40,8 +40,15 @@ class PromptManager:
-    def __init__(self, use_chat_template: bool = False, tokenizer=None, system_prompt: str | None = None):
+    def __init__(
+        self,
+        use_chat_template: bool = False,
+        tokenizer=None,
+        system_prompt: str | None = None,
+        enable_thinking: bool | None = None,
+    ):
         self.use_chat_template = use_chat_template
+        self.enable_thinking = enable_thinking
         self.tokenizer = tokenizer
         self.system_prompt = system_prompt  # System prompt to be used in chat templates
@@ -119,10 +126,16 @@ def _prepare_chat_template(self, doc: Doc, tokenize: bool = True) -> str:
 
         if tokenize:  # for local models
             assert self.tokenizer is not None, "Tokenizer must be set for chat template formatting."
+            if self.enable_thinking is not None:
+                tokenizer_kwargs = {"enable_thinking": self.enable_thinking}
+            else:
+                tokenizer_kwargs = {}
+
             return self.tokenizer.apply_chat_template(
                 messages,
                 tokenize=False,
                 add_generation_prompt=True,
+                **tokenizer_kwargs,
             )
 
         else:  # for apis

From fb8d37709726c5d78f4a7cf7c96c25aefd1bcc15 Mon Sep 17 00:00:00 2001
From: Nathan Habib
Date: Fri, 22 Aug 2025 09:22:21 +0000
Subject: [PATCH 2/2] adds enable thinking to model args

---
 src/lighteval/models/transformers/transformers_model.py | 5 ++++-
 src/lighteval/models/vllm/vllm_model.py                 | 4 +++-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/src/lighteval/models/transformers/transformers_model.py b/src/lighteval/models/transformers/transformers_model.py
index db2b68bd1..ae0e2b19a 100644
--- a/src/lighteval/models/transformers/transformers_model.py
+++ b/src/lighteval/models/transformers/transformers_model.py
@@ -229,7 +229,10 @@ def __init__(
             model_size = -1
 
         self.prompt_manager = PromptManager(
-            use_chat_template=self.use_chat_template, tokenizer=self.tokenizer, system_prompt=config.system_prompt
+            use_chat_template=self.use_chat_template,
+            tokenizer=self.tokenizer,
+            system_prompt=config.system_prompt,
+            enable_thinking=config.enable_thinking,
         )
 
         # Initialize cache for tokenization and predictions
diff --git a/src/lighteval/models/vllm/vllm_model.py b/src/lighteval/models/vllm/vllm_model.py
index b9f438e81..ec5a9ffc5 100644
--- a/src/lighteval/models/vllm/vllm_model.py
+++ b/src/lighteval/models/vllm/vllm_model.py
@@ -202,7 +202,9 @@ def __init__(
 
         self.pairwise_tokenization = config.pairwise_tokenization
 
-        self.prompt_manager = PromptManager(self.use_chat_template, self.tokenizer, config.system_prompt)
+        self.prompt_manager = PromptManager(
+            self.use_chat_template, self.tokenizer, config.system_prompt, config.enable_thinking
+        )
 
         # Initialize cache for tokenization and predictions
         self._cache = SampleCache(config)
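
Note: a minimal sketch of the behaviour this series enables, written against the public transformers API rather than lighteval internals. The checkpoint name is illustrative only, and it assumes a tokenizer whose chat template understands an `enable_thinking` kwarg (e.g. the Qwen3 family); the flag is forwarded only when explicitly set, so templates that do not know the kwarg keep their default behaviour.

    from transformers import AutoTokenizer

    # Illustrative checkpoint; any chat template that reads `enable_thinking`
    # (such as Qwen3's) behaves the same way.
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")
    messages = [{"role": "user", "content": "What is 2 + 2?"}]

    # Mirrors PromptManager._prepare_chat_template: forward the kwarg only
    # when it was explicitly set, here as it would arrive from
    # ModelConfig.enable_thinking.
    enable_thinking = False
    if enable_thinking is not None:
        tokenizer_kwargs = {"enable_thinking": enable_thinking}
    else:
        tokenizer_kwargs = {}

    prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        **tokenizer_kwargs,
    )
    # With enable_thinking=False, Qwen3 templates emit an empty
    # <think></think> block so the model skips its reasoning trace.
    print(prompt)

With the config field in place, the flag should be settable like any other model arg, e.g. `--model-args "pretrained=Qwen/Qwen3-0.6B,enable_thinking=False"` (hypothetical invocation; the exact syntax depends on lighteval's model-args parsing).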