diff --git a/protocol/src/schema/llm/LlmTextModelParams.ts b/protocol/src/schema/llm/LlmTextModelParams.ts
index 793f70c..cbb0074 100644
--- a/protocol/src/schema/llm/LlmTextModelParams.ts
+++ b/protocol/src/schema/llm/LlmTextModelParams.ts
@@ -14,6 +14,7 @@ export interface LlmTextModelParams {
     candidateCount?: number;
     seed?: number;
     stream?: boolean;
+    thinking?: boolean;
 
     // Perplexity-specific parameters
     web_search?: boolean;
@@ -82,6 +83,11 @@ export const LlmTextModelParamsSchema = new Schema({
         type: 'boolean',
         optional: true,
     },
+    thinking: {
+        type: 'boolean',
+        optional: true,
+        default: false,
+    },
     // Perplexity-specific parameters
     web_search: {
         type: 'boolean',
diff --git a/src/main/services/llm/AnthropicLlmService.ts b/src/main/services/llm/AnthropicLlmService.ts
index 696ccb2..34bf587 100644
--- a/src/main/services/llm/AnthropicLlmService.ts
+++ b/src/main/services/llm/AnthropicLlmService.ts
@@ -121,6 +121,9 @@ export class AnthropicLlmService extends LlmService {
             stop_sequences: req.params?.stopSequences,
             stream: req.params?.stream,
             system: req.system,
+            thinking: req.params?.thinking
+                ? { type: 'enabled', budget_tokens: 1024 } // budget_tokens is required when thinking is enabled; 1024 is the API minimum
+                : { type: 'disabled' }
         };
     }
 
diff --git a/src/main/services/llm/GeminiLlmService.ts b/src/main/services/llm/GeminiLlmService.ts
index 4138718..521d39f 100644
--- a/src/main/services/llm/GeminiLlmService.ts
+++ b/src/main/services/llm/GeminiLlmService.ts
@@ -183,6 +183,9 @@ export class GeminiLlmService extends LlmService {
                 top_p: req.params?.topP,
                 top_k: req.params?.topK,
                 stop_sequences: req.params?.stopSequences,
+                thinkingConfig: {
+                    thinkingBudget: req.params?.thinking ? -1 : 0
+                }
             }
         };
     }
diff --git a/src/main/services/llm/GroqLlmService.ts b/src/main/services/llm/GroqLlmService.ts
index ef484ac..94ff9d6 100644
--- a/src/main/services/llm/GroqLlmService.ts
+++ b/src/main/services/llm/GroqLlmService.ts
@@ -117,6 +117,8 @@ export class GroqLlmService extends LlmService {
             stop: req.params?.stopSequences,
             frequency_penalty: req.params?.frequencyPenalty,
             presence_penalty: req.params?.presencePenalty,
+            reasoning_effort: req.params?.thinking ? 'default' : 'none',
+            reasoning_format: req.params?.thinking ? 'parsed' : 'hidden',
             response_format: req.params?.responseFormat,
             seed: req.params?.seed,
             stream: req.params?.stream
diff --git a/src/main/services/llm/OpenaAiLlmService.ts b/src/main/services/llm/OpenaAiLlmService.ts
index d624421..255c16c 100644
--- a/src/main/services/llm/OpenaAiLlmService.ts
+++ b/src/main/services/llm/OpenaAiLlmService.ts
@@ -165,7 +165,8 @@ export class OpenaAiLlmService extends LlmService {
             logit_bias: req.params?.logitBias,
             response_format: req.params?.responseFormat,
             seed: req.params?.seed,
-            stream: req.params?.stream
+            stream: req.params?.stream,
+            reasoning_effort: req.params?.thinking ? 'medium' : 'minimal'
         };
 
         // Some models use max_completion_tokens, others use max_tokens
diff --git a/src/main/services/llm/XAiLlmService.ts b/src/main/services/llm/XAiLlmService.ts
index a02477b..9b70148 100644
--- a/src/main/services/llm/XAiLlmService.ts
+++ b/src/main/services/llm/XAiLlmService.ts
@@ -134,6 +134,7 @@ export class XAiLlmService extends LlmService {
            stop: req.params?.stopSequences,
            frequency_penalty: req.params?.frequencyPenalty,
            presence_penalty: req.params?.presencePenalty,
+           reasoning_effort: req.params?.thinking ? 'high' : 'low',
            response_format: req.params?.responseFormat,
            seed: req.params?.seed,
            stream: req.params?.stream
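
Taken together, these changes thread a single provider-agnostic `thinking` flag through every request builder. For reference, the fan-out is collected below in one minimal TypeScript sketch; `mapThinking` and `ProviderFragment` are hypothetical names for illustration, not identifiers from this codebase, and the literal values simply mirror the lines added above.

type ProviderFragment = Record<string, unknown>;

// Hypothetical helper: gathers the per-provider request fragments that the
// services in this diff derive from `req.params?.thinking`.
function mapThinking(provider: string, thinking?: boolean): ProviderFragment {
  switch (provider) {
    case 'anthropic':
      // budget_tokens must accompany type 'enabled'; 1024 is the documented minimum
      return thinking
        ? { thinking: { type: 'enabled', budget_tokens: 1024 } }
        : { thinking: { type: 'disabled' } };
    case 'gemini':
      // -1 requests a dynamic thinking budget; 0 switches thinking off
      return { thinkingConfig: { thinkingBudget: thinking ? -1 : 0 } };
    case 'groq':
      return {
        reasoning_effort: thinking ? 'default' : 'none',
        reasoning_format: thinking ? 'parsed' : 'hidden',
      };
    case 'openai':
      return { reasoning_effort: thinking ? 'medium' : 'minimal' };
    case 'xai':
      return { reasoning_effort: thinking ? 'high' : 'low' };
    default:
      return {};
  }
}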
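For example, `mapThinking('gemini', true)` evaluates to `{ thinkingConfig: { thinkingBudget: -1 } }`. Because the schema defaults `thinking` to `false`, each provider receives an explicit "reasoning off" value rather than an omitted field; note that OpenAI only accepts `reasoning_effort` on reasoning-capable models, so that fragment may need the same per-model gating the diff already applies to `max_completion_tokens` versus `max_tokens`.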