
Commit 14e84c9

refactor: rename modelSupportsThinking to modelSupportsReasoning and update related usages
Signed-off-by: Fred Bricon <[email protected]>
1 parent: 0e1815c

4 files changed (+45, -21 lines)

core/llm/autodetect.ts (2 additions, 2 deletions)

@@ -132,7 +132,7 @@ function modelSupportsImages(
   return false;
 }

-function modelSupportsThinking(
+function modelSupportsReasoning(
   model: ModelDescription | null | undefined,
 ): boolean {
   if (!model) {
@@ -446,5 +446,5 @@ export {
   llmCanGenerateInParallel,
   modelSupportsImages,
   modelSupportsNextEdit,
-  modelSupportsThinking,
+  modelSupportsReasoning,
 };
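
Only the signature of the renamed function appears in this hunk. As a hedged sketch of how a caller can lean on that signature (the helper below is hypothetical, not part of this commit):

import { ModelDescription } from "core";
import { modelSupportsReasoning } from "core/llm/autodetect";

// Illustrative caller: the parameter type is ModelDescription | null |
// undefined and the body opens with an `if (!model)` guard, so a
// possibly-unset selection can be passed straight through.
function reasoningToggleVisible(
  selected: ModelDescription | null | undefined,
): boolean {
  return modelSupportsReasoning(selected);
}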

gui/src/components/mainInput/InputToolbar.tsx (3 additions, 3 deletions)

@@ -7,7 +7,7 @@ import { LightBulbIcon as LightBulbIconSolid } from "@heroicons/react/24/solid";
 import { InputModifiers } from "core";
 import {
   modelSupportsImages,
-  modelSupportsThinking,
+  modelSupportsReasoning,
 } from "core/llm/autodetect";
 import { useContext, useRef } from "react";
 import { IdeMessengerContext } from "../../context/IdeMessenger";
@@ -69,7 +69,7 @@ function InputToolbar(props: InputToolbarProps) {
     defaultModel.capabilities,
   );

-  const supportsThinking = modelSupportsThinking(defaultModel);
+  const supportsReasoning = modelSupportsReasoning(defaultModel);

   const smallFont = useFontSize(-2);
   const tinyFont = useFontSize(-3);
@@ -144,7 +144,7 @@ function InputToolbar(props: InputToolbarProps) {
         </ToolTip>
       </HoverItem>
     )}
-    {supportsThinking && (
+    {supportsReasoning && (
      <HoverItem
        onClick={() =>
          dispatch(setHasReasoningEnabled(!hasReasoningEnabled))

gui/src/hooks/ParallelListeners.tsx (2 additions, 2 deletions)

@@ -19,7 +19,7 @@ import {
 } from "../redux/slices/sessionSlice";
 import { setTTSActive } from "../redux/slices/uiSlice";

-import { modelSupportsThinking } from "core/llm/autodetect";
+import { modelSupportsReasoning } from "core/llm/autodetect";
 import { cancelStream } from "../redux/thunks/cancelStream";
 import { handleApplyStateUpdate } from "../redux/thunks/handleApplyStateUpdate";
 import { refreshSessionMetadata } from "../redux/thunks/session";
@@ -81,7 +81,7 @@ function ParallelListeners() {
    }

    const chatModel = configResult.config?.selectedModelByRole.chat;
-    const supportsReasoning = modelSupportsThinking(chatModel);
+    const supportsReasoning = modelSupportsReasoning(chatModel);
    const isReasoningDisabled =
      chatModel?.completionOptions?.reasoning === false;
    dispatch(
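
The dispatch payload is truncated in this hunk, so nothing can be said about its shape; as a hedged sketch only, the two flags computed above could be folded into one predicate (hypothetical helper, not part of this commit):

import { ModelDescription } from "core";
import { modelSupportsReasoning } from "core/llm/autodetect";

// Mirrors the two checks in ParallelListeners: reasoning is offered
// only when the model supports it and the user has not explicitly set
// completionOptions.reasoning to false.
function reasoningAvailable(
  chatModel: ModelDescription | undefined,
): boolean {
  const supportsReasoning = modelSupportsReasoning(chatModel);
  const isReasoningDisabled =
    chatModel?.completionOptions?.reasoning === false;
  return supportsReasoning && !isReasoningDisabled;
}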

gui/src/redux/thunks/streamNormalInput.ts (38 additions, 14 deletions)

@@ -1,5 +1,5 @@
 import { createAsyncThunk, unwrapResult } from "@reduxjs/toolkit";
-import { LLMFullCompletionOptions, Tool } from "core";
+import { LLMFullCompletionOptions, ModelDescription, Tool } from "core";
 import { getRuleId } from "core/llm/rules/getSystemMessageWithRules";
 import { ToCoreProtocol } from "core/protocol";
 import { selectActiveTools } from "../selectors/selectActiveTools";
@@ -85,6 +85,38 @@ async function handleToolCallExecution(
   }
 }

+/**
+ * Builds completion options with reasoning configuration based on session state and model capabilities.
+ *
+ * @param baseOptions - Base completion options to extend
+ * @param hasReasoningEnabled - Whether reasoning is enabled in the session
+ * @param model - The selected model with provider and completion options
+ * @returns Completion options with reasoning configuration
+ */
+function buildReasoningCompletionOptions(
+  baseOptions: LLMFullCompletionOptions,
+  hasReasoningEnabled: boolean | undefined,
+  model: ModelDescription,
+): LLMFullCompletionOptions {
+  if (hasReasoningEnabled === undefined) {
+    return baseOptions;
+  }
+
+  const reasoningOptions: LLMFullCompletionOptions = {
+    ...baseOptions,
+    reasoning: !!hasReasoningEnabled,
+  };
+
+  // Add reasoning budget tokens if reasoning is enabled and provider supports it
+  if (hasReasoningEnabled && model.underlyingProviderName !== "ollama") {
+    // Ollama doesn't support limiting reasoning tokens at this point
+    reasoningOptions.reasoningBudgetTokens =
+      model.completionOptions?.reasoningBudgetTokens ?? 2048;
+  }
+
+  return reasoningOptions;
+}
+
 export const streamNormalInput = createAsyncThunk<
   void,
   {
@@ -121,19 +153,11 @@ export const streamNormalInput = createAsyncThunk<
      };
    }

-    if (state.session.hasReasoningEnabled !== undefined) {
-      completionOptions = {
-        ...completionOptions,
-        reasoning: !!state.session.hasReasoningEnabled,
-        ...(state.session.hasReasoningEnabled &&
-          selectedChatModel.underlyingProviderName !== "ollama" && {
-            // Ollama doesn't support limiting reasoning tokens at this point
-            reasoningBudgetTokens:
-              selectedChatModel.completionOptions?.reasoningBudgetTokens ??
-              2048,
-          }),
-      };
-    }
+    completionOptions = buildReasoningCompletionOptions(
+      completionOptions,
+      state.session.hasReasoningEnabled,
+      selectedChatModel,
+    );

    // Construct messages (excluding system message)
    const baseSystemMessage = getBaseSystemMessage(
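
The extracted helper is a pure function, so its three cases are easy to spell out. A hedged usage sketch (buildReasoningCompletionOptions is module-private in this file, and the model literal below is a hypothetical ModelDescription trimmed to the two fields the helper reads):

import { LLMFullCompletionOptions, ModelDescription } from "core";

const model = {
  underlyingProviderName: "anthropic",
  completionOptions: { reasoningBudgetTokens: 4096 },
} as ModelDescription;

const base: LLMFullCompletionOptions = {};

// Toggle on: reasoning is enabled and the model's budget is applied
// (falling back to 2048 if the model sets none; the budget is skipped
// entirely for the "ollama" provider).
buildReasoningCompletionOptions(base, true, model);
// => { reasoning: true, reasoningBudgetTokens: 4096 }

// Toggle explicitly off: reasoning: false, no budget.
buildReasoningCompletionOptions(base, false, model);
// => { reasoning: false }

// Toggle never set in the session: base options pass through unchanged.
buildReasoningCompletionOptions(base, undefined, model);
// => {}

Compared with the inline conditional spread it replaces, the helper makes the unset, enabled, and disabled cases individually visible, which is presumably why the logic was extracted.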

0 commit comments