Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions electron/services/providers/provider-runtime-sync.ts
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,19 @@ function normalizeProviderBaseUrl(
return normalized.replace(/\/v1$/, '').replace(/\/anthropic$/, '').replace(/\/$/, '') + '/anthropic';
}

if (config.type === 'ollama') {
// Strip any trailing chat endpoint suffix, then ensure /v1 is present.
// Ollama's OpenAI-compatible API is always served under /v1, so requests to
// bare URLs like http://localhost:11434 (without /v1) result in 410 errors.
const withoutEndpoint = normalized
.replace(/\/v1\/chat\/completions$/i, '/v1')
.replace(/\/chat\/completions$/i, '');
if (!withoutEndpoint.endsWith('/v1')) {
return withoutEndpoint + '/v1';
Comment on lines +60 to +63
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P2 Badge Preserve protocol-aware URL trimming for Ollama base URLs

The new Ollama branch ignores apiProtocol and only strips chat-completions suffixes before appending /v1. If an Ollama account is configured with openai-responses or anthropic-messages and a user provides an endpoint URL like .../v1/responses or .../v1/messages, this logic produces malformed URLs such as .../v1/responses/v1, causing runtime calls to fail. Previously the unregistered-provider path trimmed protocol-specific suffixes; this change regresses that behavior for non-default Ollama protocol selections.

Useful? React with 👍 / 👎.

}
return withoutEndpoint;
}

if (isUnregisteredProviderType(config.type)) {
const protocol = apiProtocol || config.apiProtocol || 'openai-completions';
if (protocol === 'openai-responses') {
Expand Down
17 changes: 17 additions & 0 deletions src/stores/chat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1386,6 +1386,23 @@ export const useChatStore = create<ChatState>((set, get) => ({
}
}

// Guard: during an active send, don't let a history poll replace the local
// conversation with fewer messages from the gateway. This can happen when
// the gateway is reconnecting after a brief disconnect or hasn't fully
// persisted the conversation yet. Without this guard, the history poll
// causes chat history to vanish mid-conversation (issue #709).
{
const preApplyState = get();
if (
preApplyState.sending &&
preApplyState.lastUserMessageAt &&
finalMessages.length < preApplyState.messages.length &&
preApplyState.messages.length > 1
Comment on lines +1397 to +1400
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1 Badge Compare message recency, not count, before rejecting polled history

This length-based guard can discard newer gateway history in long sessions because chat.history is fetched with limit: 200, so a valid post-send response may legitimately have fewer messages than local state after optimistic appends. In that case finalMessages is replaced with stale local messages, but later in the same function filteredMessages (from the discarded history) can still flip sending to false, stopping polling and leaving the assistant reply missing from the UI. The regression is reachable when a session is near/over the 200-message window and the send flow relies on history polling instead of streamed finals.

Useful? React with 👍 / 👎.

) {
finalMessages = preApplyState.messages;
}
}

set({ messages: finalMessages, thinkingLevel, loading: false });

// Extract first user message text as a session label for display in the toolbar.
Expand Down
2 changes: 1 addition & 1 deletion src/stores/skills.ts
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ export const useSkillsStore = create<SkillsState>((set, get) => ({
// Merge with ClawHub results
if (clawhubResult.success && clawhubResult.results) {
clawhubResult.results.forEach((cs: ClawHubListResult) => {
const existing = combinedSkills.find(s => s.id === cs.slug);
const existing = combinedSkills.find(s => s.id === cs.slug || s.slug === cs.slug);
if (existing) {
if (!existing.baseDir && cs.baseDir) {
existing.baseDir = cs.baseDir;
Expand Down
50 changes: 50 additions & 0 deletions tests/unit/provider-runtime-sync.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -284,6 +284,56 @@ describe('provider-runtime-sync refresh strategy', () => {
expect(gateway.debouncedReload).toHaveBeenCalledTimes(1);
});

it('auto-appends /v1 to Ollama base URL when missing to prevent 410 errors', async () => {
  // A provider saved with a bare host:port URL — sync must append the /v1 suffix.
  const provider = createProvider({
    id: 'ollamafd',
    type: 'ollama',
    name: 'Ollama',
    model: 'llama3:latest',
    baseUrl: 'http://localhost:11434',
  });

  mocks.getProviderSecret.mockResolvedValue({ type: 'local', apiKey: 'ollama-local' });
  mocks.getProviderConfig.mockReturnValue(undefined);

  await syncSavedProviderToRuntime(provider, undefined, createGateway('running') as GatewayManager);

  // The runtime config pushed to OpenClaw must carry the normalized /v1 base URL.
  const expectedConfig = expect.objectContaining({
    baseUrl: 'http://localhost:11434/v1',
    api: 'openai-completions',
  });
  expect(mocks.syncProviderConfigToOpenClaw).toHaveBeenCalledWith(
    'ollama-ollamafd',
    'llama3:latest',
    expectedConfig,
  );
});

it('normalizes Ollama base URL with trailing /chat/completions by stripping and ensuring /v1', async () => {
  // A provider saved with a full chat endpoint URL — sync must trim the
  // endpoint path back down to the /v1 API root.
  const provider = createProvider({
    id: 'ollamafd',
    type: 'ollama',
    name: 'Ollama',
    model: 'llama3:latest',
    baseUrl: 'http://localhost:11434/v1/chat/completions',
  });

  mocks.getProviderSecret.mockResolvedValue({ type: 'local', apiKey: 'ollama-local' });
  mocks.getProviderConfig.mockReturnValue(undefined);

  await syncSavedProviderToRuntime(provider, undefined, createGateway('running') as GatewayManager);

  // Same normalized result as the bare-URL case: endpoint suffix gone, /v1 kept.
  const expectedConfig = expect.objectContaining({
    baseUrl: 'http://localhost:11434/v1',
    api: 'openai-completions',
  });
  expect(mocks.syncProviderConfigToOpenClaw).toHaveBeenCalledWith(
    'ollama-ollamafd',
    'llama3:latest',
    expectedConfig,
  );
});

it('syncs Ollama as default provider with correct baseUrl and api protocol', async () => {
const ollamaProvider = createProvider({
id: 'ollamafd',
Expand Down
28 changes: 28 additions & 0 deletions tests/unit/skills-errors.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,34 @@ vi.mock('@/stores/gateway', () => ({
},
}));

describe('skills store slug matching', () => {
  beforeEach(() => {
    vi.clearAllMocks();
    vi.resetModules();
  });

  it('matches ClawHub skill to gateway skill when gateway slug differs from skillKey', async () => {
    // Gateway reports the skill under skillKey "foo-v2" while its slug is "foo";
    // ClawHub lists the installed skill by slug only. The store must merge the
    // two entries instead of creating a duplicate placeholder.
    const gatewaySkill = {
      skillKey: 'foo-v2',
      slug: 'foo',
      name: 'Foo Skill',
      description: 'A skill',
      disabled: false,
    };
    rpcMock.mockResolvedValueOnce({ skills: [gatewaySkill] });

    const clawhubListing = { success: true, results: [{ slug: 'foo', version: '1.0.0' }] };
    hostApiFetchMock.mockResolvedValueOnce(clawhubListing).mockResolvedValueOnce({});

    const { useSkillsStore } = await import('@/stores/skills');
    const store = useSkillsStore.getState();
    await store.fetchSkills();

    const merged = useSkillsStore.getState().skills;
    // Exactly one entry survives the merge — no "recently installed" duplicate.
    expect(merged).toHaveLength(1);
    expect(merged[0].name).toBe('Foo Skill');
    expect(merged[0].description).not.toBe('Recently installed, initializing...');
  });
});

describe('skills store error mapping', () => {
beforeEach(() => {
vi.resetModules();
Expand Down
Loading