Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions changelog.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Add message normalization for Ollama model compatibility with MCP tool calling (fixes Mistral Large 3, Ministral 3, Gemma3 27B)
3 changes: 3 additions & 0 deletions core/package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 4 additions & 4 deletions extensions/cli/package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

12 changes: 11 additions & 1 deletion extensions/cli/src/stream/streamChatResponse.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ import {
withExponentialBackoff,
} from "../util/exponentialBackoff.js";
import { logger } from "../util/logger.js";
import { normalizeMessagesForModel } from "../util/messageNormalizer.js";
import { validateContextLength } from "../util/tokenizer.js";

import { getRequestTools, handleToolCalls } from "./handleToolCalls.js";
Expand Down Expand Up @@ -257,6 +258,15 @@ export async function processStreamingResponse(
chatHistory,
systemMessage,
) as ChatCompletionMessageParam[];

// Normalize messages for model-specific compatibility (GitHub Issue #9249)
// Fixes: Mistral "Unexpected role 'system' after role 'tool'"
// Gemma "Invalid 'tool_calls': unknown variant 'index'"
const normalizedMessages = normalizeMessagesForModel(
openaiChatHistory,
model.model,
);

const requestStartTime = Date.now();

const streamFactory = async (retryAbortSignal: AbortSignal) => {
Expand All @@ -269,7 +279,7 @@ export async function processStreamingResponse(
llmApi,
{
model: model.model,
messages: openaiChatHistory,
messages: normalizedMessages,
stream: true,
tools,
...getDefaultCompletionOptions(model.defaultCompletionOptions),
Expand Down
106 changes: 106 additions & 0 deletions extensions/cli/src/util/messageNormalizer.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
/**
* Message Normalization for Model-Specific Compatibility
*
* Handles model-specific message formatting quirks to ensure compatibility
* across different LLM providers when using Ollama's OpenAI endpoint.
*
* Issues addressed:
* 1. Mistral/Ministral: "Unexpected role 'system' after role 'tool'"
* 2. Gemma3: "Invalid 'tool_calls': unknown variant 'index'"
*
* GitHub Issue: https://github.com/continuedev/continue/issues/9249
*/

import type { ChatCompletionMessageParam } from "openai/resources";

/**
 * Apply model-family-specific message rewrites.
 *
 * Some models served through Ollama's OpenAI-compatible endpoint reject
 * message sequences that OpenAI itself accepts. This dispatcher inspects
 * the model identifier and routes to the matching workaround.
 *
 * @param messages - Conversation in OpenAI chat-completion format
 * @param modelName - Model identifier (e.g., 'mistral-large-3:675b-cloud')
 * @returns A message list safe to send to the target model
 */
export function normalizeMessagesForModel(
  messages: ChatCompletionMessageParam[],
  modelName: string,
): ChatCompletionMessageParam[] {
  const name = modelName.toLowerCase();

  if (name.includes("ministral") || name.includes("mistral")) {
    return normalizeForMistral(messages);
  }
  if (name.includes("gemma")) {
    return normalizeForGemma(messages);
  }

  // All other model families accept the messages unchanged.
  return messages;
}

/**
 * Fix Mistral's "Unexpected role 'system' after role 'tool'" error.
 *
 * System messages that appear before any tool output are hoisted to the
 * front of the conversation; system messages that appear after a tool
 * message are rewritten as user messages (Mistral rejects them otherwise).
 *
 * @param messages - Conversation in OpenAI chat-completion format
 * @returns Messages reordered/rewritten so Mistral accepts them
 */
function normalizeForMistral(
  messages: ChatCompletionMessageParam[],
): ChatCompletionMessageParam[] {
  const hoisted: ChatCompletionMessageParam[] = [];
  const body: ChatCompletionMessageParam[] = [];
  let sawTool = false;

  for (const message of messages) {
    if (message.role === "tool") {
      sawTool = true;
      body.push(message);
      continue;
    }

    if (message.role !== "system") {
      body.push(message);
      continue;
    }

    // How a system message is handled depends on whether any tool
    // output has already appeared in the conversation.
    if (sawTool) {
      const text =
        typeof message.content === "string"
          ? message.content
          : JSON.stringify(message.content);
      body.push({
        role: "user",
        content: `[System instruction]: ${text}`,
      });
    } else {
      hoisted.push(message);
    }
  }

  // Hoisted system messages lead the conversation.
  return [...hoisted, ...body];
}

/**
 * Fix Gemma's "Invalid 'tool_calls': unknown variant 'index'" error.
 *
 * Tool calls reconstructed from streamed deltas can carry an extra
 * `index` field (used to correlate chunks during streaming) that is not
 * part of the OpenAI tool-call schema. Gemma rejects it when the message
 * is sent back, so strip `index` while keeping id/type/function intact.
 *
 * @param messages - Conversation in OpenAI chat-completion format
 * @returns Messages with `index` removed from assistant tool_calls
 */
function normalizeForGemma(
  messages: ChatCompletionMessageParam[],
): ChatCompletionMessageParam[] {
  return messages.map((msg) => {
    // Only assistant messages carry tool_calls; everything else passes
    // through by reference.
    if (msg.role !== "assistant" || !("tool_calls" in msg) || !msg.tool_calls) {
      return msg;
    }

    // Model the runtime-only `index` field precisely instead of widening
    // the whole tool call to `any`.
    type IndexedToolCall = (typeof msg.tool_calls)[number] & {
      index?: number;
    };

    const cleanedToolCalls = msg.tool_calls.map((call) => {
      // Rest-destructuring produces a shallow copy without `index`.
      const { index: _index, ...cleanedCall } = call as IndexedToolCall;
      return cleanedCall;
    });

    return {
      ...msg,
      tool_calls: cleanedToolCalls,
    };
  });
}
9 changes: 9 additions & 0 deletions packages/config-yaml/package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 5 additions & 1 deletion packages/continue-sdk/package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

7 changes: 7 additions & 0 deletions packages/fetch/package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions packages/llm-info/package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading
Loading