2 changes: 2 additions & 0 deletions .gitignore
@@ -17,6 +17,8 @@ vscode/.metadata
# Miscellaneous
.vscode/*.aider*
.vscode/settings.json
vscode/.vscode-test
vscode/org.eclipse.*

# Allow PNG files in vscode/resources
!vscode/resources/*.png
65 changes: 65 additions & 0 deletions shared/src/types/types.ts
@@ -126,6 +126,7 @@ export interface ExtensionData {
solutionScope?: Scope;
chatMessages: ChatMessage[];
configErrors: ConfigError[];
llmErrors: LLMError[];
profiles: AnalysisProfile[];
activeProfileId: string | null;
solutionServerEnabled: boolean;
@@ -146,6 +147,22 @@ export type ConfigErrorType =
| "genai-disabled"
| "solution-server-disconnected";

export type LLMErrorType =
| "workflow-initialization-failed"
| "llm-request-failed"
| "llm-response-parse-failed"
| "llm-timeout"
| "llm-rate-limit"
| "llm-context-limit"
| "llm-unknown-error";

export interface LLMError {
type: LLMErrorType;
message: string;
error?: string;
timestamp: string;
}

export interface ConfigError {
type: ConfigErrorType;
message: string;
@@ -200,6 +217,54 @@ export const createConfigError = {
}),
};

export const createLLMError = {
workflowInitializationFailed: (error?: string): LLMError => ({
type: "workflow-initialization-failed",
message: "Failed to initialize AI workflow. Please check your model configuration.",
error,
timestamp: new Date().toISOString(),
}),

llmRequestFailed: (error?: string): LLMError => ({
type: "llm-request-failed",
message: "Failed to get response from AI model. Please try again.",
error,
timestamp: new Date().toISOString(),
}),

llmResponseParseFailed: (error?: string): LLMError => ({
type: "llm-response-parse-failed",
message: "Failed to parse AI model response. The response format may be invalid.",
error,
timestamp: new Date().toISOString(),
}),

llmTimeout: (): LLMError => ({
type: "llm-timeout",
message: "AI model request timed out. Please try again or check your connection.",
timestamp: new Date().toISOString(),
}),

llmRateLimit: (): LLMError => ({
type: "llm-rate-limit",
message: "AI model rate limit exceeded. Please wait a moment before trying again.",
timestamp: new Date().toISOString(),
}),

llmContextLimit: (): LLMError => ({
type: "llm-context-limit",
message: "Request exceeds AI model context limit. Try analyzing fewer issues at once.",
timestamp: new Date().toISOString(),
}),

llmUnknownError: (error?: string): LLMError => ({
type: "llm-unknown-error",
message: "An unexpected error occurred with the AI model.",
error,
timestamp: new Date().toISOString(),
}),
};
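
A brief usage sketch (not part of this diff) of the factory above; the import path matches the workspace alias used throughout this PR:

import { createLLMError, type LLMError } from "@editor-extensions/shared";

// Errors without an underlying cause carry only the canned message and a timestamp.
const timeoutError: LLMError = createLLMError.llmTimeout();

// Errors that wrap a lower-level failure keep the raw message in the optional `error` field.
const parseError: LLMError = createLLMError.llmResponseParseFailed(
  "Unexpected token < in JSON at position 0",
);

console.log(timeoutError.type); // "llm-timeout"
console.log(parseError.error); // "Unexpected token < in JSON at position 0"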

export type ServerState =
| "initial"
| "configurationNeeded"
6 changes: 6 additions & 0 deletions vscode/core/package.json
@@ -182,6 +182,12 @@
"title": "Show Diff Actions",
"category": "Konveyor",
"icon": "$(diff)"
},
{
"command": "konveyor.testLLMError",
"title": "Test LLM Error",
"category": "Konveyor",
"icon": "$(testing-error-icon)"
}
],
"submenus": [
116 changes: 102 additions & 14 deletions vscode/core/src/commands.ts
@@ -15,7 +15,13 @@ import {
Position,
} from "vscode";
import { cleanRuleSets, loadResultsFromDataFolder, loadRuleSets, loadStaticResults } from "./data";
import { EnhancedIncident, RuleSet, Scope, ChatMessageType } from "@editor-extensions/shared";
import {
EnhancedIncident,
RuleSet,
Scope,
ChatMessageType,
createLLMError,
} from "@editor-extensions/shared";
import {
type KaiWorkflowMessage,
type KaiInteractiveWorkflowInput,
@@ -202,6 +208,7 @@ const commandsMap: (
draft.solutionState = "started";
draft.solutionScope = scope;
draft.chatMessages = []; // Clear previous chat messages
draft.llmErrors = []; // Clear previous LLM errors
draft.activeDecorators = {};
});

@@ -219,12 +226,31 @@

// Set the state to indicate we're fetching a solution

await state.workflowManager.init({
modelProvider: state.modelProvider,
workspaceDir: state.data.workspaceRoot,
solutionServerClient: state.solutionServerClient,
});
logger.debug("Agent initialized");
try {
await state.workflowManager.init({
modelProvider: state.modelProvider,
workspaceDir: state.data.workspaceRoot,
solutionServerClient: state.solutionServerClient,
});
logger.debug("Agent initialized");
} catch (initError) {
logger.error("Failed to initialize workflow", initError);
const errorMessage = initError instanceof Error ? initError.message : String(initError);

state.mutateData((draft) => {
draft.isFetchingSolution = false;
draft.solutionState = "failedOnSending";
draft.chatMessages.push({
messageToken: `m${Date.now()}`,
kind: ChatMessageType.String,
value: { message: `Workflow initialization failed: ${errorMessage}` },
timestamp: new Date().toISOString(),
});
});
executeDeferredWorkflowDisposal(state, logger);
window.showErrorMessage(`Failed to initialize workflow: ${errorMessage}`);
return;
}

// Get the workflow instance
workflow = state.workflowManager.getWorkflow();
@@ -270,15 +296,10 @@ const commandsMap: (
});

// Add error event listener to catch workflow errors
// These are handled by the workflow message processor
workflow.on("error", (error: any) => {
logger.error("Workflow error:", error);
state.mutateData((draft) => {
draft.isFetchingSolution = false;
if (draft.solutionState === "started") {
draft.solutionState = "failedOnSending";
}
});
executeDeferredWorkflowDisposal(state, logger);
// State updates will be handled by the Error message type in processMessage
});

try {
@@ -305,8 +326,18 @@
logger.error(`Error in running the agent - ${err}`);
logger.info(`Error trace - `, err instanceof Error ? err.stack : "N/A");

const errorMessage = err instanceof Error ? err.message : String(err);

// Ensure isFetchingSolution is reset on any error
state.mutateData((draft) => {
// Add to chat messages for visibility
draft.chatMessages.push({
messageToken: `m${Date.now()}`,
kind: ChatMessageType.String,
value: { message: `Error: ${errorMessage}` },
timestamp: new Date().toISOString(),
});

draft.isFetchingSolution = false;
if (draft.solutionState === "started") {
draft.solutionState = "failedOnSending";
@@ -947,6 +978,63 @@ const commandsMap: (
await executeExtensionCommand("rejectDiff", filePath);
}
},

// Test command for simulating LLM errors (development only)
[`${EXTENSION_NAME}.testLLMError`]: async () => {
Contributor comment: did you mean to keep this?

const errorType = await vscode.window.showQuickPick(
[
{ label: "Timeout Error", value: "timeout" },
{ label: "Rate Limit Error", value: "rateLimit" },
{ label: "Context Limit Error", value: "contextLimit" },
{ label: "Parse Error", value: "parse" },
{ label: "Request Failed", value: "request" },
{ label: "Unknown Error", value: "unknown" },
],
{
placeHolder: "Select the type of LLM error to simulate",
},
);

if (!errorType) {
return;
}

// Simulate different LLM errors
let llmError;
switch (errorType.value) {
case "timeout":
llmError = createLLMError.llmTimeout();
break;
case "rateLimit":
llmError = createLLMError.llmRateLimit();
break;
case "contextLimit":
llmError = createLLMError.llmContextLimit();
break;
case "parse":
llmError = createLLMError.llmResponseParseFailed("Invalid JSON response from model");
break;
case "request":
llmError = createLLMError.llmRequestFailed("Connection refused to model API endpoint");
break;
case "unknown":
default:
llmError = createLLMError.llmUnknownError(
"An unexpected error occurred during model invocation",
);
break;
}

// Add the error to state
state.mutateData((draft) => {
draft.llmErrors.push(llmError);
});

logger.info(`Simulated LLM error: ${errorType.label}`);
vscode.window.showInformationMessage(
`Simulated ${errorType.label} - Check the Analysis or Resolution panel`,
);
},
};
};

1 change: 1 addition & 0 deletions vscode/core/src/extension.ts
@@ -89,6 +89,7 @@ class VsCodeExtension {
solutionState: "none",
solutionServerEnabled: solutionServerConfig.enabled, // should we pass the full config object?
configErrors: [],
llmErrors: [],
activeProfileId: "",
profiles: [],
isAgentMode: getConfigAgentMode(),
55 changes: 54 additions & 1 deletion vscode/core/src/utilities/ModifiedFiles/processMessage.ts
@@ -9,7 +9,7 @@ import {
} from "@editor-extensions/agentic";
import { flattenCurrentTasks, summarizeTasks, type TasksList } from "../../taskManager";
import { ExtensionState } from "../../extensionState";
import { ChatMessageType, ToolMessageValue } from "@editor-extensions/shared";
import { ChatMessageType, ToolMessageValue, createLLMError } from "@editor-extensions/shared";
import { handleModifiedFileMessage } from "./handleModifiedFile";
import { MessageQueueManager, handleUserInteractionComplete } from "./queueManager";

@@ -336,5 +336,58 @@ export const processMessageByType = async (
);
break;
}
case KaiWorkflowMessageType.Error: {
// Handle error messages from the workflow (including LLM errors)
const errorMessage = msg.data as string;
state.logger.error("Workflow error received:", errorMessage);

// Check if this is an LLM-specific error based on the error message
// The workflow emits "Failed to get llm response - " prefix for LLM errors
if (errorMessage.includes("Failed to get llm response")) {
// Extract the actual error message after the prefix
const actualError = errorMessage.replace("Failed to get llm response - ", "");
const lowerError = actualError.toLowerCase();

// Categorize the LLM error based on the actual error content
let llmError;
if (lowerError.includes("timeout") || lowerError.includes("timed out")) {
llmError = createLLMError.llmTimeout();
} else if (lowerError.includes("rate limit") || lowerError.includes("429")) {
llmError = createLLMError.llmRateLimit();
} else if (
lowerError.includes("context length") ||
lowerError.includes("token limit") ||
lowerError.includes("context_length_exceeded") ||
lowerError.includes("max_tokens")
) {
llmError = createLLMError.llmContextLimit();
} else if (
lowerError.includes("parse") ||
lowerError.includes("json") ||
lowerError.includes("invalid response")
) {
llmError = createLLMError.llmResponseParseFailed(actualError);
} else {
llmError = createLLMError.llmRequestFailed(actualError);
}

state.mutateData((draft) => {
draft.llmErrors.push(llmError);
});
} else {
// For non-LLM errors, just add to chat messages
state.mutateData((draft) => {
draft.chatMessages.push({
kind: ChatMessageType.String,
messageToken: msg.id,
timestamp: new Date().toISOString(),
value: {
message: `Error: ${errorMessage}`,
},
});
});
}
break;
}
}
};
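
The substring heuristics above live inline in the message handler; read as a standalone function, the same categorization logic looks roughly like this (hypothetical helper name, sketch only, not part of the PR):

import { createLLMError, type LLMError } from "@editor-extensions/shared";

// Hypothetical standalone form of the categorization used in the Error case above.
function categorizeLLMError(actualError: string): LLMError {
  const lowerError = actualError.toLowerCase();
  if (lowerError.includes("timeout") || lowerError.includes("timed out")) {
    return createLLMError.llmTimeout();
  }
  if (lowerError.includes("rate limit") || lowerError.includes("429")) {
    return createLLMError.llmRateLimit();
  }
  if (
    lowerError.includes("context length") ||
    lowerError.includes("token limit") ||
    lowerError.includes("context_length_exceeded") ||
    lowerError.includes("max_tokens")
  ) {
    return createLLMError.llmContextLimit();
  }
  if (
    lowerError.includes("parse") ||
    lowerError.includes("json") ||
    lowerError.includes("invalid response")
  ) {
    return createLLMError.llmResponseParseFailed(actualError);
  }
  // Anything unrecognized is reported as a generic request failure.
  return createLLMError.llmRequestFailed(actualError);
}

// Example: a 429 response from the provider maps to the rate-limit error.
console.log(categorizeLLMError("429 Too Many Requests").type); // "llm-rate-limit"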
6 changes: 6 additions & 0 deletions webview-ui/src/components/AnalysisPage/AnalysisPage.tsx
@@ -73,6 +73,7 @@
ruleSets: analysisResults,
enhancedIncidents,
configErrors: rawConfigErrors,
llmErrors: rawLLMErrors = [],
profiles,
activeProfileId,
serverState,
@@ -87,6 +88,7 @@
const [expandedViolations, setExpandedViolations] = useState<Set<string>>(new Set());
const [isConfigOpen, setIsConfigOpen] = useState(false);
const [isGenAIAlertDismissed, setIsGenAIAlertDismissed] = useState(false);
const [dismissedLLMErrors, setDismissedLLMErrors] = useState<Set<string>>(new Set());

const violations = useViolations(analysisResults);
const hasViolations = violations.length > 0;
@@ -213,10 +215,14 @@
)}
<ConfigAlerts
configErrors={rawConfigErrors}
llmErrors={rawLLMErrors.filter(error => !dismissedLLMErrors.has(error.timestamp))}

GitHub Actions / Lint warning on line 218: Replace `error` with `(error)`
solutionServerEnabled={solutionServerEnabled}
solutionServerConnected={solutionServerConnected}
onOpenProfileManager={() => dispatch({ type: "OPEN_PROFILE_MANAGER", payload: {} })}
dispatch={dispatch}
onDismissLLMError={(timestamp) => {
setDismissedLLMErrors(prev => new Set([...prev, timestamp]));

Check warning on line 224 in webview-ui/src/components/AnalysisPage/AnalysisPage.tsx

View workflow job for this annotation

GitHub Actions / Lint

Replace `prev` with `(prev)`
}}
/>
{!isGenAIDisabled && !isGenAIAlertDismissed && (
<PageSection padding={{ default: "noPadding" }}>
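
ConfigAlerts itself is not touched in this diff; inferred from the call site above, its props presumably grow along these lines (the prop names come from the JSX above, while the interface shape and types are assumptions):

import type { Dispatch } from "react";
import type { ConfigError, LLMError } from "@editor-extensions/shared";

// Assumed prop shape for ConfigAlerts, reconstructed from the call site above.
// The real dispatch action type is not visible in this diff, so it is left loose here.
export interface ConfigAlertsProps {
  configErrors: ConfigError[];
  llmErrors: LLMError[];
  solutionServerEnabled: boolean;
  solutionServerConnected: boolean;
  onOpenProfileManager: () => void;
  dispatch: Dispatch<{ type: string; payload: unknown }>;
  onDismissLLMError: (timestamp: string) => void;
}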