diff --git a/.github/workflows/cli-pr-checks.yml b/.github/workflows/cli-pr-checks.yml index ea79442e2eb..6084b1610e3 100644 --- a/.github/workflows/cli-pr-checks.yml +++ b/.github/workflows/cli-pr-checks.yml @@ -72,7 +72,7 @@ jobs: node-version: ${{ matrix.node-version }} - name: Cache CLI node_modules - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: extensions/cli/node_modules key: ${{ runner.os }}-node${{ matrix.node-version }}-cli-modules-${{ hashFiles('extensions/cli/package-lock.json') }} diff --git a/.github/workflows/jetbrains-release.yaml b/.github/workflows/jetbrains-release.yaml index 38413cf0c5d..ce2c88439e9 100644 --- a/.github/workflows/jetbrains-release.yaml +++ b/.github/workflows/jetbrains-release.yaml @@ -157,19 +157,19 @@ jobs: node-version-file: ".nvmrc" - name: Cache core node_modules - uses: actions/cache@v3 + uses: actions/cache@v5 with: path: core/node_modules key: ${{ runner.os }}-node-${{ hashFiles('core/package-lock.json') }} - name: Cache binary node_modules - uses: actions/cache@v3 + uses: actions/cache@v5 with: path: binary/node_modules key: ${{ runner.os }}-node-${{ hashFiles('binary/package-lock.json') }} - name: Cache gui node_modules - uses: actions/cache@v3 + uses: actions/cache@v5 with: path: gui/node_modules key: ${{ runner.os }}-node-${{ hashFiles('gui/package-lock.json') }} @@ -294,44 +294,44 @@ jobs: # Store already-built plugin as an artifact for downloading - name: Upload artifact - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: ${{ steps.artifact.outputs.filename }} path: ./extensions/intellij/build/distributions/content/*/* # Upload binaries as artifacts - name: Upload artifact (darwin-arm64) - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: continue-binary-darwin-arm64 path: ./binary/bin/darwin-arm64/ - name: Upload artifact (darwin-x64) - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: continue-binary-darwin-x64 
path: ./binary/bin/darwin-x64/ - name: Upload artifact (win32-x64) - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: continue-binary-win32-x64 path: ./binary/bin/win32-x64/ - name: Upload artifact (win32-arm64) - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: continue-binary-win32-arm64 path: ./binary/bin/win32-arm64/ - name: Upload artifact (linux-arm64) - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: continue-binary-linux-arm64 path: ./binary/bin/linux-arm64/ - name: Upload artifact (linux-x64) - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: continue-binary-linux-x64 path: ./binary/bin/linux-x64/ @@ -375,13 +375,13 @@ jobs: node-version-file: ".nvmrc" - name: Cache core node_modules - uses: actions/cache@v3 + uses: actions/cache@v5 with: path: core/node_modules key: ${{ runner.os }}-node-${{ hashFiles('core/package-lock.json') }} - name: Cache binary node_modules - uses: actions/cache@v3 + uses: actions/cache@v5 with: path: binary/node_modules key: ${{ runner.os }}-node-${{ hashFiles('binary/package-lock.json') }} @@ -398,7 +398,7 @@ jobs: # Download the binary artifact - name: Download binary artifact - uses: actions/download-artifact@v6 + uses: actions/download-artifact@v7 with: name: continue-binary-${{ matrix.platform }}-${{ matrix.arch }} path: ./binary/bin/${{ matrix.platform }}-${{ matrix.arch }}/ @@ -420,7 +420,7 @@ jobs: - name: Upload logs if: ${{ always() }} - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: core-logs-${{ matrix.platform }}-${{ matrix.arch }} path: ~/.continue/logs/core.log @@ -455,7 +455,7 @@ jobs: # Collect Tests Result of failed tests - name: Collect Tests Result if: ${{ failure() }} - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: tests-result path: ${{ github.workspace }}/extensions/intellij/build/reports/tests @@ -491,7 +491,7 @@ jobs: 
# Run Qodana inspections - name: Qodana - Code Inspection - uses: JetBrains/qodana-action@v2025.2.3 + uses: JetBrains/qodana-action@v2025.2.4 with: cache-default-branch-only: true @@ -528,7 +528,7 @@ jobs: # Cache Plugin Verifier IDEs - name: Setup Plugin Verifier IDEs Cache - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ${{ needs.build.outputs.pluginVerifierHomeDir }}/ides key: plugin-verifier-${{ hashFiles('build/printProductsReleases.txt') }} @@ -540,7 +540,7 @@ jobs: # Collect Plugin Verifier Result - name: Collect Plugin Verifier Result if: ${{ always() }} - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: pluginVerifier-result path: ${{ github.workspace }}/build/reports/pluginVerifier @@ -563,7 +563,7 @@ jobs: # ./gradlew patchChangelog --release-note="$CHANGELOG" - name: Download the plugin - uses: actions/download-artifact@v6 + uses: actions/download-artifact@v7 with: name: ${{ steps.artifact.outputs.filename }} path: ./build/distributions/ diff --git a/.github/workflows/main-build.yaml b/.github/workflows/main-build.yaml index 62813ed2fe3..5b64db5b676 100644 --- a/.github/workflows/main-build.yaml +++ b/.github/workflows/main-build.yaml @@ -101,7 +101,7 @@ jobs: echo "βœ… Successfully downloaded artifact" - name: Republish as main branch artifact - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: vscode-extension-build-${{ matrix.platform }} path: ./temp-download/* diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index c9aa1617793..98dd26c80ad 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -99,7 +99,7 @@ jobs: github-token: ${{ secrets.GITHUB_TOKEN }} - name: Upload .vsix artifact - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: ${{ matrix.platform }}-${{ matrix.arch }}-vsix path: "extensions/vscode/*.vsix" @@ -120,7 +120,7 @@ jobs: git config --local user.name "GitHub Action" # Download the 
.vsix artifacts - - uses: actions/download-artifact@v6 + - uses: actions/download-artifact@v7 with: pattern: "*-vsix" path: vsix-artifacts @@ -156,7 +156,7 @@ jobs: run: git fetch origin ${{ github.ref }} && git checkout ${{ github.ref }} # 1. Download the artifacts - - uses: actions/download-artifact@v6 + - uses: actions/download-artifact@v7 with: pattern: "*-vsix" path: vsix-artifacts diff --git a/.github/workflows/metrics.yaml b/.github/workflows/metrics.yaml index b4840f716a6..6620b129a64 100644 --- a/.github/workflows/metrics.yaml +++ b/.github/workflows/metrics.yaml @@ -71,7 +71,7 @@ jobs: ${{ steps.read-metrics.outputs.metrics_content }} - name: Upload metrics report as artifact - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: issue-metrics-report path: ./issue_metrics.md diff --git a/.github/workflows/pr-build-upload-vsix.yaml b/.github/workflows/pr-build-upload-vsix.yaml index 2d2dd8d3083..e4b144e8217 100644 --- a/.github/workflows/pr-build-upload-vsix.yaml +++ b/.github/workflows/pr-build-upload-vsix.yaml @@ -39,13 +39,13 @@ jobs: github-token: ${{ secrets.GITHUB_TOKEN }} - name: Upload build artifact - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: vscode-extension-build-${{ matrix.platform }}-${{ matrix.arch }} path: extensions/vscode/build - name: Upload .vsix artifact - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: ${{ matrix.platform }}-${{ matrix.arch }}-vsix path: "extensions/vscode/*.vsix" diff --git a/.github/workflows/pr-checks.yaml b/.github/workflows/pr-checks.yaml index de644388166..1cde9fea951 100644 --- a/.github/workflows/pr-checks.yaml +++ b/.github/workflows/pr-checks.yaml @@ -27,7 +27,7 @@ jobs: with: node-version-file: ".nvmrc" - - uses: actions/cache@v4 + - uses: actions/cache@v5 id: root-cache with: path: node_modules @@ -234,26 +234,26 @@ jobs: node-version-file: ".nvmrc" - name: Cache npm - uses: actions/cache@v4 + uses: 
actions/cache@v5 with: path: ~/.npm key: ${{ runner.os }}-npm-cache-matrix-${{ hashFiles('core/package-lock.json', 'extensions/vscode/package-lock.json') }} - name: Cache packages node_modules - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: | packages/*/node_modules key: ${{ runner.os }}-packages-node-modules-${{ hashFiles('packages/*/package-lock.json') }} - name: Cache core node modules - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: core/node_modules key: ${{ runner.os }}-core-node-modules-${{ hashFiles('core/package-lock.json') }} - name: Cache vscode extension node modules - uses: actions/cache@v4 + uses: actions/cache@v5 id: vscode-cache with: path: extensions/vscode/node_modules @@ -303,7 +303,7 @@ jobs: github-token: ${{ secrets.GITHUB_TOKEN }} - name: Upload build artifact - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: vscode-extension-build-Linux path: extensions/vscode/build diff --git a/.github/workflows/preview.yaml b/.github/workflows/preview.yaml index 76e5dbc4d71..b137d3c6103 100644 --- a/.github/workflows/preview.yaml +++ b/.github/workflows/preview.yaml @@ -83,7 +83,7 @@ jobs: github-token: ${{ secrets.GITHUB_TOKEN }} - name: Upload .vsix artifact - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: ${{ matrix.platform }}-${{ matrix.arch }}-vsix path: "extensions/vscode/*.vsix" @@ -104,7 +104,7 @@ jobs: git config --local user.name "GitHub Action" # Download the .vsix artifacts - - uses: actions/download-artifact@v6 + - uses: actions/download-artifact@v7 with: pattern: "*-vsix" path: vsix-artifacts @@ -142,7 +142,7 @@ jobs: run: git fetch origin ${{ github.ref }} && git checkout ${{ github.ref }} # 1. 
Download the artifacts - - uses: actions/download-artifact@v6 + - uses: actions/download-artifact@v7 with: pattern: "*-vsix" path: vsix-artifacts diff --git a/.github/workflows/run-continue-agent.yml b/.github/workflows/run-continue-agent.yml index 6194838fe8b..70b92dff5fe 100644 --- a/.github/workflows/run-continue-agent.yml +++ b/.github/workflows/run-continue-agent.yml @@ -39,4 +39,4 @@ jobs: "repoUrl": "https://github.com/${{ github.repository }}" }') id=$(echo $response | jq -r '.id') - echo "https://hub.continue.dev/agents/$id" + echo "https://hub.continue.dev/hub?type=agents/$id" diff --git a/.gitignore b/.gitignore index 2b6b498ffb1..2a5bc64f80c 100644 --- a/.gitignore +++ b/.gitignore @@ -176,3 +176,6 @@ keys .channels_cache.json .users_cache.json +.copy-status +.copy-log + diff --git a/README.md b/README.md index 00dcc691181..47c4b614357 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ -Get started in [Mission Control](https://hub.continue.dev/agents), [CLI (Headless Mode)](https://docs.continue.dev/cli/quick-start#headless-mode), or [CLI (TUI mode)](https://docs.continue.dev/cli/quick-start#tui-mode) +Get started in [Mission Control](https://hub.continue.dev/hub?type=agents), [CLI (Headless Mode)](https://docs.continue.dev/cli/quick-start#headless-mode), or [CLI (TUI mode)](https://docs.continue.dev/cli/quick-start#tui-mode) ### Quick Install diff --git a/core/indexing/ignore.ts b/core/indexing/ignore.ts index 96b08f6e1bc..e431e24eed9 100644 --- a/core/indexing/ignore.ts +++ b/core/indexing/ignore.ts @@ -38,11 +38,7 @@ export const DEFAULT_SECURITY_IGNORE_FILETYPES = [ // Credential and secret files "*.secret", "*.secrets", - "credentials", - "credentials.*", "auth.json", - "token", - "token.*", "*.token", // Backup files that might contain sensitive data diff --git a/core/indexing/ignore.vitest.ts b/core/indexing/ignore.vitest.ts index 43a271851c4..565db5efd15 100644 --- a/core/indexing/ignore.vitest.ts +++ b/core/indexing/ignore.vitest.ts @@ 
-47,10 +47,7 @@ describe("isSecurityConcern", () => { it("should detect credential files as security concerns", () => { expect(isSecurityConcern("app.secret")).toBe(true); expect(isSecurityConcern("api.secrets")).toBe(true); - expect(isSecurityConcern("credentials")).toBe(true); - expect(isSecurityConcern("credentials.json")).toBe(true); expect(isSecurityConcern("auth.json")).toBe(true); - expect(isSecurityConcern("token")).toBe(true); expect(isSecurityConcern("api.token")).toBe(true); expect(isSecurityConcern("access.token")).toBe(true); }); @@ -183,6 +180,17 @@ describe("isSecurityConcern", () => { expect(isSecurityConcern("secret-utils.js")).toBe(false); expect(isSecurityConcern("token-validator.ts")).toBe(false); }); + + it("should not flag legitimate source files with token/credentials in name", () => { + expect(isSecurityConcern("tokens.py")).toBe(false); + expect(isSecurityConcern("tokens.go")).toBe(false); + expect(isSecurityConcern("tokens.js")).toBe(false); + expect(isSecurityConcern("credentials.py")).toBe(false); + expect(isSecurityConcern("credentials.go")).toBe(false); + expect(isSecurityConcern("credentials.ts")).toBe(false); + expect(isSecurityConcern("token_manager.py")).toBe(false); + expect(isSecurityConcern("credential_helper.js")).toBe(false); + }); }); describe("Edge cases", () => { diff --git a/docs/agents/create-and-edit.mdx b/docs/agents/create-and-edit.mdx index e0bb505032b..21f79c42573 100644 --- a/docs/agents/create-and-edit.mdx +++ b/docs/agents/create-and-edit.mdx @@ -16,9 +16,9 @@ sidebarTitle: "Create & Edit" - From the Continue Mission Control top navigation bar, select **"+"** β†’ **"New Agent"**. + Navigate to the [New Agent page](https://hub.continue.dev/new?type=agent). - This opens the **Create an Agent (beta)** form with all required fields. + This opens the **Create an Agent** form with all required fields. 
@@ -30,9 +30,9 @@ sidebarTitle: "Create & Edit" | **Name** | Display name shown in Mission Control | `GitHub PR Agent` | | **Prompt** | First instruction the agent receives | `Open a GitHub PR to fix the specified issue.` | | **Description** | What the agent does | `Creates a pull request and includes AI-generated summaries.` | - | **Tools (MCPs)** | Select built-in or custom MCPs | `GitHub, PostHog, Supabase` | - | **Rules** | Add any organizational rules | `continuedev/gh-pr-commit-workflow, continuedev/summarization` | - | **Model** | Choose a default LLM | `Claude Sonnet 4.5` | + | **Tools ([MCPs](https://hub.continue.dev/hub?type=mcpServers))** | Select built-in or custom MCPs | `GitHub, PostHog, Supabase` | + | **[Rules](https://hub.continue.dev/hub?type=rules)** | Add any organizational rules | `continuedev/gh-pr-commit-workflow` | + | **[Model](https://hub.continue.dev/hub?type=models)** | Choose a default LLM | `Claude Sonnet 4.5` | | **Owner + Slug** | Determines namespace | `my-org/github-pr-agent` | | **Visibility** | Access level | `Public, Organization, or Private` | @@ -70,8 +70,7 @@ sidebarTitle: "Create & Edit" ## Editing an Agent -You can edit any agent you own or that belongs to your organization. - +You can edit any agent you own or that has Organization-level access. From the **Agents** page or Mission Control view, click your agent's name, then select **"Edit Agent"**. diff --git a/docs/agents/intro.mdx b/docs/agents/intro.mdx index 10dcb9f5586..caad558b7af 100644 --- a/docs/agents/intro.mdx +++ b/docs/agents/intro.mdx @@ -23,7 +23,7 @@ Use Mission Control to kick off agents for: - Go to [hub.continue.dev/agents](https://hub.continue.dev/agents) and connect with your GitHub account. + Go to [Mission Control Agents](https://hub.continue.dev/hub?type=agents) and connect with your GitHub account. 
![Mission Control Setup](/images/hub/workflows/images/workflows-setup.png) @@ -45,7 +45,7 @@ Use Mission Control to kick off agents for: - Enter a prompt and watch your agent work: + Enter additional instructions and watch your agent work: ``` "Fix the TypeError in api/users.ts and open a PR with tests" @@ -79,7 +79,7 @@ Before creating your own agent, let's see one in action! The fastest way to expe - Go to [hub.continue.dev/agents](https://hub.continue.dev/agents) and: + Go to [Mission Control Agents](https://hub.continue.dev/hub?type=agents) and: - **Connect GitHub** and authorize Continue when prompted - This gives agents access to create PRs in your repositories @@ -203,7 +203,7 @@ Choose the method that fits your workflow: - Team collaboration - One-time tasks - Access at [hub.continue.dev/agents](https://hub.continue.dev/agents) + Access at [Mission Control Agents](https://hub.continue.dev/hub?type=agents) diff --git a/docs/agents/overview.mdx b/docs/agents/overview.mdx index 037b91b14f4..a606351eb90 100644 --- a/docs/agents/overview.mdx +++ b/docs/agents/overview.mdx @@ -67,7 +67,7 @@ You can run Agents in three main ways: Run one-off or scheduled tasks automatically without interaction. 
```bash - cn -p --agent my-org/snyk-agent "Run weekly security scan" --auto + cn -p --agent my-org/snyk-agent "Run weekly security scan" ``` Perfect for: CI/CD pipelines, scheduled tasks, webhook integrations @@ -79,34 +79,52 @@ You can run Agents in three main ways: Skip the setup and use battle-tested agents from our cookbook collection: - + + `continuedev/snyk-continuous-ai-agent` + Finds vulnerabilities and opens PRs with fixes + - + + `continuedev/github-project-manager-agent` + Triages issues and manages project workflows + - + + `continuedev/posthog-continuous-ai-agent` + Analyzes user data and creates actionable tasks + - + + `continuedev/netlify-continuous-ai-agent` + Monitors Core Web Vitals and optimizes deployments + - + + `continuedev/supabase-agent` + Audits RLS security and generates migrations + - + + `continuedev/dlt-agent` + Inspects pipelines and debugs load errors + @@ -222,7 +240,7 @@ The practice of using cloud agents, which we call Continuous AI, requires foreth Learn to run agents from the command line - + Access the web interface to manage agents \ No newline at end of file diff --git a/docs/changelog.mdx b/docs/changelog.mdx new file mode 100644 index 00000000000..b9216f14622 --- /dev/null +++ b/docs/changelog.mdx @@ -0,0 +1,40 @@ +--- +title: "Changelog" +description: "Continue Product updates and announcements" +icon: notebook +--- + +## **Discover Opportunities You Can Hand Off: Introducing Proactive Cloud Agents** + +Over the past month, we’ve moved beyond just helping you code. We’ve been laying the groundwork to make Continue an always-on partner that handles the tasks you’d rather not do yourself. + +The result is a shift in how you interact with Continue: less setup, less context switching, and more automation driven directly from your workflow. 
+ + ![Opportunities from Sentry and Snyk in Mission Control](./images/changelog/opportunities.png) + [Continue Integrations](https://hub.continue.dev/integrations) can now surface actionable work from tools you already rely on. Instead of hunting for issues, Sentry alerts, Snyk vulnerabilities, and GitHub Issues are brought to you as "Opportunities" that you can immediately delegate to an agent for a first pass, and then you can review or approve the results. + + ### **Automate Your Workflows with Cloud Agents** + +You can now automate workflows across tools like [PostHog](https://hub.continue.dev/integrations/posthog), [Supabase](https://hub.continue.dev/integrations/supabase), [Netlify](https://hub.continue.dev/integrations/netlify), [Atlassian](https://hub.continue.dev/integrations/atlassian), and [Sanity](https://hub.continue.dev/integrations/sanity) using cloud agents. + +Instead of manually stitching together dashboards, alerts, and follow-up tasks, Continue cloud agents can monitor signals, take action, and push work forward automatically. This makes Continue useful not just for coding tasks, but for the operational work that surrounds shipping software. + +Cloud agents are designed to run continuously and reliably, which is why much of the recent work focused on session stability, execution reliability, and onboarding improvements. + +### **Trigger Agents from Slack and GitHub** + +You can now kick off cloud agents directly from [Slack](https://hub.continue.dev/integrations/slack) and [GitHub](https://hub.continue.dev/integrations/github) using @Continue. + +This means you can delegate work right where your team is already talking. Just mention @Continue in a Slack thread or a GitHub comment. The agent will pick up the immediate context and follow your directions, letting you stay in your flow. 
+ + \ No newline at end of file diff --git a/docs/customize/models.mdx b/docs/customize/models.mdx index 85bcf25c42b..bea6aee94db 100644 --- a/docs/customize/models.mdx +++ b/docs/customize/models.mdx @@ -12,7 +12,11 @@ import { ModelRecommendations } from "/snippets/ModelRecommendations.jsx"; - **[Embedding](/customize/model-roles/embeddings)**: Transform code into vector representations for semantic search - **[Reranker](/customize/model-roles/reranking)**: Improve search relevance by ordering results based on semantic meaning -![Models Overview](/images/customize/images/model-blocks-overview-36c30e7e01928d7a9b5b26ff1639c34b.png) + +When creating an agent, you can choose the model for a [new task in Mission Control](https://dub.sh/mc-task) or [workflow](https://dub.sh/agent-workflow). + +If no model is specified, Continue automatically uses the default agent with our recommended model. + ## Recommended Models @@ -20,6 +24,10 @@ import { ModelRecommendations } from "/snippets/ModelRecommendations.jsx"; + +Explore models in [The Hub](https://hub.continue.dev/hub?type=models). + + ## Learn More About Models Continue supports [many model providers](/customize/model-providers/top-level/openai), including Anthropic, OpenAI, Gemini, Ollama, Amazon Bedrock, Azure, xAI, and more. Models can have various roles like `chat`, `edit`, `apply`, `autocomplete`, `embed`, and `rerank`. 
diff --git a/docs/customize/telemetry.mdx b/docs/customize/telemetry.mdx index 013917322db..12dc39bf2dd 100644 --- a/docs/customize/telemetry.mdx +++ b/docs/customize/telemetry.mdx @@ -1,6 +1,7 @@ --- title: "Telemetry" -description: "Learn about Continue's anonymous telemetry collection practices, what usage data is collected, and how to opt out if you prefer not to share your usage information" +icon: "chart-line" +description: "Learn about Continue's anonymous telemetry collection practices, what usage data is tracked, and how to opt out of data collection to maintain your privacy preferences" --- ## Overview @@ -25,6 +26,25 @@ The following usage information is collected and reported: ## How to Opt Out +### IDE extensions + You can disable anonymous telemetry by toggling "Allow Anonymous Telemetry" off in the user settings. +#### VS Code + Alternatively in VS Code, you can disable telemetry through your VS Code settings by unchecking the "Continue: Telemetry Enabled" box (this will override the Settings Page settings). VS Code settings can be accessed with `File` > `Preferences` > `Settings` (or use the keyboard shortcut `ctrl` + `,` on Windows/Linux or `cmd` + `,` on macOS). 
+ +### CLI + +For `cn`, the Continue CLI, set the environment variable `CONTINUE_TELEMETRY_ENABLED=0` before running commands: + +```bash +export CONTINUE_TELEMETRY_ENABLED=0 +cn +``` + +Or run it inline: + +```bash +CONTINUE_TELEMETRY_ENABLED=0 cn +``` \ No newline at end of file diff --git a/docs/docs.json b/docs/docs.json index afd202c9568..c320a20a20d 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -20,7 +20,7 @@ { "group": "Getting Started", "icon": "rocket-launch", - "pages": ["index"] + "pages": ["index", "changelog"] }, { "group": "Mission Control", @@ -37,7 +37,10 @@ "mission-control/integrations/github", "mission-control/integrations/sentry", "mission-control/integrations/snyk", - "mission-control/integrations/slack-agent" + "mission-control/integrations/slack-agent", + "mission-control/integrations/posthog", + "mission-control/integrations/atlassian", + "mission-control/integrations/netlify" ] }, "mission-control/metrics", @@ -697,10 +700,6 @@ "source": "/customize/deep-dives/vscode-actions", "destination": "/customize/overview" }, - { - "source": "/changelog", - "destination": "/reference" - }, { "source": "/customization/code-config", "destination": "/reference" diff --git a/docs/guides/atlassian-mcp-continue-cookbook.mdx b/docs/guides/atlassian-mcp-continue-cookbook.mdx index fa47a9aed27..a3fd792c780 100644 --- a/docs/guides/atlassian-mcp-continue-cookbook.mdx +++ b/docs/guides/atlassian-mcp-continue-cookbook.mdx @@ -13,6 +13,12 @@ sidebarTitle: "Atlassian Workflows with Continue" - Automate Atlassian workflows with headless CLI runs + + The new [Atlassian Mission Control Integration](/mission-control/integrations/atlassian) enables **[Continuous AI](/guides/continuous-ai)** for project management and documentation - where AI agents autonomously manage Jira issues, sync Confluence documentation, and streamline development workflows. This cookbook focuses on CLI-based workflows, but you can also deploy autonomous agents through Mission Control. 
+ + **Get Started:** Use this cookbook to understand Atlassian MCP fundamentals, then [enable Mission Control](/mission-control/integrations/atlassian) to deploy autonomous project management agents across your organization. + + ## Prerequisites Before starting, ensure you have: diff --git a/docs/guides/netlify-mcp-continuous-deployment.mdx b/docs/guides/netlify-mcp-continuous-deployment.mdx index 830fcc730a8..11318a8dac0 100644 --- a/docs/guides/netlify-mcp-continuous-deployment.mdx +++ b/docs/guides/netlify-mcp-continuous-deployment.mdx @@ -10,6 +10,12 @@ sidebarTitle: "Using Netlify MCP for Performance Optimization" from Netlify Analytics + + The new [Netlify Mission Control Integration](/mission-control/integrations/netlify) enables **[Continuous AI](/guides/continuous-ai)** for deployment automation - where AI agents autonomously manage deployments, monitor site performance, and respond to deployment events. This cookbook focuses on CLI-based performance optimization workflows, but you can also deploy autonomous deployment agents through Mission Control. + + **Get Started:** Use this cookbook to understand Netlify MCP fundamentals, then [enable Mission Control](/mission-control/integrations/netlify) to deploy autonomous deployment management agents across your organization. + + **Did You Know?** Netlify is more than just static hosting! It offers: - [Split Testing](https://docs.netlify.com/site-deploys/split-testing/) for A/B testing branches diff --git a/docs/guides/posthog-github-continuous-ai.mdx b/docs/guides/posthog-github-continuous-ai.mdx index 660b292ca83..d5201f3b058 100644 --- a/docs/guides/posthog-github-continuous-ai.mdx +++ b/docs/guides/posthog-github-continuous-ai.mdx @@ -9,6 +9,12 @@ sidebarTitle: "PostHog Analytics with Continue CLI" issues with the GitHub CLI. 
+ + The new [PostHog Mission Control Integration](/mission-control/integrations/posthog) enables **[Continuous AI](/guides/continuous-ai)** for product analytics - where AI agents autonomously analyze user behavior, identify optimization opportunities, and create data-driven development tasks. This cookbook focuses on CLI-based workflows, but you can also deploy autonomous agents through Mission Control. + + **Get Started:** Use this cookbook to understand PostHog analytics fundamentals, then [enable Mission Control](/mission-control/integrations/posthog) to deploy autonomous analytics-driven agents across your organization. + + ## What You'll Learn This cookbook teaches you to: diff --git a/docs/images/changelog/opportunities.png b/docs/images/changelog/opportunities.png new file mode 100644 index 00000000000..33bac2086f1 Binary files /dev/null and b/docs/images/changelog/opportunities.png differ diff --git a/docs/index.mdx b/docs/index.mdx index ca305486598..2132a76dd23 100644 --- a/docs/index.mdx +++ b/docs/index.mdx @@ -48,6 +48,8 @@ description: "Create, run, and automate AI agents across Mission Control, termin +You can go to [Mission Control Integrations](https://hub.continue.dev/integrations) for the full list of tools you can connect, or for more information see [the docs page](https://docs.continue.dev/mission-control/integrations). + ## Continue CLI diff --git a/docs/mission-control/configs/create-a-config.mdx b/docs/mission-control/configs/create-a-config.mdx index 33845b71103..c9b7bd56e88 100644 --- a/docs/mission-control/configs/create-a-config.mdx +++ b/docs/mission-control/configs/create-a-config.mdx @@ -6,7 +6,7 @@ sidebarTitle: "Create a Config" ## How to Create a Config from Scratch -To create an config from scratch, select β€œNew config” in the top bar. +To create a config from scratch, navigate to [your config settings](https://hub.continue.dev/settings/configs). Choose a name, slug, description, and icon for your config. 
diff --git a/docs/mission-control/configs/intro.mdx b/docs/mission-control/configs/intro.mdx index b812c5df1d3..3ebb7ce84a7 100644 --- a/docs/mission-control/configs/intro.mdx +++ b/docs/mission-control/configs/intro.mdx @@ -43,7 +43,7 @@ Configs are flexible containers that combine multiple components to create power ## How Configs Work -Think of configs as recipes for AI assistance. Each config defines: +Each config defines: - **What models** to use for different tasks (chat, autocomplete, code editing) - **Which tools** are available through MCP servers @@ -56,7 +56,7 @@ This flexibility lets you create specialized setups for different contexts, like When creating configs, you can set visibility levels: -- **Personal**: Only you can see and use the config +- **Private**: Only you (and your org admins) can see and use the config - **Public**: Anyone can discover and use your config - **Organization**: Members of your organization can access the config diff --git a/docs/mission-control/governance/pricing.mdx b/docs/mission-control/governance/pricing.mdx index e56ef351783..ff21b8f268c 100644 --- a/docs/mission-control/governance/pricing.mdx +++ b/docs/mission-control/governance/pricing.mdx @@ -1,6 +1,6 @@ --- title: "Pricing" -description: "Continue Mission Control pricing plans for individuals, teams, and enterprises, including the Models Add-On that provides access to frontier AI models for a flat monthly fee" +url: "https://hub.continue.dev/pricing" --- ## Solo diff --git a/docs/mission-control/index.mdx b/docs/mission-control/index.mdx index e550aaf7d4b..54e011a9f57 100644 --- a/docs/mission-control/index.mdx +++ b/docs/mission-control/index.mdx @@ -120,7 +120,7 @@ Agents are built from reusable components that you can create, share, and custom - Components are the building blocks for agents that you can create new or remix from an existing component. 
When you create a component in mission control, it becomes available according to the permissions you set: Personal, Public, or Organization. + When you create a component for your agent in Mission Control, it becomes available according to the permissions you set: Personal, Public, or Organization. diff --git a/docs/mission-control/integrations/atlassian.mdx b/docs/mission-control/integrations/atlassian.mdx new file mode 100644 index 00000000000..46f6d094065 --- /dev/null +++ b/docs/mission-control/integrations/atlassian.mdx @@ -0,0 +1,302 @@ +--- +title: "Atlassian Integration" +description: "Connect Jira, Confluence, and Compass to automate project management with Continue Agents" +--- + +## Overview + +Connect Jira, Confluence, and Compass to Continue Mission Control to enable agents to manage issues, search tickets, summarize pages, and bulk-process tasks using natural language. When Atlassian is enabled, Continue can automatically translate technical code changes into business-friendly updates, keeping stakeholders informed. + + + + - Automatically update Jira tickets with business-friendly summaries on PR merge + - Search issues and create tickets using natural language + - Summarize Confluence pages and documentation + - Bulk-process tasks across multiple tickets + - Triage incoming issues automatically + - Generate documentation from code changes + - Track development progress with stakeholder-friendly language + + + +## Setup + + + + + + Go to your [Atlassian Integration Settings](https://hub.continue.dev/integrations/atlassian). + + + + + + Click "Connect" to Atlassian. + + + + + In the Atlassian authorization screen: + 1. Log into your Atlassian account + 2. Review the requested permissions + 3. Click "Authorize" to complete the connection + + + + + + ## Workflows + +### Update Jira Tickets + +**Trigger**: On PR merge +**Description**: Connect Jira, Confluence, and Compass to your agents. 
Search issues, create tickets, summarize pages, and bulk-process tasks with natural language. + +When a pull request is merged, this workflow automatically: + +1. **Extracts Jira Ticket ID** - Finds the ticket reference from: + - PR title (e.g., `[PROJ-123] Add feature`) + - Branch name (e.g., `feature/PROJ-123-description`) + - PR description or comments + +2. **Analyzes Code Changes** - Reviews the merged PR to identify: + - What changed (files, features, components) + - Why it matters (business value, problem solved) + - Impact (user-facing changes, performance improvements) + - Risk level (Low/Medium/High based on scope) + +3. **Creates Business Summary** - Translates technical changes into clear, non-technical language: + - Focuses on **outcomes over implementation** + - Highlights **business value** and user benefits + - Uses **plain language** avoiding technical jargon + - Explains who benefits and how + +4. **Updates Jira Ticket** - Posts a formatted comment with: + - Business-friendly summary of what was accomplished + - Key changes in business terms + - Impact on users or stakeholders + - Technical metadata (files changed, merge details, PR link) + +5. **Comments on PR** - Adds a link back to the updated Jira ticket + + + + Technical code changes are often difficult for stakeholders to understand. This workflow bridges the gap between development and business by: + - Keeping product managers informed without technical details + - Providing business stakeholders with clear progress updates + - Reducing manual ticket updates for developers + - Creating a clear audit trail of work completed + + + + + + **Smart Translation**: The agent automatically converts technical terms into business language. For example, "Refactored authentication module" becomes "Users can now stay logged in longer without interruptions." + + + +## Use Cases + +You can also create your own Atlassian-connected agents in Mission Control. 
Here are some examples for these use cases: + + + + + **Task Example**: "Create Jira tickets for all TODO comments in the codebase with priority based on code location" + + **What the Agent Does**: + - Scans codebase for TODO and FIXME comments + - Creates Jira issues with relevant context + - Links issues to code files and line numbers + - Assigns priority based on file criticality + + **Run in Mission Control**: Schedule weekly or after major releases + + + + +Streamline sprint planning with intelligent automation: + + + + **Task Example**: "Analyze backlog items and create a proposed sprint plan based on team velocity and priorities" + + **What the Agent Does**: + - Reviews open Jira issues and their estimates + - Considers team capacity and historical velocity + - Groups related tickets together + - Creates a proposed sprint with balanced workload + + **Run in Mission Control**: Run before sprint planning meetings + + + + + + **Task Example**: "Update Confluence API documentation to match current OpenAPI spec" + + **What the Agent Does**: + - Parses API specifications from codebase + - Compares with existing Confluence documentation + - Updates or creates Confluence pages with changes + - Notifies team of significant API changes + + **Run in Mission Control**: Trigger on API spec changes or schedule weekly + + + + + + **Task Example**: "Generate release notes from closed Jira tickets since last release and publish to Confluence" + + **What the Agent Does**: + - Queries Jira for tickets closed since last release + - Categorizes changes (features, fixes, improvements) + - Generates formatted release notes + - Creates or updates Confluence release page + + **Run in Mission Control**: Trigger manually before releases + + + + + + **Task Example**: "Analyze current sprint progress and identify blocked or at-risk items" + + **What the Agent Does**: + - Reviews sprint board status + - Identifies tickets without recent updates + - Flags dependencies and blockers + - 
Generates summary report in Confluence + + **Run in Mission Control**: Schedule daily during active sprints + + + + +## Running Atlassian Agents in Mission Control +You can run Atlassian-connected agents in two ways as [one-off tasks](../tasks) or automated [workflows](../workflows): + + + + + Start with manual tasks to refine your content queries, then automate repetitive content management and validation tasks. + + + + +## Optional Workflow Enhancements + + + + +Automatically add Jira labels based on code changes: + +- `frontend-update` - UI/UX changes detected +- `backend-update` - API or database changes +- `bugfix` - Resolves bugs or issues +- `feature` - New functionality added +- `security` - Security improvements +- `performance` - Performance optimizations + + + +Tag relevant people in Jira based on components modified: + +- Payment processing changes → `@finance-team` +- User interface changes → `@product-manager` +- Security updates → `@security-lead` +- API modifications → `@api-architect` + + + +Accumulate business-friendly summaries for release documentation: + +- Store formatted summaries in `.release-notes/{version}/` +- Automatically compile summaries into Confluence release pages +- Generate customer-facing changelog from Jira updates +- Include impact metrics and user benefits + + + +Log metadata for reporting and analytics: + +- Time from ticket creation to PR merge +- Number of files changed per ticket +- Business impact category (high/medium/low) +- Stakeholder engagement (comments, mentions) +- Risk level distribution across releases + + + +## Troubleshooting + + + + **Problem**: Agent can't connect to Atlassian services + + **Solutions**: + - Verify API token hasn't expired + - Check that email matches the token's account + - Ensure domain includes `.atlassian.net` + - Regenerate token if necessary + + + + + + **Problem**: Agent can't create or update items + + **Solutions**: + - Verify your Atlassian account has appropriate
permissions + - Check project-level permissions in Jira + - Ensure space permissions are correct in Confluence + - Contact your Atlassian admin if needed + + + + + + **Problem**: Agent reports Jira issues don't exist + + **Solutions**: + - Verify issue keys are correct (e.g., PROJECT-123) + - Check that you have access to the project + - Ensure issues aren't in a restricted project + - Verify your API token has read access + + + + + + **Problem**: Documentation changes aren't reflected in Confluence + + **Solutions**: + - Check that you have edit permissions for the space + - Verify page IDs are correct + - Ensure Confluence Cloud API is accessible + - Review agent logs for specific error messages + + + + + + **Problem**: Agent hits Atlassian API rate limits + + **Solutions**: + - Reduce frequency of automated workflows + - Batch operations where possible + - Implement exponential backoff in agent logic + - Consider upgrading Atlassian plan for higher limits + + + +## Resources + + + + + Comprehensive guide to project management automation with Atlassian and Continue + + + diff --git a/docs/mission-control/integrations/github.mdx b/docs/mission-control/integrations/github.mdx index 06f2a2cd266..168b098d601 100644 --- a/docs/mission-control/integrations/github.mdx +++ b/docs/mission-control/integrations/github.mdx @@ -166,7 +166,7 @@ You can run GitHub-connected agents in two ways: Trigger agents on-demand for specific tasks: -1. Go to [Mission Control Agents](https://hub.continue.dev/agents) +1. Go to [Mission Control Agents](https://hub.continue.dev/hub?type=agents) 2. Select or create a GitHub-enabled agent 3. Click "Run Agent" and provide your task description 4. 
Monitor progress and review results in real-time diff --git a/docs/mission-control/integrations/index.mdx b/docs/mission-control/integrations/index.mdx index 9bb796866fe..b2475f919e1 100644 --- a/docs/mission-control/integrations/index.mdx +++ b/docs/mission-control/integrations/index.mdx @@ -1,6 +1,6 @@ --- title: "Overview" -description: "Connect GitHub, Slack, Sentry, and Snyk to power richer Agents, Tasks, and Workflows." +description: "Connect GitHub, Slack, Sentry, Snyk, PostHog, Atlassian, and Netlify to power richer Agents, Tasks, and Workflows." sidebarTitle: "Overview" --- @@ -8,7 +8,7 @@ sidebarTitle: "Overview" Integrations let Continue connect to the tools you already use, so Agents can read code, open pull requests, send messages, react to real-world events, and more. -Mission Control currently supports four first-class integrations: +Mission Control currently supports these first-class integrations: @@ -20,7 +20,7 @@ Mission Control currently supports four first-class integrations: Mention @continue to kick off Agents directly from Slack. - + Trigger Agents automatically when new Sentry issues appear. @@ -28,10 +28,28 @@ Mission Control currently supports four first-class integrations: Detect and fix security vulnerabilities automatically. + + Detect telemetry changes in merged PRs and create or update corresponding dashboards. + + + + Connect Jira and Confluence to automate project management and docs. + + + + Optimize your site by comparing performance on PR preview and production Netlify deploys. + + + Set up workflows that validate content and schemas on every PR. + + + Monitor database performance, generate migrations, and optimize queries automatically. + ## Next Steps - + + Connect your favorite tools to Mission Control and unlock new Agent capabilities. 
\ No newline at end of file diff --git a/docs/mission-control/integrations/netlify.mdx b/docs/mission-control/integrations/netlify.mdx new file mode 100644 index 00000000000..28effdf8013 --- /dev/null +++ b/docs/mission-control/integrations/netlify.mdx @@ -0,0 +1,395 @@ +--- +title: "Netlify Integration" +description: "Give your agents access to Netlify's API and CLI to manage deployments and optimize website performance" +--- + +## Overview + +Connect Netlify to Continue Mission Control to give your agents access to Netlify's API and CLI. Create projects, deploy applications, configure domains, and manage infrastructure with natural language. When Netlify is enabled, Continue can analyze bundle sizes, audit performance, and prevent performance regressions. + + + + - Run Lighthouse audits on PR previews and production + - Compare bundle sizes and analyze performance regressions + - Automatically detect configuration issues (minification, caching, tree-shaking) + - Block PRs that introduce critical performance problems + - Generate detailed performance reports with actionable fixes + - Create projects and manage domains with natural language + - Configure build settings and environment variables + + + +## Setup + + + + + + Go to your [Integrations Settings](https://hub.continue.dev/integrations). + + + + + + Click "Connect" next to Netlify. You'll need: + + - **Personal Access Token**: Generate from Netlify account settings + + + + + + To create a personal access token: + 1. Log into your [Netlify account](https://app.netlify.com) + 2. Go to **User Settings** β†’ **Applications** β†’ **Personal access tokens** + 3. Click "New access token" + 4. Give it a descriptive name (e.g., "Continue Mission Control") + 5. Copy the token immediately + + + + + + Paste your access token into the integration form and click "Create Connection" + + + + + + + + **Token Scope**: Personal access tokens have full access to your Netlify account. 
Create a token specifically for Continue to maintain security and easy revocation if needed. + + + +## Workflows + +### Optimize Website Performance + +**Trigger**: On PR merge +**Description**: Run Lighthouse audits on PR preview and production to compare performance + +When a pull request is merged, this performance-focused workflow automatically: + +1. **Retrieves Deployment Information** - Uses Netlify MCP to get: + - Production deploy URL and metadata + - Deploy preview URL for the PR branch + - Build artifacts and configuration + +2. **Analyzes Bundle Sizes** - Compares JavaScript and CSS bundles: + - Downloads and measures all asset files + - Calculates total bundle size for both deploys + - Identifies largest individual files + - Detects unminified or bloated assets + +3. **Reviews Build Configuration** - Examines: + - `vite.config.ts` or `webpack.config.js` for minification settings + - `netlify.toml` for cache headers and build commands + - `package.json` for redundant or oversized dependencies + +4. **Identifies Performance Issues** - Classifies problems by severity: + - πŸ”΄ **Critical**: Minification disabled, bundle size >50% increase, no caching + - ⚠️ **Warning**: No code splitting, large chunks >500KB, 20-50% bundle increase + - ℹ️ **Info**: Suboptimal configurations, bundle size less than 20% increase + +5. **Generates Actionable Report** - Posts to PR with: + - Bundle size comparison table + - Root cause analysis with file references and line numbers + - Code fixes that can be copy-pasted + - Merge recommendation (Approved/Review Needed/Blocked) + - Expected improvements from each fix + + + + Performance regressions often slip through code review because: + - Bundle size increases aren't visible in diffs + - Configuration errors (disabled minification) go unnoticed + - Redundant dependencies accumulate over time + + This workflow catches these issues automatically before they reach production. 
+ + + + + + **Smart Analysis**: The agent doesn't just report bundle sizesβ€”it analyzes build configurations, identifies root causes, and provides specific code fixes with line numbers. + + + +### Performance Issue Classification + +The agent categorizes issues by severity: + + + + **Issues that must be fixed before merging:** + - Minification disabled (`minify: false`) + - Tree-shaking disabled (`treeshake: false`) + - No caching headers (`Cache-Control: no-cache`) + - Production source maps enabled + - Bundle size increase >50% + - Redundant dependencies (lodash + underscore, moment + date-fns + dayjs) + + **Action**: Agent recommends blocking the merge + + + + + + **Issues that need attention:** + - No code splitting (single large bundle) + - Suboptimal cache headers (max-age too low) + - Large individual chunks (>500KB) + - Unoptimized images + - Bundle size increase 20-50% + + **Action**: Agent recommends careful review before merging + + + + + + **Non-blocking improvements:** + - Could use better code splitting + - Modern browser features could replace polyfills + - CSS not extracted/split + - Bundle size increase less than 20% + + **Action**: Agent approves merge but suggests optimizations + + + +## Use Cases + +### Infrastructure as Code + +Manage Netlify infrastructure with natural language: + + + + **Task Example**: "Create a new Netlify site for the marketing-site repo with custom domain marketing.company.com" + + **What the Agent Does**: + - Creates new Netlify project via API + - Connects to specified Git repository + - Configures build settings and commands + - Sets up custom domain and SSL + - Configures environment variables + + **Run in Mission Control**: Run manually when creating new projects + + + +### Build Configuration Optimization + +Automatically optimize build settings: + + + + **Task Example**: "Audit all Netlify sites for missing cache headers and suboptimal build configs" + + **What the Agent Does**: + - Reviews `netlify.toml` and 
build settings + - Checks cache header configurations + - Identifies missing optimizations + - Generates recommended configuration changes + - Can apply fixes automatically + + **Run in Mission Control**: Run monthly or after adding new sites + + + +### Performance Auditing + +Prevent performance regressions before deployment: + + + + **Task Example**: "Analyze bundle sizes and configurations, block PRs with critical performance issues" + + **What the Agent Does**: + - Compares preview and production bundle sizes + - Detects disabled minification or tree-shaking + - Identifies redundant dependencies (lodash + underscore, multiple date libraries) + - Checks cache configuration + - Posts detailed report with fixes to PR + - Recommends approval, review, or blocking merge + + **Run in Mission Control**: Triggered automatically on PR merge + + + +### Dependency Analysis + +Identify and fix problematic dependencies: + + + + **Task Example**: "Find redundant or oversized dependencies across all projects" + + **What the Agent Does**: + - Scans `package.json` files in all Netlify sites + - Identifies duplicate functionality (multiple date/utility libraries) + - Finds oversized packages (>100KB) + - Suggests lightweight alternatives + - Calculates potential bundle size savings + + **Run in Mission Control**: Run quarterly for dependency cleanup + + + +### Continuous Performance Monitoring + +Track performance trends over time: + + + + **Task Example**: "Generate a performance trend report for the last 30 deploys" + + **What the Agent Does**: + - Queries deploy history via Netlify MCP + - Tracks bundle size over time + - Identifies gradual performance degradation + - Highlights deploys that introduced regressions + - Generates visualization of trends + + **Run in Mission Control**: Schedule weekly for ongoing monitoring + + + +### Domain and SSL Management + +Manage domains using natural language: + + + + **Task Example**: "Add www.example.com to my site and configure SSL" + + 
**What the Agent Does**: + - Configures custom domains via Netlify API + - Sets up SSL certificates automatically + - Configures DNS records and redirects + - Verifies domain setup and SSL status + + **Run in Mission Control**: Run manually when adding domains + + + +## Running Netlify Agents in Mission Control + +You can run Netlify-connected agents in two ways as [one-off tasks](../tasks) or automated [workflows](../workflows): + + + + + Start with manual tasks to refine your content queries, then automate repetitive content management and validation tasks. + + + + +### 2. Automated Workflows + +Set up agents to run automatically: + +- **PR-triggered**: Audit performance when pull requests are merged +- **Scheduled**: Run weekly bundle size audits across all sites +- **Manual**: Analyze specific deploys or troubleshoot performance issues + + + + Start with manual performance audits on specific PRs to refine your thresholds, then enable automated workflows to catch all performance regressions. 
+ + + + +## Troubleshooting + + + + **Problem**: Agent can't connect to Netlify API + + **Solutions**: + - Verify personal access token is correct + - Check that token hasn't been revoked + - Ensure token has necessary permissions + - Try regenerating the token + + + + + + **Problem**: Deploy preview is still building when agent tries to analyze + + **Solutions**: + - Verify you have deploy permissions for the site + - Check that the site exists and is accessible + - Ensure build hooks are configured if using webhooks + - Review Netlify API rate limits + + + + + + **Problem**: Agent can't find deploy preview for the branch + + **Solutions**: + - Verify branch name is correct + - Check that PR has triggered a Netlify build + - Ensure deploy previews are enabled in Netlify settings + - Confirm the repository is connected to Netlify + + + + + + **Problem**: Agent can't download or analyze bundles + + **Solutions**: + - Verify deploy URLs are accessible + - Check for authentication requirements on preview + - Ensure assets are being generated correctly + - Review build logs for errors + + + + + + **Problem**: Agent can't locate deploy previews + + **Solutions**: + - Verify branch name is correct + - Check that deploy previews are enabled for the site + - Ensure the commit has triggered a build + - Wait for build to complete before querying + + + + + + **Problem**: Can't retrieve site performance metrics + + **Solutions**: + - Verify Netlify Analytics is enabled for the site + - Check that your plan includes analytics access + - Ensure time range for queries is valid + - Review API documentation for metrics endpoints + + + +## Support & Resources + + + + + + Complete guide to continuous deployment automation with Netlify and Continue + + + + + + Combine Netlify with GitHub for end-to-end CI/CD automation + + + + diff --git a/docs/mission-control/integrations/posthog.mdx b/docs/mission-control/integrations/posthog.mdx new file mode 100644 index 00000000000..52f10379220 
--- /dev/null +++ b/docs/mission-control/integrations/posthog.mdx @@ -0,0 +1,238 @@ +--- +title: "PostHog Integration" +description: "Leverage product analytics and feature flags with Continue Agents connected to PostHog" +--- + +## Overview + +Connect PostHog to Continue Mission Control to enable agents to analyze user behavior, manage feature flags, and optimize product experiences. When PostHog is enabled, Continue can analyze product metrics, automate feature rollouts, and create data-driven development tasks. + + + + - Analyze user behavior patterns and create optimization tasks + - Manage feature flags and gradual rollouts + - Generate insights from product analytics + - Automate A/B test analysis and reporting + - Monitor conversion funnels and user journeys + - Create PRs based on analytics insights + + + +## Setup + + + + + + Go to your [Integrations Settings](https://hub.continue.dev/integrations). + + + + + + Click "Connect" next to PostHog. You'll need the following credentials: + + - **Project API Key**: Your PostHog project API key + - **Host URL**: Your PostHog instance URL (e.g., `https://app.posthog.com` or your self-hosted URL) + + + + + + In your PostHog account: + 1. Navigate to Project Settings + 2. Find your Project API Key + 3. Copy your PostHog instance URL + 4. Paste both values into Mission Control + + + + + + Click "Create Connection" and verify that Continue can access your PostHog data + + + + + + + + **How to get these credentials:** + 1. Log into your PostHog account + 2. Go to **Settings** β†’ **Project API Key** + 3. Copy your API key + 4. 
Note your instance URL (shown in the browser address bar) + + + +## Use Cases + +### Analytics-Driven Development + +Create agents that analyze user behavior and generate development tasks: + + + + **Task Example**: "Analyze the checkout flow conversion rate and identify areas for improvement" + + **What the Agent Does**: + - Retrieves funnel data from PostHog + - Identifies drop-off points in the user journey + - Analyzes user session recordings at problem areas + - Creates issues or PRs with optimization recommendations + + **Run in Mission Control**: Schedule weekly or trigger after significant traffic changes + + + +### Feature Flag Management + +Automate feature rollout based on metrics: + + + + **Task Example**: "Monitor new checkout feature performance and increase rollout percentage if metrics are positive" + + **What the Agent Does**: + - Tracks key metrics for the feature flag + - Compares performance against baseline + - Automatically adjusts rollout percentage + - Alerts team if anomalies are detected + + **Run in Mission Control**: Set up as continuous monitoring workflow + + + +### Performance Monitoring + +Track and respond to performance issues: + + + + **Task Example**: "Analyze page load times and create issues for pages slower than 3 seconds" + + **What the Agent Does**: + - Queries PostHog for performance metrics + - Identifies slow-loading pages + - Analyzes common characteristics of slow pages + - Creates prioritized issues with performance data + + **Run in Mission Control**: Schedule daily performance audits + + + +### A/B Test Analysis + +Automate experiment analysis and reporting: + + + + **Task Example**: "Analyze the results of the new homepage A/B test and determine statistical significance" + + **What the Agent Does**: + - Retrieves experiment data from PostHog + - Performs statistical analysis + - Generates comprehensive report with visualizations + - Creates recommendations for next steps + + **Run in Mission Control**: Trigger when 
experiment reaches sample size + + + +### User Feedback Loop + +Connect analytics insights to code improvements: + + + + **Task Example**: "Identify the top 5 most frustrating user experiences based on rage clicks and session replays" + + **What the Agent Does**: + - Analyzes user session data for frustration signals + - Reviews session replays of problematic interactions + - Identifies common UX issues + - Creates detailed bug reports with user context + + **Run in Mission Control**: Run weekly to catch emerging patterns + + + +## Running PostHog Agents in Mission Control + +You can run PostHog-connected agents in two ways as [one-off tasks](../tasks) or automated [workflows](../workflows): + + + + + Start with manual tasks to refine your content queries, then automate repetitive content management and validation tasks. + + + + +## Troubleshooting + + + + **Problem**: Agent returns errors when trying to fetch analytics + + **Solutions**: + - Verify API key is correct and hasn't expired + - Check that the project API key has read permissions + - Ensure host URL includes protocol (https://) + - Confirm your PostHog plan includes API access + + + + + + **Problem**: Data retrieved doesn't match PostHog dashboard + + **Solutions**: + - Check that you're querying the correct time range + - Verify PostHog data is fully processed (may have slight delay) + - Ensure filters and breakdowns match your dashboard + - Review agent logs for query parameters + + + + + + **Problem**: Agent can't modify feature flags + + **Solutions**: + - Verify API key has write permissions + - Check that you're using a Personal API Key (not Project API Key) for modifications + - Ensure feature flag exists before attempting updates + - Review PostHog API documentation for flag management + + + + + + **Problem**: Can't connect to self-hosted PostHog instance + + **Solutions**: + - Verify host URL is accessible from Mission Control + - Check firewall rules allow Continue's IP addresses + - Ensure 
SSL certificate is valid if using HTTPS + - Test API access with curl or Postman first + + + +## Support & Resources + + + + + + Complete guide to building product-led development workflows with PostHog and Continue + + + + + + Combine PostHog with GitHub for data-driven development automation + + + + diff --git a/docs/mission-control/integrations/sanity.mdx b/docs/mission-control/integrations/sanity.mdx new file mode 100644 index 00000000000..0a8a2ce83ac --- /dev/null +++ b/docs/mission-control/integrations/sanity.mdx @@ -0,0 +1,349 @@ +--- +title: "Sanity Integration" +description: "Automate content management and schema validation with Continue Agents connected to Sanity" +--- + +## Overview + +Connect Sanity to Continue Mission Control to enable agents to manage content, validate schemas, execute GROQ queries, and automate your content workflows. When Sanity is enabled, Continue can explore document types, update content, handle migrations, and maintain content quality across your projects. + + + + - Automatically validate content and schemas on every PR + - Execute GROQ queries to analyze and update content + - Generate documentation from your content model + - Manage content migrations between environments + - Monitor content quality and consistency + - Automate content creation and updates + + + +## Setup + + + + + + Go to [the Sanity Integrations](https://hub.continue.dev/integrations/sanity). + + + + + + "Connect" to Sanity. The integration uses OAuth authentication through the Sanity MCP. + + + + + + Select the Sanity organization and project to connect + + + + + + After connecting, set up your first workflow. The integration supports: + - **On PR open**: Validate schemas and content when PRs are created + - **Manual tasks**: Run content operations on-demand + - **Scheduled jobs**: Periodic content audits and maintenance + + + + + + +## Workflows + +### Update Sanity schema docs + +**Trigger**: On PR open +**Description**: Skip the GROQ lookup. 
Agents manage your content through conversation. + +When a pull request modifies schema files (`schemas/**/*.{ts,js,tsx,jsx}`, `schemaTypes/**/*`, or `sanity.config.{ts,js}`), this workflow automatically: + +1. **Connects to Sanity** - Fetches the complete current schema using Sanity MCP +2. **Analyzes Changes** - Identifies new, modified, or removed document types +3. **Updates Documentation** - Generates comprehensive documentation including: + - Complete field descriptions and validation rules + - Relationship diagrams between document types + - GROQ query examples for each type + - Usage guidance for developers and content teams +4. **Commits to PR Branch** - Adds documentation directly to the existing PR branch +5. **Posts PR Comment** - Provides a detailed summary of documented changes + + + + For each document type, the agent generates: + - **Field Reference**: Every field with type, validation, and description + - **Relationships**: Visual diagram showing references between types + - **Query Examples**: Practical GROQ queries for common operations + - **Studio Behavior**: How fields appear and function in Sanity Studio + - **Validation Rules**: All constraints and requirements + - **Usage Notes**: Important gotchas and best practices + + + + + + **Documentation Location**: By default, docs are created at `docs/content-model.md`. The agent searches for existing documentation in several standard locations and preserves your structure. + + + +#### Example PR Comment + +After updating the documentation, the agent posts a comprehensive comment like this: + +```markdown +## πŸ“ Schema Documentation Updated + +I've automatically updated the content model documentation based on the schema changes in this PR. 
+ +### πŸ“‹ Changes Documented + +**New Document Types** (2) +- ✨ `product` - E-commerce product listings +- ✨ `review` - Customer product reviews + +**Modified Document Types** (1) +- πŸ“ `article` - Added `featured` boolean field, updated `title` validation + +### πŸ“„ Documentation Location + +πŸ“– [View Updated Documentation](./docs/content-model.md) + +### πŸ“Š Schema Overview + +- **Total Document Types**: 12 +- **Total Fields**: 87 +- **Types Modified in this PR**: 3 +- **New Fields Added**: 5 + +### πŸ”— Key Relationships Changed + +- `product` now references `review` (one-to-many) +- `article.featured` added for homepage curation +``` + +## Use Cases + +### Content Validation + +Automatically validate content structure and quality: + + + + **Task Example**: "Check all blog posts for required fields and broken references" + + **What the Agent Does**: + - Scans documents for missing required fields + - Validates references between documents + - Checks for orphaned content + - Reports inconsistencies and suggests fixes + + **Run in Mission Control**: Schedule daily or trigger on content updates + + + +### Schema Management + +Manage and evolve your content schemas: + + + + **Task Example**: "Analyze the product schema and suggest optimizations for better performance" + + **What the Agent Does**: + - Reviews document type definitions + - Identifies unused or redundant fields + - Suggests schema improvements + - Validates relationships and data integrity + + **Run in Mission Control**: Run before major schema changes + + + +### Content Migration + +Automate content structure changes: + + + + **Task Example**: "Migrate all blog posts from the old schema to the new format" + + **What the Agent Does**: + - Reads content from old schema structure + - Transforms data to match new schema + - Validates migrated content + - Creates migration report with any issues + + **Run in Mission Control**: Run manually for controlled migrations + + + +### GROQ Query Execution + 
+Run complex queries to analyze content: + + + + **Task Example**: "Find all articles published in Q4 2023 with over 10,000 views" + + **What the Agent Does**: + - Executes GROQ queries against your dataset + - Analyzes results and identifies patterns + - Generates reports and visualizations + - Exports data for further analysis + + **Run in Mission Control**: Run on-demand for content insights + + + +### Schema Documentation + +Automatically maintain comprehensive schema documentation: + + + + **Task Example**: "Generate comprehensive documentation for all document types and their relationships" + + **What the Agent Does**: + - Fetches complete schema from Sanity + - Documents every field with types, validation, and descriptions + - Creates Mermaid diagrams showing document relationships + - Includes practical GROQ query examples + - Preserves manual documentation sections + - Updates statistics (total types, field counts) + + **Run in Mission Control**: Triggered automatically on PR open when schema files change + + **Documentation Structure**: + - Table of contents with all document types + - Detailed field tables for each type + - Relationship diagrams + - Query examples for common operations + - Validation rules and Studio configurations + + + +### Content Localization + +Manage multi-language content: + + + + **Task Example**: "Verify all product pages have complete Spanish and French translations" + + **What the Agent Does**: + - Identifies missing translations + - Validates localized field structure + - Reports incomplete content + - Suggests content for translation + + **Run in Mission Control**: Schedule weekly translation audits + + + +## Running Sanity Agents in Mission Control + +You can run Sanity-connected agents in two ways as [one-off tasks](../tasks) or automated [workflows](../workflows): + + + + + Start with manual tasks to refine your content queries, then automate repetitive content management and validation tasks. 
+ + + + +## Troubleshooting + + + + **Problem**: Agent shows authentication errors + + **Solutions**: + - Re-authenticate through the integrations page (OAuth expires after 7 days) + - For CI/CD workflows, use environment variables instead + - Check that project permissions haven't changed + - Verify the Sanity project still exists + + + + + + **Problem**: Agent can't locate schema definitions + + **Solutions**: + - Verify schema files are in the correct location + - Check that the project ID is correct + - Ensure you're connected to the right dataset + - Confirm schema files are properly exported + + + + + + **Problem**: Queries fail or return unexpected results + + **Solutions**: + - Validate GROQ syntax in Sanity Vision + - Check that referenced fields exist + - Verify document types are spelled correctly + - Review query complexity and optimize if needed + + + + + + **Problem**: Agent reports content doesn't match schema + + **Solutions**: + - Check for recent schema changes + - Verify required fields are present + - Review field type mismatches + - Run migration to update content structure + + + + + + **Problem**: Content migration fails or produces errors + + **Solutions**: + - Test migration on a small subset first + - Validate both old and new schemas + - Check for data type conflicts + - Review migration logs for specific errors + - Use transactions for atomic updates + + + +## Support & Resources + + + + + + Complete guide to content management automation with Sanity and Continue + + + + + + Combine Sanity with GitHub for automated content workflows + + + + + + Official Sanity documentation and guides + + + + + + Learn GROQ query language for content queries + + + + \ No newline at end of file diff --git a/docs/mission-control/integrations/sentry.mdx b/docs/mission-control/integrations/sentry.mdx index a02df609826..72532310a9e 100644 --- a/docs/mission-control/integrations/sentry.mdx +++ b/docs/mission-control/integrations/sentry.mdx @@ -21,18 +21,19 @@ 
Connect Sentry to Continue Mission Control to enable agents to automatically det - + - Go to your [Integrations Settings](https://hub.continue.dev/integrations). + Go to [Sentry Integration](https://hub.continue.dev/integrations/sentry). - Click "Connect" and select Sentry. You'll need the following credentials: + Click "Connect". You'll need the following credentials: - **Sentry Organization Slug**: Your organization name (e.g., "my-company") - **Auth Token**: Internal integration token from Sentry - **Client Secret**: For webhook signature verification + - **API Key**: For MCP support @@ -69,75 +70,14 @@ Connect Sentry to Continue Mission Control to enable agents to automatically det ## Running Sentry Agents in Mission Control -You can run Sentry-connected agents in two ways: - -### 1. Manual Tasks - -Trigger agents on-demand for error analysis: - -1. Go to [Mission Control Agents](https://hub.continue.dev/agents) -2. Select or create a Sentry-enabled agent -3. Click "Run Agent" and provide your task description -4. Monitor progress and review results in real-time - -**Example Tasks:** -- "Analyze the top 10 errors from the last 24 hours" -- "Create a PR to fix the authentication timeout error" -- "Generate a report on errors affecting mobile users" - -### 2. Automated Workflows - -Set up agents to run automatically: - -- **Webhook-triggered**: Execute when new Sentry errors occur -- **Scheduled**: Run daily or weekly error analysis -- **Threshold-based**: Trigger when error rates exceed limits +You can run Sentry-connected agents in two ways as [one-off tasks](../tasks) or automated [workflows](../workflows). - Start with manual tasks to refine your prompts, then convert successful workflows to automations for continuous error monitoring. + Start with manual tasks to refine your content queries, then automate repetitive content management and validation tasks. 
-## Integration with GitHub - -Combine Sentry with GitHub integration for a complete workflow: - - - - - - Enable both Sentry and GitHub integrations in Mission Control - - - - - - Build an agent that: - - Receives Sentry error notifications - - Analyzes the error and finds the problematic code - - Creates a PR with a fix - - Adds the PR link to the Sentry issue - - - - - - Configure the agent to run automatically on new Sentry issues - - - - - -## Monitoring Agent Activity - -Track your agent's error resolution performance: - -1. **View in Mission Control**: See all agent runs and their outcomes -2. **Check Sentry**: Verify that issues are being resolved -3. **Review PRs**: Ensure quality of generated fixes -4. **Monitor Metrics**: Track resolution time and success rate - ## Troubleshooting diff --git a/docs/mission-control/integrations/slack-agent.mdx b/docs/mission-control/integrations/slack-agent.mdx index 1dbe2954077..b386164e6be 100644 --- a/docs/mission-control/integrations/slack-agent.mdx +++ b/docs/mission-control/integrations/slack-agent.mdx @@ -29,9 +29,6 @@ Mention @Continue in any channel with a task description, and it will: ## Setup -Continue's Slack Bot can be installed to a Slack workspace via Mission Control, from:[Integrations Settings](https://hub.continue.dev/integrations) -- Org Integrations Settings - `https://hub.continue.dev/organizations/{your-org}/settings/integrations` - Continue's Slack app is in Beta development. During installation, you will see a warning that it is not yet approved/official. @@ -48,7 +45,7 @@ Continue's Slack Bot can be installed to a Slack workspace via Mission Control, - Click the "Connect" button next to Slack and authorize the app in your workspace. + Click the "Connect" button next in [the Slack Integration ](https://hub.continue.dev/integrations/slack)and authorize the app in your workspace. 
@@ -115,7 +112,7 @@ Continue will forward your message to the existing agent session instead of crea ## Monitoring Agent Progress -Click the agent link in Slack to view the agent's progress in [Mission Control](https://hub.continue.dev/agents). +Click the agent link in Slack to view the agent's progress in [Mission Control](https://hub.continue.dev/hub?type=agents). You can also click the Slack icon in the agents page to return to the Slack message. diff --git a/docs/mission-control/integrations/snyk.mdx b/docs/mission-control/integrations/snyk.mdx index 9969fa7681b..2cd981d5361 100644 --- a/docs/mission-control/integrations/snyk.mdx +++ b/docs/mission-control/integrations/snyk.mdx @@ -23,7 +23,7 @@ Connect Snyk to Continue Mission Control to enable agents to automatically detec - Go to your [Integrations Settings](https://hub.continue.dev/integrations). + Go to the [Snyk Integration](https://hub.continue.dev/integrations/snyk). @@ -60,44 +60,6 @@ Connect Snyk to Continue Mission Control to enable agents to automatically detec -## Integration with GitHub - -Combine Snyk with GitHub integration for a complete security workflow: - - - - - - Enable both Snyk and GitHub integrations in Mission Control - - - - - - Build an agent that: - - Receives Snyk vulnerability alerts - - Analyzes the security issue in your codebase - - Creates a PR with the security fix - - Adds security impact analysis to the PR - - - - - - Configure the agent to run automatically on new critical vulnerabilities - - - - - -## Monitoring Agent Activity - -Track your agent's security remediation performance: - -1. **View in Inbox**: Navigate to the [Snyk view](https://hub.continue.dev/inbox?view=snyk) to see all Snyk-related agent activity -2. **Check Snyk Dashboard**: Verify that vulnerabilities are being addressed -3. **Review PRs**: Ensure quality and security of generated fixes -4. 
**Monitor Metrics**: Track mean time to remediation (MTTR) and fix rate ## Troubleshooting diff --git a/docs/mission-control/integrations/supabase.mdx b/docs/mission-control/integrations/supabase.mdx new file mode 100644 index 00000000000..c29d35b6f69 --- /dev/null +++ b/docs/mission-control/integrations/supabase.mdx @@ -0,0 +1,526 @@ +--- +title: "Supabase Integration" +description: "Monitor database security, generate migrations, and optimize queries with Continue Agents connected to Supabase" +--- + +## Overview + +Connect Supabase to Continue Mission Control to enable AI-powered database workflows. When Supabase is enabled, Continue can audit Row Level Security (RLS) policies, generate secure migrations, optimize queries, and maintain database security automatically. + + + + - Automatically audit RLS policies on every PR + - Generate secure migrations for missing security policies + - Identify and fix critical security gaps + - Optimize database queries and schemas + - Monitor database performance and security + - Validate schema changes before deployment + + + +## Setup + + + + + + Go to the [Supabase Integration](https://hub.continue.dev/integrations/supabase). + + + + + + Click "Connect" to Supabase. + + + + + + You'll be redirected to Supabase to authorize Continue: + 1. Select the Supabase organization and project to connect + 2. Review the requested permissions + 3. Click "Authorize" to complete the connection + + + + + + After connecting, set up your first workflow. 
The integration supports: + - **On PR open**: Audit RLS policies when PRs are created + - **Manual tasks**: Run database operations on-demand + - **Scheduled jobs**: Periodic security audits and performance checks + + + + + +## Workflows + +### Supabase security review + +**Trigger**: On PR open +**Description**: Connect your Supabase workspace to agents that can explore schemas, run queries, create and update documents, and handle migrations using natural language + +When a pull request is opened, this security-focused workflow automatically: + +1. **Analyzes PR Changes** - Identifies affected tables from: + - Migration files (`supabase/migrations/*.sql`) + - SDK/library usage (`.from('table_name')` calls) + - Schema references in type definitions + +2. **Audits RLS Policies** - Uses Supabase MCP to inspect: + - Whether RLS is enabled on affected tables + - Existing policy configurations + - Potential security gaps + +3. **Classifies Security Risks** - Prioritizes findings by severity: + - πŸ”΄ **Critical**: No RLS on tables with sensitive data + - 🟠 **High**: Overly permissive policies (e.g., `USING (true)`) + - 🟑 **Medium**: Missing standard access patterns + - 🟒 **Low**: Policy optimization opportunities + +4. **Generates SQL Migrations** - Creates fixes for Critical/High/Medium issues: + - Enables RLS on unprotected tables + - Adds missing user-scoped policies + - Implements proper access controls + - Includes rollback commands + +5. **Commits Fixes to PR Branch** - Automatically pushes security fixes +6. **Posts Comprehensive Comment** - Explains all findings and changes + + + + Row Level Security (RLS) is Supabase's primary security mechanism. Without proper RLS policies: + - Users can access other users' private data + - Unauthorized modifications can occur + - Sensitive information may be exposed + + This workflow automatically catches these issues before they reach production. 
+ + + + + + **Smart Scoping**: The agent only audits tables directly referenced in your PR changes, not your entire database. This keeps audits focused and efficient. + + + +#### Example PR Comment + +After auditing RLS policies, the agent posts a comprehensive comment like this: + +```markdown +## πŸ”’ Supabase RLS Policy Audit + +### Scope +This audit covers **3 tables** referenced in this PR: +- user_profiles (detected in: migration file) +- comments (detected in: SDK usage in src/api/comments.ts) +- posts (detected in: migration file) + +### Summary +- βœ… 1 table with proper RLS +- ⚠️ 2 tables requiring attention +- πŸ”΄ 1 critical security gap found + +### Findings + +| Table | Risk Level | Issue | Status | +|-------|------------|-------|--------| +| `user_profiles` | πŸ”΄ Critical | No RLS enabled | βœ… Fixed in commit abc123 | +| `comments` | 🟑 Medium | Missing delete policy | βœ… Fixed in commit abc123 | +| `posts` | 🟒 Low | Policy optimization possible | πŸ’‘ Recommendation | + +### Changes Made + +- Enabled RLS on `user_profiles` +- Added policies: `user_profiles_select_own`, `user_profiles_update_own`, `comments_delete_own` +- See full migration: `supabase/migrations/20241118120000_rls_security_fixes.sql` + +### Recommendations (Low Priority) + +- **posts table**: Consider renaming policy `posts_policy_1` to `posts_select_owner` for clarity + +### Policy Patterns Used + +**User-scoped access** (users access only their own data): +\```sql +USING (auth.uid() = user_id) +WITH CHECK (auth.uid() = user_id) +\``` + +### Next Steps + +- βœ… Review the migration file and committed changes +- βœ… Test the policies in your development environment +- βœ… Merge this PR once you've verified the security improvements +``` + +### Security Risk Levels Explained + + + + **What it means**: Tables with no RLS that contain sensitive data + + **Examples**: + - User profiles, emails, or authentication data + - Financial information or payment details + - Private messages or 
personal content + - New tables created without RLS + + **What the agent does**: Automatically enables RLS and adds basic policies + + + + + + **What it means**: Overly permissive policies that could allow unauthorized access + + **Examples**: + - Policies using `USING (true)` without justification + - Write access without proper validation + - Admin policies that don't verify admin status + - Missing `WITH CHECK` clauses + + **What the agent does**: Generates restrictive policies with proper checks + + + + + + **What it means**: Partial RLS coverage or missing standard patterns + + **Examples**: + - SELECT policy exists but no UPDATE/DELETE + - No user-scoped policies for personal data + - Missing public read policies where appropriate + - Incomplete CRUD policy coverage + + **What the agent does**: Adds missing policies following standard patterns + + + + + + **What it means**: Policies work but could be improved + + **Examples**: + - Poor policy naming conventions + - Inefficient policy logic + - Missing documentation + - Duplicate or redundant policies + + **What the agent does**: Suggests improvements in PR comment (no auto-fix) + + + +## Use Cases + +### Automated Security Audits + +Catch security issues before they reach production: + + + + **Task Example**: Automatically audit RLS on every PR that touches database schema or queries + + **What the Agent Does**: + - Scans PR for database-related changes + - Identifies all affected tables + - Checks RLS status and policies + - Generates fixes for security gaps + - Commits migrations to the PR branch + + **Run in Mission Control**: Triggered automatically on PR open + + + +### Database Migration Generation + +Create secure migrations automatically: + + + + **Task Example**: "Generate a migration to add RLS policies for the new messages table" + + **What the Agent Does**: + - Analyzes table structure and purpose + - Determines appropriate access patterns + - Generates SQL with RLS policies + - Includes proper 
naming and comments + - Adds rollback commands + + **Run in Mission Control**: Run manually or on schema changes + + + +### Query Optimization + +Improve database performance: + + + + **Task Example**: "Analyze slow queries and suggest optimizations" + + **What the Agent Does**: + - Identifies queries in your codebase + - Checks for missing indexes + - Suggests query rewrites + - Recommends schema changes + - Estimates performance impact + + **Run in Mission Control**: Schedule weekly performance reviews + + + +### Schema Validation + +Ensure schema changes follow best practices: + + + + **Task Example**: "Validate that all new tables follow our schema standards" + + **What the Agent Does**: + - Checks naming conventions + - Validates data types and constraints + - Ensures proper indexes exist + - Verifies RLS is enabled + - Reviews foreign key relationships + + **Run in Mission Control**: Triggered on migration file changes + + + +### Access Pattern Analysis + +Understand how your database is used: + + + + **Task Example**: "Analyze SDK usage patterns and suggest RLS policies" + + **What the Agent Does**: + - Scans codebase for Supabase client calls + - Identifies data access patterns + - Detects security anti-patterns + - Recommends appropriate RLS policies + - Generates migration files + + **Run in Mission Control**: Run before major releases + + + +### Development Environment Sync + +Keep dev and production schemas aligned: + + + + **Task Example**: "Compare dev and production schemas and generate sync migrations" + + **What the Agent Does**: + - Connects to multiple Supabase projects + - Compares schema definitions + - Identifies differences + - Generates sync migrations + - Validates compatibility + + **Run in Mission Control**: Schedule or run manually + + + +## Running Supabase Agents in Mission Control + +You can run Supabase-connected agents in two ways as [one-off tasks](../tasks) or automated [workflows](../workflows): + + + + + Start with manual tasks 
to refine your content queries, then automate repetitive content management and validation tasks. + + + +## Common RLS Policy Patterns + +The agent recognizes and implements these standard security patterns: + +### User-Scoped Access + +Users can only access their own data: + +```sql +-- SELECT: Users can read their own records +CREATE POLICY "users_select_own" ON user_profiles +FOR SELECT USING (auth.uid() = user_id); + +-- UPDATE: Users can update their own records +CREATE POLICY "users_update_own" ON user_profiles +FOR UPDATE USING (auth.uid() = user_id) +WITH CHECK (auth.uid() = user_id); + +-- DELETE: Users can delete their own records +CREATE POLICY "users_delete_own" ON user_profiles +FOR DELETE USING (auth.uid() = user_id); +``` + +### Admin Override + +Admins can access all data while users access only their own: + +```sql +CREATE POLICY "posts_select_owner_or_admin" ON posts +FOR SELECT USING ( + auth.uid() = user_id OR + auth.jwt() ->> 'role' = 'admin' +); +``` + +### Public Read, Authenticated Write + +Anyone can read, only authenticated users can write: + +```sql +-- Public read access +CREATE POLICY "public_posts_select" ON public_posts +FOR SELECT USING (true); + +-- Authenticated insert only +CREATE POLICY "public_posts_insert_auth" ON public_posts +FOR INSERT WITH CHECK (auth.role() = 'authenticated'); +``` + +### Soft Delete Handling + +Exclude soft-deleted records from queries: + +```sql +CREATE POLICY "posts_select_not_deleted" ON posts +FOR SELECT USING ( + deleted_at IS NULL AND + (auth.uid() = user_id OR is_published = true) +); +``` + +## Troubleshooting + + + + **Problem**: Agent can't connect to Supabase + + **Solutions**: + - Re-authenticate through the integrations page + - Verify OAuth token hasn't expired (expires after 7 days) + - Check project permissions + - Ensure Supabase project is accessible + - For CI/CD, verify environment variables are set + + + + + + **Problem**: Agent reports no tables found to audit + + **Solutions**: + - 
Verify PR contains database-related changes + - Check that migration files use standard naming + - Ensure SDK calls use `.from('table_name')` pattern + - Review PR for schema/type definition changes + + + + + + **Problem**: Agent identifies issues but doesn't create fixes + + **Solutions**: + - Check if issues are Low priority (agent suggests only) + - Verify agent has write access to repository + - Review error messages in agent logs + - Ensure Supabase MCP connection is working + - Check if migration directory exists + + + + + + **Problem**: Auto-generated RLS policies block legitimate access + + **Solutions**: + - Review and adjust policies in the generated migration + - Add custom logic for your specific access patterns + - Mark tables for manual policy creation if needed + - Provide feedback to improve future policy generation + + + + + + **Problem**: Generated migration conflicts with existing changes + + **Solutions**: + - Review both migrations and merge manually + - Ensure migration timestamps are unique + - Check for duplicate policy names + - Adjust generated migration as needed + + + +## Security Best Practices + + + + **Development vs. 
Production**: + - Always test RLS policies in development first + - Use separate Supabase projects for dev/staging/prod + - Never disable RLS on production tables + - Review all generated policies before deploying + - Test policies with different user roles + + + + + + Before deploying RLS changes, verify: + - [ ] RLS is enabled on all sensitive tables + - [ ] Policies cover all CRUD operations (SELECT, INSERT, UPDATE, DELETE) + - [ ] User-scoped data includes `auth.uid()` checks + - [ ] Admin overrides verify admin role properly + - [ ] Public access is intentional and documented + - [ ] Policies include `WITH CHECK` clauses for writes + - [ ] Soft deletes filter out `deleted_at IS NOT NULL` + - [ ] Policy names follow naming conventions + - [ ] Rollback commands are included + - [ ] Policies are tested with different user scenarios + + + +## Support & Resources + + + + + + Complete guide to database security automation with Supabase and Continue + + + + + + Combine Supabase with GitHub for automated security workflows + + + + + + Official Supabase Row Level Security documentation + + + + + + Learn about the Supabase MCP Server. + + + + diff --git a/docs/mission-control/tasks.mdx b/docs/mission-control/tasks.mdx index 4dd58fb22e0..9efd782229c 100644 --- a/docs/mission-control/tasks.mdx +++ b/docs/mission-control/tasks.mdx @@ -34,7 +34,7 @@ description: "A Task is a unit of work shared between you and an agent. You trig - Pick the **repository** and **branch** where the Agent will work. + Pick the **repository** and **branch** the Agent branch off of. 
diff --git a/docs/telemetry.mdx b/docs/telemetry.mdx deleted file mode 100644 index 12dc39bf2dd..00000000000 --- a/docs/telemetry.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: "Telemetry" -icon: "chart-line" -description: "Learn about Continue's anonymous telemetry collection practices, what usage data is tracked, and how to opt out of data collection to maintain your privacy preferences" ---- - -## Overview - -The open-source Continue Extensions collect and report **anonymous** usage information to help us improve our product. This data enables us to understand user interactions and optimize the user experience effectively. You can opt out of telemetry collection at any time if you prefer not to share your usage information. - -We utilize [Posthog](https://posthog.com/), an open-source platform for product analytics, to gather and store this data. For transparency, you can review the implementation code [here](https://github.com/continuedev/continue/blob/main/gui/src/hooks/CustomPostHogProvider.tsx) or read our [official privacy policy](https://continue.dev/privacy). - -## Tracking Policy - -All data collected by the open-source Continue extensions is anonymized and stripped of personally identifiable information (PII) before being sent to PostHog. We are committed to maintaining the privacy and security of your data. - -## What We Track - -The following usage information is collected and reported: - -- **Suggestion Interactions:** Whether you accept or reject suggestions (excluding the actual code or prompts involved). -- **Model and Command Information:** The name of the model and command used. -- **Token Metrics:** The number of tokens generated. -- **System Information:** The name of your operating system (OS) and integrated development environment (IDE). -- **Pageviews:** General pageview statistics. - -## How to Opt Out - -### IDE extensions - -You can disable anonymous telemetry by toggling "Allow Anonymous Telemetry" off in the user settings. 
- -#### VS Code - -Alternatively in VS Code, you can disable telemetry through your VS Code settings by unchecking the "Continue: Telemetry Enabled" box (this will override the Settings Page settings). VS Code settings can be accessed with `File` > `Preferences` > `Settings` (or use the keyboard shortcut `ctrl` + `,` on Windows/Linux or `cmd` + `,` on macOS). - -### CLI - -For `cn`, the Continue CLI, set the environment variable `CONTINUE_TELEMETRY_ENABLED=0` before running commands: - -```bash -export CONTINUE_TELEMETRY_ENABLED=0 -cn -``` - -Or run it inline: - -```bash -CONTINUE_TELEMETRY_ENABLED=0 cn -``` \ No newline at end of file diff --git a/extensions/cli/src/services/GitAiIntegrationService.test.ts b/extensions/cli/src/services/GitAiIntegrationService.test.ts new file mode 100644 index 00000000000..edac165530e --- /dev/null +++ b/extensions/cli/src/services/GitAiIntegrationService.test.ts @@ -0,0 +1,514 @@ +import type { ChildProcess } from "child_process"; +import { EventEmitter } from "events"; + +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; + +import type { PreprocessedToolCall } from "../tools/types.js"; + +import { GitAiIntegrationService } from "./GitAiIntegrationService.js"; + +// Mock child_process +vi.mock("child_process", () => ({ + exec: vi.fn(), + spawn: vi.fn(), +})); + +// Mock session functions +vi.mock("../session.js", () => ({ + getCurrentSession: vi.fn(), + getSessionFilePath: vi.fn(), +})); + +// Mock serviceContainer +vi.mock("./ServiceContainer.js", () => ({ + serviceContainer: { + getSync: vi.fn(), + }, +})); + +// Mock logger +vi.mock("../util/logger.js", () => ({ + logger: { + debug: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + }, +})); + +describe("GitAiIntegrationService", () => { + let service: GitAiIntegrationService; + let mockExec: any; + let mockSpawn: any; + let mockGetCurrentSession: any; + let mockGetSessionFilePath: any; + let mockServiceContainer: any; + + beforeEach(async () => { + // 
Import mocked modules + const childProcess = await import("child_process"); + const session = await import("../session.js"); + const { serviceContainer } = await import("./ServiceContainer.js"); + + mockExec = childProcess.exec as any; + mockSpawn = childProcess.spawn as any; + mockGetCurrentSession = session.getCurrentSession as any; + mockGetSessionFilePath = session.getSessionFilePath as any; + mockServiceContainer = serviceContainer; + + // Setup default mocks + mockGetCurrentSession.mockReturnValue({ + sessionId: "test-session-id", + workspaceDirectory: "/test/workspace", + chatModelTitle: "claude-sonnet-4-5", + }); + + mockGetSessionFilePath.mockReturnValue( + "/test/.continue/sessions/test-session-id.json", + ); + + mockServiceContainer.getSync.mockReturnValue({ + value: { + model: { + model: "claude-sonnet-4-5", + }, + }, + }); + + service = new GitAiIntegrationService(); + }); + + afterEach(() => { + vi.clearAllMocks(); + }); + + describe("initialization", () => { + it("should check if git-ai is available on initialization", async () => { + mockExec.mockImplementation((_cmd: string, callback: Function) => { + callback(null); // No error = git-ai is available + }); + + const state = await service.initialize(); + + expect(state.isEnabled).toBe(true); + expect(state.isGitAiAvailable).toBe(true); + expect(mockExec).toHaveBeenCalledWith( + "git-ai --version", + expect.any(Function), + ); + }); + + it("should mark git-ai as unavailable if version check fails", async () => { + mockExec.mockImplementation((_cmd: string, callback: Function) => { + callback(new Error("command not found")); // Error = git-ai not available + }); + + const state = await service.initialize(); + + expect(state.isEnabled).toBe(true); + expect(state.isGitAiAvailable).toBe(false); + }); + }); + + describe("trackToolUse", () => { + beforeEach(async () => { + mockExec.mockImplementation((_cmd: string, callback: Function) => { + callback(null); // git-ai is available + }); + await 
service.initialize(); + }); + + it("should not track non-file-editing tools", async () => { + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Bash", + arguments: { command: "ls" }, + argumentsStr: JSON.stringify({ command: "ls" }), + startNotified: false, + tool: {} as any, + }; + + await service.trackToolUse(toolCall, "PreToolUse"); + + expect(mockSpawn).not.toHaveBeenCalled(); + }); + + it("should track Edit tool usage", async () => { + const mockProcess = createMockChildProcess(); + mockSpawn.mockReturnValue(mockProcess); + + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Edit", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: {} as any, + preprocessResult: { + args: { + resolvedPath: "/test/file.ts", + }, + }, + }; + + await service.trackToolUse(toolCall, "PreToolUse"); + + expect(mockSpawn).toHaveBeenCalledWith( + "git-ai", + ["checkpoint", "continue-cli", "--hook-input", "stdin"], + { cwd: "/test/workspace" }, + ); + + // Check that the correct JSON was written to stdin + const writtenData = (mockProcess.stdin!.write as any).mock.calls[0][0]; + const hookInput = JSON.parse(writtenData); + + expect(hookInput).toMatchObject({ + session_id: "test-session-id", + transcript_path: "/test/.continue/sessions/test-session-id.json", + cwd: "/test/workspace", + model: "claude-sonnet-4-5", + hook_event_name: "PreToolUse", + tool_input: { + file_path: "/test/file.ts", + }, + }); + }); + + it("should track MultiEdit tool usage", async () => { + const mockProcess = createMockChildProcess(); + mockSpawn.mockReturnValue(mockProcess); + + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "MultiEdit", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: {} as any, + preprocessResult: { + args: { + file_path: "/test/file.ts", + }, + }, + }; + + await service.trackToolUse(toolCall, "PostToolUse"); + + const writtenData = (mockProcess.stdin!.write as 
any).mock.calls[0][0]; + const hookInput = JSON.parse(writtenData); + + expect(hookInput.hook_event_name).toBe("PostToolUse"); + expect(hookInput.tool_input.file_path).toBe("/test/file.ts"); + }); + + it("should track Write tool usage", async () => { + const mockProcess = createMockChildProcess(); + mockSpawn.mockReturnValue(mockProcess); + + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Write", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: {} as any, + preprocessResult: { + args: { + filepath: "/test/newfile.ts", + }, + }, + }; + + await service.trackToolUse(toolCall, "PreToolUse"); + + const writtenData = (mockProcess.stdin!.write as any).mock.calls[0][0]; + const hookInput = JSON.parse(writtenData); + + expect(hookInput.tool_input.file_path).toBe("/test/newfile.ts"); + }); + + it("should not track if no file path is found", async () => { + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Edit", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: {} as any, + preprocessResult: { + args: {}, // No file path + }, + }; + + await service.trackToolUse(toolCall, "PreToolUse"); + + expect(mockSpawn).not.toHaveBeenCalled(); + }); + + it("should not track if service is disabled", async () => { + service.setEnabled(false); + + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Edit", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: {} as any, + preprocessResult: { + args: { resolvedPath: "/test/file.ts" }, + }, + }; + + await service.trackToolUse(toolCall, "PreToolUse"); + + expect(mockSpawn).not.toHaveBeenCalled(); + }); + + it("should not track if git-ai is unavailable", async () => { + // Reinitialize with git-ai unavailable + mockExec.mockImplementation((_cmd: string, callback: Function) => { + callback(new Error("not found")); + }); + await service.initialize(); + + const toolCall: PreprocessedToolCall = { + 
id: "test-id", + name: "Edit", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: {} as any, + preprocessResult: { + args: { resolvedPath: "/test/file.ts" }, + }, + }; + + await service.trackToolUse(toolCall, "PreToolUse"); + + expect(mockSpawn).not.toHaveBeenCalled(); + }); + + it("should omit model field if model is not available", async () => { + mockServiceContainer.getSync.mockReturnValue({ + value: { + model: null, + }, + }); + + const mockProcess = createMockChildProcess(); + mockSpawn.mockReturnValue(mockProcess); + + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Edit", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: {} as any, + preprocessResult: { + args: { resolvedPath: "/test/file.ts" }, + }, + }; + + await service.trackToolUse(toolCall, "PreToolUse"); + + const writtenData = (mockProcess.stdin!.write as any).mock.calls[0][0]; + const hookInput = JSON.parse(writtenData); + + expect(hookInput.model).toBeUndefined(); + }); + + it("should handle git-ai errors gracefully", async () => { + const mockProcess = createMockChildProcess(1); // Exit with error code + mockSpawn.mockReturnValue(mockProcess); + + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Edit", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: {} as any, + preprocessResult: { + args: { resolvedPath: "/test/file.ts" }, + }, + }; + + // Should not throw + await expect( + service.trackToolUse(toolCall, "PreToolUse"), + ).resolves.toBeUndefined(); + + // Should mark git-ai as unavailable + const state = service.getState(); + expect(state.isGitAiAvailable).toBe(false); + }); + + it("should handle spawn errors gracefully", async () => { + const mockProcess = createMockChildProcess(); + mockSpawn.mockReturnValue(mockProcess); + + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Edit", + arguments: {}, + argumentsStr: JSON.stringify({}), + 
startNotified: false, + tool: {} as any, + preprocessResult: { + args: { resolvedPath: "/test/file.ts" }, + }, + }; + + // Trigger an error event + setTimeout(() => { + mockProcess.emit("error", new Error("spawn failed")); + }, 0); + + // Should not throw + await expect( + service.trackToolUse(toolCall, "PreToolUse"), + ).resolves.toBeUndefined(); + + // Should mark git-ai as unavailable + const state = service.getState(); + expect(state.isGitAiAvailable).toBe(false); + }); + }); + + describe("extractFilePathFromToolCall", () => { + it("should extract path from Edit tool", () => { + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Edit", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: {} as any, + preprocessResult: { + args: { resolvedPath: "/test/edit.ts" }, + }, + }; + + const result = service.extractFilePathFromToolCall(toolCall); + expect(result).toBe("/test/edit.ts"); + }); + + it("should extract path from MultiEdit tool", () => { + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "MultiEdit", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: {} as any, + preprocessResult: { + args: { file_path: "/test/multiedit.ts" }, + }, + }; + + const result = service.extractFilePathFromToolCall(toolCall); + expect(result).toBe("/test/multiedit.ts"); + }); + + it("should extract path from Write tool", () => { + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Write", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: {} as any, + preprocessResult: { + args: { filepath: "/test/write.ts" }, + }, + }; + + const result = service.extractFilePathFromToolCall(toolCall); + expect(result).toBe("/test/write.ts"); + }); + + it("should return null if no preprocessResult", () => { + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Edit", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: 
false, + tool: {} as any, + }; + + const result = service.extractFilePathFromToolCall(toolCall); + expect(result).toBeNull(); + }); + + it("should return null if no args", () => { + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Edit", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: {} as any, + preprocessResult: {} as any, + }; + + const result = service.extractFilePathFromToolCall(toolCall); + expect(result).toBeNull(); + }); + + it("should return null for unknown tool", () => { + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "UnknownTool", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: {} as any, + preprocessResult: { + args: { somePath: "/test/file.ts" }, + }, + }; + + const result = service.extractFilePathFromToolCall(toolCall); + expect(result).toBeNull(); + }); + }); + + describe("setEnabled", () => { + it("should enable the service", () => { + service.setEnabled(true); + const state = service.getState(); + expect(state.isEnabled).toBe(true); + }); + + it("should disable the service", () => { + service.setEnabled(false); + const state = service.getState(); + expect(state.isEnabled).toBe(false); + }); + }); +}); + +/** + * Helper function to create a mock ChildProcess + */ +function createMockChildProcess(exitCode: number = 0): ChildProcess { + const mockProcess = new EventEmitter() as any; + + mockProcess.stdin = { + write: vi.fn(), + end: vi.fn(), + }; + + mockProcess.stdout = new EventEmitter(); + mockProcess.stderr = new EventEmitter(); + + // Simulate process completion after a short delay + setTimeout(() => { + mockProcess.emit("close", exitCode); + }, 10); + + return mockProcess; +} diff --git a/extensions/cli/src/services/GitAiIntegrationService.ts b/extensions/cli/src/services/GitAiIntegrationService.ts new file mode 100644 index 00000000000..fd8b64a1e34 --- /dev/null +++ b/extensions/cli/src/services/GitAiIntegrationService.ts @@ -0,0 
+1,251 @@ +import { exec, spawn } from "child_process"; + +import { PreprocessedToolCall } from "src/tools/types.js"; + +import { getCurrentSession, getSessionFilePath } from "../session.js"; +import { logger } from "../util/logger.js"; + +import { BaseService } from "./BaseService.js"; +import { serviceContainer } from "./ServiceContainer.js"; +import type { ModelServiceState } from "./types.js"; + +interface GitAiHookInput { + session_id: string; + transcript_path: string; + cwd: string; + model?: string; + hook_event_name: "PreToolUse" | "PostToolUse"; + tool_input: { + file_path: string; + }; +} + +export interface GitAiIntegrationServiceState { + isEnabled: boolean; + isGitAiAvailable: boolean | null; // null = not checked yet +} + +export class GitAiIntegrationService extends BaseService { + constructor() { + super("GitAiIntegrationService", { + isEnabled: true, + isGitAiAvailable: null, + }); + } + + async doInitialize(): Promise { + // Check if git-ai is available on first initialization + const isAvailable = await this.checkGitAiAvailable(); + return { + isEnabled: true, + isGitAiAvailable: isAvailable, + }; + } + + private async checkGitAiAvailable(): Promise { + return new Promise((resolve) => { + try { + exec("git-ai --version", (error) => { + if (error) { + resolve(false); + return; + } + resolve(true); + }); + } catch { + // Handle edge case where exec throws synchronously + resolve(false); + } + }); + } + + /** + * Helper function to call git-ai checkpoint with the given hook input + */ + private async callGitAiCheckpoint( + hookInput: GitAiHookInput, + workspaceDirectory: string, + ): Promise { + const hookInputJson = JSON.stringify(hookInput); + + logger.debug("Calling git-ai checkpoint", { + hookInput, + workspaceDirectory, + }); + + await new Promise((resolve, reject) => { + const gitAiProcess = spawn( + "git-ai", + ["checkpoint", "continue-cli", "--hook-input", "stdin"], + { cwd: workspaceDirectory }, + ); + + let stdout = ""; + let stderr = ""; 
+ + gitAiProcess.stdout?.on("data", (data: Buffer) => { + stdout += data.toString(); + }); + + gitAiProcess.stderr?.on("data", (data: Buffer) => { + stderr += data.toString(); + }); + + gitAiProcess.on("error", (error: Error) => { + reject(error); + }); + + gitAiProcess.on("close", (code: number | null) => { + if (code === 0) { + logger.debug("git-ai checkpoint completed", { stdout, stderr }); + resolve(); + } else { + reject( + new Error(`git-ai checkpoint exited with code ${code}: ${stderr}`), + ); + } + }); + + // Write JSON to stdin and close + gitAiProcess.stdin?.write(hookInputJson); + gitAiProcess.stdin?.end(); + }); + } + + async trackToolUse( + toolCall: PreprocessedToolCall, + hookEventName: "PreToolUse" | "PostToolUse", + ): Promise { + try { + if (!this.currentState.isEnabled) { + return; + } + const isFileEdit = ["Edit", "MultiEdit", "Write"].includes(toolCall.name); + if (!isFileEdit) { + return; + } + + const filePath = this.extractFilePathFromToolCall(toolCall); + if (filePath) { + if (hookEventName === "PreToolUse") { + await this.beforeFileEdit(filePath); + } else if (hookEventName === "PostToolUse") { + await this.afterFileEdit(filePath); + } + } + } catch (error) { + logger.warn("git-ai tool use tracking failed", { + error, + toolCall, + hookEventName, + }); + // Don't throw - allow tool use to proceed without Git AI checkpoint + } + } + + async beforeFileEdit(filePath: string): Promise { + if (!this.currentState.isEnabled) { + return; + } + + // Skip if git-ai is not available + if (this.currentState.isGitAiAvailable === false) { + return; + } + + try { + const session = getCurrentSession(); + const sessionFilePath = getSessionFilePath(); + + // Get current model from ModelService via serviceContainer + const modelState = serviceContainer.getSync("model"); + const modelName = modelState?.value?.model?.model; + + const hookInput: GitAiHookInput = { + session_id: session.sessionId, + transcript_path: sessionFilePath, + cwd: 
session.workspaceDirectory, + hook_event_name: "PreToolUse", + tool_input: { + file_path: filePath, + }, + }; + + // Only include model if it's available + if (modelName) { + hookInput.model = modelName; + } + + await this.callGitAiCheckpoint(hookInput, session.workspaceDirectory); + } catch (error) { + logger.warn("git-ai checkpoint (pre-edit) failed", { error, filePath }); + // Mark as unavailable if command fails + this.setState({ isGitAiAvailable: false }); + // Don't throw - allow file edit to proceed + } + } + + async afterFileEdit(filePath: string): Promise { + if (!this.currentState.isEnabled) { + return; + } + + // Skip if git-ai is not available + if (this.currentState.isGitAiAvailable === false) { + return; + } + + try { + const session = getCurrentSession(); + const sessionFilePath = getSessionFilePath(); + + // Get current model from ModelService via serviceContainer + const modelState = serviceContainer.getSync("model"); + const modelName = modelState?.value?.model?.model; + + const hookInput: GitAiHookInput = { + session_id: session.sessionId, + transcript_path: sessionFilePath, + cwd: session.workspaceDirectory, + hook_event_name: "PostToolUse", + tool_input: { + file_path: filePath, + }, + }; + + // Only include model if it's available + if (modelName) { + hookInput.model = modelName; + } + + await this.callGitAiCheckpoint(hookInput, session.workspaceDirectory); + } catch (error) { + logger.warn("git-ai checkpoint (post-edit) failed", { error, filePath }); + // Mark as unavailable if command fails + this.setState({ isGitAiAvailable: false }); + // Don't throw - file edit already completed + } + } + + setEnabled(enabled: boolean): void { + this.setState({ isEnabled: enabled }); + } + + extractFilePathFromToolCall(toolCall: PreprocessedToolCall): string | null { + const preprocessed = toolCall.preprocessResult; + if (!preprocessed?.args) return null; + + const args = preprocessed.args; + + // Extract file path based on tool type + if (toolCall.name 
=== "Edit" && args.resolvedPath) { + return args.resolvedPath; + } else if (toolCall.name === "MultiEdit" && args.file_path) { + return args.file_path; + } else if (toolCall.name === "Write" && args.filepath) { + return args.filepath; + } + + return null; + } +} diff --git a/extensions/cli/src/services/index.ts b/extensions/cli/src/services/index.ts index ee79874e0a6..f53a53c8471 100644 --- a/extensions/cli/src/services/index.ts +++ b/extensions/cli/src/services/index.ts @@ -10,6 +10,7 @@ import { AuthService } from "./AuthService.js"; import { ChatHistoryService } from "./ChatHistoryService.js"; import { ConfigService } from "./ConfigService.js"; import { FileIndexService } from "./FileIndexService.js"; +import { GitAiIntegrationService } from "./GitAiIntegrationService.js"; import { MCPService } from "./MCPService.js"; import { ModelService } from "./ModelService.js"; import { ResourceMonitoringService } from "./ResourceMonitoringService.js"; @@ -46,6 +47,7 @@ const agentFileService = new AgentFileService(); const toolPermissionService = new ToolPermissionService(); const systemMessageService = new SystemMessageService(); const artifactUploadService = new ArtifactUploadService(); +const gitAiIntegrationService = new GitAiIntegrationService(); /** * Initialize all services and register them with the service container @@ -310,6 +312,12 @@ export async function initializeServices(initOptions: ServiceInitOptions = {}) { [], // No dependencies for now, but could depend on SESSION in future ); + serviceContainer.register( + SERVICE_NAMES.GIT_AI_INTEGRATION, + () => gitAiIntegrationService.initialize(), + [], // No dependencies + ); + // Eagerly initialize all services to ensure they're ready when needed // This avoids race conditions and "service not ready" errors await serviceContainer.initializeAll(); @@ -372,6 +380,7 @@ export const services = { agentFile: agentFileService, toolPermissions: toolPermissionService, artifactUpload: artifactUploadService, + 
gitAiIntegration: gitAiIntegrationService, } as const; // Export the service container for advanced usage diff --git a/extensions/cli/src/services/types.ts b/extensions/cli/src/services/types.ts index f9f15f52aa7..b9804bcd26b 100644 --- a/extensions/cli/src/services/types.ts +++ b/extensions/cli/src/services/types.ts @@ -132,6 +132,7 @@ export interface ArtifactUploadServiceState { export type { ChatHistoryState } from "./ChatHistoryService.js"; export type { FileIndexServiceState } from "./FileIndexService.js"; +export type { GitAiIntegrationServiceState } from "./GitAiIntegrationService.js"; /** * Service names as constants to prevent typos @@ -151,6 +152,7 @@ export const SERVICE_NAMES = { STORAGE_SYNC: "storageSync", AGENT_FILE: "agentFile", ARTIFACT_UPLOAD: "artifactUpload", + GIT_AI_INTEGRATION: "gitAiIntegration", } as const; /** diff --git a/extensions/cli/src/tools/gitAiIntegration.test.ts b/extensions/cli/src/tools/gitAiIntegration.test.ts new file mode 100644 index 00000000000..cc5c5f52a9e --- /dev/null +++ b/extensions/cli/src/tools/gitAiIntegration.test.ts @@ -0,0 +1,337 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; + +import type { PreprocessedToolCall } from "./types.js"; + +import { executeToolCall } from "./index.js"; + +// Mock the services +vi.mock("../services/index.js", () => ({ + services: { + gitAiIntegration: { + trackToolUse: vi.fn().mockResolvedValue(undefined), + }, + }, + SERVICE_NAMES: {}, + serviceContainer: {}, +})); + +// Mock telemetry services +vi.mock("../telemetry/telemetryService.js", () => ({ + telemetryService: { + logToolResult: vi.fn(), + }, +})); + +vi.mock("../telemetry/posthogService.js", () => ({ + posthogService: { + capture: vi.fn(), + }, +})); + +// Mock logger +vi.mock("../util/logger.js", () => ({ + logger: { + debug: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + }, +})); + +describe("Git AI Integration - executeToolCall", () => { + let mockGitAiService: any; + + beforeEach(async () 
=> { + const { services } = await import("../services/index.js"); + mockGitAiService = services.gitAiIntegration; + vi.clearAllMocks(); + }); + + afterEach(() => { + vi.clearAllMocks(); + }); + + describe("File editing tools", () => { + it("should call git-ai before and after Edit tool execution", async () => { + const mockTool = { + run: vi.fn().mockResolvedValue("Edit completed"), + }; + + const toolCall: PreprocessedToolCall = { + id: "test-edit-id", + name: "Edit", + arguments: { file_path: "/test/file.ts" }, + argumentsStr: JSON.stringify({ file_path: "/test/file.ts" }), + startNotified: false, + tool: mockTool as any, + preprocessResult: { + args: { + resolvedPath: "/test/file.ts", + oldContent: "old", + newContent: "new", + }, + }, + }; + + const result = await executeToolCall(toolCall); + + expect(result).toBe("Edit completed"); + + // Should call trackToolUse twice: PreToolUse and PostToolUse + expect(mockGitAiService.trackToolUse).toHaveBeenCalledTimes(2); + + // Check PreToolUse call + expect(mockGitAiService.trackToolUse).toHaveBeenNthCalledWith( + 1, + toolCall, + "PreToolUse", + ); + + // Check PostToolUse call + expect(mockGitAiService.trackToolUse).toHaveBeenNthCalledWith( + 2, + toolCall, + "PostToolUse", + ); + + // Verify tool.run was called + expect(mockTool.run).toHaveBeenCalledWith({ + resolvedPath: "/test/file.ts", + oldContent: "old", + newContent: "new", + }); + }); + + it("should call git-ai before and after MultiEdit tool execution", async () => { + const mockTool = { + run: vi.fn().mockResolvedValue("MultiEdit completed"), + }; + + const toolCall: PreprocessedToolCall = { + id: "test-multiedit-id", + name: "MultiEdit", + arguments: { file_path: "/test/file.ts" }, + argumentsStr: JSON.stringify({ file_path: "/test/file.ts" }), + startNotified: false, + tool: mockTool as any, + preprocessResult: { + args: { + file_path: "/test/file.ts", + edits: [], + }, + }, + }; + + await executeToolCall(toolCall); + + 
expect(mockGitAiService.trackToolUse).toHaveBeenCalledTimes(2); + expect(mockGitAiService.trackToolUse).toHaveBeenCalledWith( + toolCall, + "PreToolUse", + ); + expect(mockGitAiService.trackToolUse).toHaveBeenCalledWith( + toolCall, + "PostToolUse", + ); + }); + + it("should call git-ai before and after Write tool execution", async () => { + const mockTool = { + run: vi.fn().mockResolvedValue("Write completed"), + }; + + const toolCall: PreprocessedToolCall = { + id: "test-write-id", + name: "Write", + arguments: { filepath: "/test/newfile.ts" }, + argumentsStr: JSON.stringify({ filepath: "/test/newfile.ts" }), + startNotified: false, + tool: mockTool as any, + preprocessResult: { + args: { + filepath: "/test/newfile.ts", + content: "new content", + }, + }, + }; + + await executeToolCall(toolCall); + + expect(mockGitAiService.trackToolUse).toHaveBeenCalledTimes(2); + expect(mockGitAiService.trackToolUse).toHaveBeenCalledWith( + toolCall, + "PreToolUse", + ); + expect(mockGitAiService.trackToolUse).toHaveBeenCalledWith( + toolCall, + "PostToolUse", + ); + }); + + it("should complete file edit even if git-ai tracking encounters errors internally", async () => { + // Note: trackToolUse has internal error handling, so it won't throw + // This test verifies that the tool execution completes normally + const mockTool = { + run: vi.fn().mockResolvedValue("Edit completed despite internal error"), + }; + + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Edit", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: mockTool as any, + preprocessResult: { + args: { resolvedPath: "/test/file.ts" }, + }, + }; + + const result = await executeToolCall(toolCall); + + expect(result).toBe("Edit completed despite internal error"); + expect(mockTool.run).toHaveBeenCalled(); + expect(mockGitAiService.trackToolUse).toHaveBeenCalledTimes(2); + }); + }); + + describe("Non-file editing tools", () => { + it("should call trackToolUse for Bash 
tool (service will no-op internally)", async () => { + const mockTool = { + run: vi.fn().mockResolvedValue("Command output"), + }; + + const toolCall: PreprocessedToolCall = { + id: "test-bash-id", + name: "Bash", + arguments: { command: "ls" }, + argumentsStr: JSON.stringify({ command: "ls" }), + startNotified: false, + tool: mockTool as any, + }; + + await executeToolCall(toolCall); + + // trackToolUse is called but service checks isFileEdit internally + expect(mockGitAiService.trackToolUse).toHaveBeenCalledTimes(2); + expect(mockGitAiService.trackToolUse).toHaveBeenCalledWith( + toolCall, + "PreToolUse", + ); + expect(mockGitAiService.trackToolUse).toHaveBeenCalledWith( + toolCall, + "PostToolUse", + ); + expect(mockTool.run).toHaveBeenCalled(); + }); + + it("should call trackToolUse for Read tool (service will no-op internally)", async () => { + const mockTool = { + run: vi.fn().mockResolvedValue("File contents"), + }; + + const toolCall: PreprocessedToolCall = { + id: "test-read-id", + name: "Read", + arguments: { file_path: "/test/file.ts" }, + argumentsStr: JSON.stringify({ file_path: "/test/file.ts" }), + startNotified: false, + tool: mockTool as any, + }; + + await executeToolCall(toolCall); + + expect(mockGitAiService.trackToolUse).toHaveBeenCalledTimes(2); + expect(mockTool.run).toHaveBeenCalled(); + }); + + it("should call trackToolUse for Grep tool (service will no-op internally)", async () => { + const mockTool = { + run: vi.fn().mockResolvedValue("Search results"), + }; + + const toolCall: PreprocessedToolCall = { + id: "test-grep-id", + name: "Grep", + arguments: { pattern: "test" }, + argumentsStr: JSON.stringify({ pattern: "test" }), + startNotified: false, + tool: mockTool as any, + }; + + await executeToolCall(toolCall); + + expect(mockGitAiService.trackToolUse).toHaveBeenCalledTimes(2); + expect(mockTool.run).toHaveBeenCalled(); + }); + }); + + describe("Error handling", () => { + it("should propagate tool execution errors", async () => { + 
const mockTool = { + run: vi.fn().mockRejectedValue(new Error("Tool execution failed")), + }; + + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Edit", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: mockTool as any, + preprocessResult: { + args: { resolvedPath: "/test/file.ts" }, + }, + }; + + await expect(executeToolCall(toolCall)).rejects.toThrow( + "Tool execution failed", + ); + + // PreToolUse should have been called + expect(mockGitAiService.trackToolUse).toHaveBeenCalledWith( + toolCall, + "PreToolUse", + ); + }); + }); + + describe("Execution order", () => { + it("should execute in correct order: PreToolUse -> tool.run -> PostToolUse", async () => { + const executionOrder: string[] = []; + + mockGitAiService.trackToolUse.mockImplementation( + (_toolCall: any, phase: string) => { + executionOrder.push(`git-ai:${phase}`); + return Promise.resolve(); + }, + ); + + const mockTool = { + run: vi.fn().mockImplementation(() => { + executionOrder.push("tool:run"); + return Promise.resolve("result"); + }), + }; + + const toolCall: PreprocessedToolCall = { + id: "test-id", + name: "Edit", + arguments: {}, + argumentsStr: JSON.stringify({}), + startNotified: false, + tool: mockTool as any, + preprocessResult: { + args: { resolvedPath: "/test/file.ts" }, + }, + }; + + await executeToolCall(toolCall); + + expect(executionOrder).toEqual([ + "git-ai:PreToolUse", + "tool:run", + "git-ai:PostToolUse", + ]); + }); + }); +}); diff --git a/extensions/cli/src/tools/index.tsx b/extensions/cli/src/tools/index.tsx index cf3e1b1dd1d..efc7a1e5f79 100644 --- a/extensions/cli/src/tools/index.tsx +++ b/extensions/cli/src/tools/index.tsx @@ -207,6 +207,9 @@ export async function executeToolCall( arguments: toolCall.arguments, }); + // Track edits if Git AI is enabled (no-op if not enabled) + await services.gitAiIntegration.trackToolUse(toolCall, "PreToolUse"); + // IMPORTANT: if preprocessed args are present, uses preprocessed 
args instead of original args // Preprocessed arg names may be different const result = await toolCall.tool.run( @@ -214,6 +217,9 @@ export async function executeToolCall( ); const duration = Date.now() - startTime; + // Track edits if Git AI is enabled (no-op if not enabled) + await services.gitAiIntegration.trackToolUse(toolCall, "PostToolUse"); + telemetryService.logToolResult({ toolName: toolCall.name, success: true, diff --git a/extensions/cli/src/ui/__tests__/TUIChat.slashCommands.test.tsx b/extensions/cli/src/ui/__tests__/TUIChat.slashCommands.test.tsx index e1ee6d001be..c0a7a3b7b89 100644 --- a/extensions/cli/src/ui/__tests__/TUIChat.slashCommands.test.tsx +++ b/extensions/cli/src/ui/__tests__/TUIChat.slashCommands.test.tsx @@ -125,15 +125,19 @@ describe("TUIChat - Slash Commands Tests", () => { stdin.write("/title"); let frameAfterCommand = lastFrame(); - await waitForCondition(() => { - frameAfterCommand = lastFrame(); - - return ( - frameAfterCommand?.includes( - mode === "remote" ? "Remote Mode" : "/title", - ) ?? false - ); - }); + await waitForCondition( + () => { + frameAfterCommand = lastFrame(); + + return ( + frameAfterCommand?.includes( + mode === "remote" ? "Remote Mode" : "/title", + ) ?? 
false + ); + }, + 5000, + 100, + ); if (mode === "remote") { // In remote mode, /title might not be a valid command, so just check we're in remote mode diff --git a/extensions/intellij/README.md b/extensions/intellij/README.md index 1eea37b85cb..3fcb09ddbfa 100644 --- a/extensions/intellij/README.md +++ b/extensions/intellij/README.md @@ -24,7 +24,7 @@ -Get started in [Mission Control](https://hub.continue.dev/agents), [CLI (Headless Mode)](https://docs.continue.dev/cli/quick-start#headless-mode), or [CLI (TUI mode)](https://docs.continue.dev/cli/quick-start#tui-mode) +Get started in [Mission Control](https://hub.continue.dev/hub?type=agents), [CLI (Headless Mode)](https://docs.continue.dev/cli/quick-start#headless-mode), or [CLI (TUI mode)](https://docs.continue.dev/cli/quick-start#tui-mode) ## Agent diff --git a/extensions/intellij/src/main/kotlin/com/github/continuedev/continueintellijextension/continue/IntelliJIde.kt b/extensions/intellij/src/main/kotlin/com/github/continuedev/continueintellijextension/continue/IntelliJIde.kt index 19e236465a2..af007259659 100644 --- a/extensions/intellij/src/main/kotlin/com/github/continuedev/continueintellijextension/continue/IntelliJIde.kt +++ b/extensions/intellij/src/main/kotlin/com/github/continuedev/continueintellijextension/continue/IntelliJIde.kt @@ -60,8 +60,8 @@ class IntelliJIDE( "*.db", "*.sqlite", "*.sqlite3", "*.mdb", "*.accdb", // Credential and secret files - "*.secret", "*.secrets", "credentials", "credentials.*", "auth.json", - "token", "token.*", "*.token", + "*.secret", "*.secrets", "credentials", "auth.json", + "token", "*.token", // Backup files that might contain sensitive data "*.bak", "*.backup", "*.old", "*.orig", diff --git a/extensions/vscode/package-lock.json b/extensions/vscode/package-lock.json index 4d085843470..60a79ab45d5 100644 --- a/extensions/vscode/package-lock.json +++ b/extensions/vscode/package-lock.json @@ -34,7 +34,7 @@ "jsdom": "^24.0.0", "lru-cache": "^11.0.2", "minisearch": 
"^7.0.0", - "mocha": "^11.7.1", + "mocha": "^11.7.5", "monaco-editor": "^0.45.0", "monaco-vscode-textmate-theme-converter": "^0.1.7", "ncp": "^2.0.0", @@ -100,16 +100,16 @@ "license": "Apache-2.0", "dependencies": { "@anthropic-ai/sdk": "^0.62.0", - "@aws-sdk/client-bedrock-runtime": "^3.779.0", + "@aws-sdk/client-bedrock-runtime": "^3.931.0", "@aws-sdk/client-sagemaker-runtime": "^3.777.0", - "@aws-sdk/credential-providers": "^3.778.0", + "@aws-sdk/credential-providers": "^3.931.0", "@continuedev/config-types": "^1.0.13", "@continuedev/config-yaml": "file:../packages/config-yaml", "@continuedev/fetch": "file:../packages/fetch", "@continuedev/llm-info": "file:../packages/llm-info", "@continuedev/openai-adapters": "file:../packages/openai-adapters", "@continuedev/terminal-security": "file:../packages/terminal-security", - "@modelcontextprotocol/sdk": "^1.12.0", + "@modelcontextprotocol/sdk": "^1.24.0", "@mozilla/readability": "^0.5.0", "@octokit/rest": "^20.1.1", "@sentry/cli": "^2.50.2", @@ -7820,7 +7820,6 @@ "version": "3.0.3", "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "dev": true, "engines": { "node": ">=8" } @@ -9417,9 +9416,9 @@ "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==" }, "node_modules/mocha": { - "version": "11.7.1", - "resolved": "https://registry.npmjs.org/mocha/-/mocha-11.7.1.tgz", - "integrity": "sha512-5EK+Cty6KheMS/YLPPMJC64g5V61gIR25KsRItHw6x4hEKT6Njp1n9LOlH4gpevuwMVS66SXaBBpg+RWZkza4A==", + "version": "11.7.5", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-11.7.5.tgz", + "integrity": "sha512-mTT6RgopEYABzXWFx+GcJ+ZQ32kp4fMf0xvpZIIfSq9Z8lC/++MtcCnQ9t5FP2veYEP95FIYSvW+U9fV4xrlig==", "license": "MIT", "dependencies": { "browser-stdout": "^1.3.1", @@ -9430,6 +9429,7 @@ "find-up": "^5.0.0", "glob": "^10.4.5", "he": "^1.2.0", 
+ "is-path-inside": "^3.0.3", "js-yaml": "^4.1.0", "log-symbols": "^4.1.0", "minimatch": "^9.0.5", diff --git a/extensions/vscode/package.json b/extensions/vscode/package.json index 9808290a373..c62ab0f40d1 100644 --- a/extensions/vscode/package.json +++ b/extensions/vscode/package.json @@ -753,7 +753,7 @@ "jsdom": "^24.0.0", "lru-cache": "^11.0.2", "minisearch": "^7.0.0", - "mocha": "^11.7.1", + "mocha": "^11.7.5", "monaco-editor": "^0.45.0", "monaco-vscode-textmate-theme-converter": "^0.1.7", "ncp": "^2.0.0", diff --git a/package-lock.json b/package-lock.json index a4b2f971428..fa5611dbe04 100644 --- a/package-lock.json +++ b/package-lock.json @@ -380,7 +380,6 @@ "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, "license": "MIT", - "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -1260,7 +1259,6 @@ "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", @@ -3472,7 +3470,6 @@ "integrity": "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==", "dev": true, "license": "MIT", - "peer": true, "bin": { "prettier": "bin/prettier.cjs" }, @@ -4412,7 +4409,6 @@ "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", "dev": true, "license": "Apache-2.0", - "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" diff --git a/packages/openai-adapters/package-lock.json b/packages/openai-adapters/package-lock.json index b110cac0ea1..aa25508cbc5 100644 --- a/packages/openai-adapters/package-lock.json +++ b/packages/openai-adapters/package-lock.json @@ -1025,6 +1025,7 @@ "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==", "dev": 
true, "license": "MIT", + "peer": true, "engines": { "node": ">=6.9.0" } @@ -1067,6 +1068,7 @@ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, "license": "ISC", + "peer": true, "bin": { "semver": "bin/semver.js" } @@ -1077,6 +1079,7 @@ "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/parser": "^7.28.5", "@babel/types": "^7.28.5", @@ -1094,6 +1097,7 @@ "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/compat-data": "^7.27.2", "@babel/helper-validator-option": "^7.27.1", @@ -1111,6 +1115,7 @@ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, "license": "ISC", + "peer": true, "bin": { "semver": "bin/semver.js" } @@ -1121,6 +1126,7 @@ "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6.9.0" } @@ -1131,6 +1137,7 @@ "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/traverse": "^7.27.1", "@babel/types": "^7.27.1" @@ -1145,6 +1152,7 @@ "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-module-imports": "^7.27.1", "@babel/helper-validator-identifier": "^7.27.1", @@ -1163,6 +1171,7 @@ "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", "dev": true, "license": "MIT", + "peer": true, "engines": { 
"node": ">=6.9.0" } @@ -1173,6 +1182,7 @@ "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6.9.0" } @@ -1193,6 +1203,7 @@ "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6.9.0" } @@ -1203,6 +1214,7 @@ "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/template": "^7.27.2", "@babel/types": "^7.28.4" @@ -1217,6 +1229,7 @@ "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/types": "^7.28.5" }, @@ -1233,6 +1246,7 @@ "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1246,6 +1260,7 @@ "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1259,6 +1274,7 @@ "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.12.13" }, @@ -1272,6 +1288,7 @@ "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.14.5" }, @@ -1288,6 +1305,7 @@ "integrity": 
"sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, @@ -1304,6 +1322,7 @@ "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -1317,6 +1336,7 @@ "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1330,6 +1350,7 @@ "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, @@ -1346,6 +1367,7 @@ "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -1359,6 +1381,7 @@ "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1372,6 +1395,7 @@ "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.10.4" }, @@ -1385,6 +1409,7 @@ "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1398,6 +1423,7 @@ "integrity": 
"sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1411,6 +1437,7 @@ "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -1424,6 +1451,7 @@ "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.14.5" }, @@ -1440,6 +1468,7 @@ "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.14.5" }, @@ -1456,6 +1485,7 @@ "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, @@ -1481,6 +1511,7 @@ "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/parser": "^7.27.2", @@ -1496,6 +1527,7 @@ "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.5", @@ -1515,6 +1547,7 @@ "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.28.5" @@ -1528,7 
+1561,8 @@ "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/@colors/colors": { "version": "1.5.0", @@ -1608,6 +1642,7 @@ "dev": true, "license": "MIT", "optional": true, + "peer": true, "dependencies": { "@emnapi/wasi-threads": "1.1.0", "tslib": "^2.4.0" @@ -1620,6 +1655,7 @@ "dev": true, "license": "MIT", "optional": true, + "peer": true, "dependencies": { "tslib": "^2.4.0" } @@ -1631,6 +1667,7 @@ "dev": true, "license": "MIT", "optional": true, + "peer": true, "dependencies": { "tslib": "^2.4.0" } @@ -2121,6 +2158,7 @@ "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", "dev": true, "license": "ISC", + "peer": true, "dependencies": { "camelcase": "^5.3.1", "find-up": "^4.1.0", @@ -2138,6 +2176,7 @@ "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -2148,6 +2187,7 @@ "integrity": "sha512-+O1ifRjkvYIkBqASKWgLxrpEhQAAE7hY77ALLUufSk5717KfOShg6IbqLmdsLMPdUiFvA2kTs0R7YZy+l0IzZQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -2166,6 +2206,7 @@ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -2179,6 +2220,7 @@ "integrity": "sha512-y4DKFLZ2y6DxTWD4cDe07RglV88ZiNEdlRfGtqahfbIjfsw1nMCPx49Uev4IA/hWn3sDKyAnSPwoYSsAEdcimw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@jest/types": "30.2.0", @@ -2200,6 +2242,7 @@ "integrity": 
"sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -2218,6 +2261,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -2231,6 +2275,7 @@ "integrity": "sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/schemas": "30.0.5", "ansi-styles": "^5.2.0", @@ -2246,6 +2291,7 @@ "integrity": "sha512-03W6IhuhjqTlpzh/ojut/pDB2LPRygyWX8ExpgHtQA8H/3K7+1vKmcINx5UzeOX1se6YEsBsOHQ1CRzf3fOwTQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/console": "30.2.0", "@jest/pattern": "30.0.1", @@ -2294,6 +2340,7 @@ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -2307,6 +2354,7 @@ "integrity": "sha512-y4DKFLZ2y6DxTWD4cDe07RglV88ZiNEdlRfGtqahfbIjfsw1nMCPx49Uev4IA/hWn3sDKyAnSPwoYSsAEdcimw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@jest/types": "30.2.0", @@ -2328,6 +2376,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -2346,6 +2395,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -2359,6 +2409,7 @@ "integrity": "sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==", "dev": true, 
"license": "MIT", + "peer": true, "dependencies": { "@jest/schemas": "30.0.5", "ansi-styles": "^5.2.0", @@ -2374,6 +2425,7 @@ "integrity": "sha512-n5H8QLDJ47QqbCNn5SuFjCRDrOLEZ0h8vAHCK5RL9Ls7Xa8AQLa/YxAc9UjFqoEDM48muwtBGjtMY5cr0PLDCw==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" } @@ -2384,6 +2436,7 @@ "integrity": "sha512-/QPTL7OBJQ5ac09UDRa3EQes4gt1FTEG/8jZ/4v5IVzx+Cv7dLxlVIvfvSVRiiX2drWyXeBjkMSR8hvOWSog5g==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/fake-timers": "30.2.0", "@jest/types": "30.2.0", @@ -2400,6 +2453,7 @@ "integrity": "sha512-V9yxQK5erfzx99Sf+7LbhBwNWEZ9eZay8qQ9+JSC0TrMR1pMDHLMY+BnVPacWU6Jamrh252/IKo4F1Xn/zfiqA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "expect": "30.2.0", "jest-snapshot": "30.2.0" @@ -2427,6 +2481,7 @@ "integrity": "sha512-1JnRfhqpD8HGpOmQp180Fo9Zt69zNtC+9lR+kT7NVL05tNXIi+QC8Csz7lfidMoVLPD3FnOtcmp0CEFnxExGEA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/get-type": "30.1.0" }, @@ -2440,6 +2495,7 @@ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -2453,6 +2509,7 @@ "integrity": "sha512-u/feCi0GPsI+988gU2FLcsHyAHTU0MX1Wg68NhAnN7z/+C5wqG+CY8J53N9ioe8RXgaoz0nBR/TYMf3AycUuPw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/expect-utils": "30.2.0", "@jest/get-type": "30.1.0", @@ -2471,6 +2528,7 @@ "integrity": "sha512-dQHFo3Pt4/NLlG5z4PxZ/3yZTZ1C7s9hveiOj+GCN+uT109NC2QgsoVZsVOAvbJ3RgKkvyLGXZV9+piDpWbm6A==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/diff-sequences": "30.0.1", "@jest/get-type": "30.1.0", @@ -2487,6 +2545,7 @@ "integrity": "sha512-dQ94Nq4dbzmUWkQ0ANAWS9tBRfqCrn0bV9AMYdOi/MHW726xn7eQmMeRTpX2ViC00bpNaWXq+7o4lIQ3AX13Hg==", "dev": true, "license": "MIT", + "peer": true, 
"dependencies": { "@jest/get-type": "30.1.0", "chalk": "^4.1.2", @@ -2503,6 +2562,7 @@ "integrity": "sha512-y4DKFLZ2y6DxTWD4cDe07RglV88ZiNEdlRfGtqahfbIjfsw1nMCPx49Uev4IA/hWn3sDKyAnSPwoYSsAEdcimw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@jest/types": "30.2.0", @@ -2524,6 +2584,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -2542,6 +2603,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -2555,6 +2617,7 @@ "integrity": "sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/schemas": "30.0.5", "ansi-styles": "^5.2.0", @@ -2570,6 +2633,7 @@ "integrity": "sha512-HI3tRLjRxAbBy0VO8dqqm7Hb2mIa8d5bg/NJkyQcOk7V118ObQML8RC5luTF/Zsg4474a+gDvhce7eTnP4GhYw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@sinonjs/fake-timers": "^13.0.0", @@ -2588,6 +2652,7 @@ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -2601,6 +2666,7 @@ "integrity": "sha512-y4DKFLZ2y6DxTWD4cDe07RglV88ZiNEdlRfGtqahfbIjfsw1nMCPx49Uev4IA/hWn3sDKyAnSPwoYSsAEdcimw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@jest/types": "30.2.0", @@ -2622,6 +2688,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", 
@@ -2640,6 +2707,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -2653,6 +2721,7 @@ "integrity": "sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/schemas": "30.0.5", "ansi-styles": "^5.2.0", @@ -2668,6 +2737,7 @@ "integrity": "sha512-eMbZE2hUnx1WV0pmURZY9XoXPkUYjpc55mb0CrhtdWLtzMQPFvu/rZkTLZFTsdaVQa+Tr4eWAteqcUzoawq/uA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" } @@ -2678,6 +2748,7 @@ "integrity": "sha512-b63wmnKPaK+6ZZfpYhz9K61oybvbI1aMcIs80++JI1O1rR1vaxHUCNqo3ITu6NU0d4V34yZFoHMn/uoKr/Rwfw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/environment": "30.2.0", "@jest/expect": "30.2.0", @@ -2694,6 +2765,7 @@ "integrity": "sha512-gWp7NfQW27LaBQz3TITS8L7ZCQ0TLvtmI//4OwlQRx4rnWxcPNIYjxZpDcN4+UlGxgm3jS5QPz8IPTCkb59wZA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@types/node": "*", "jest-regex-util": "30.0.1" @@ -2708,6 +2780,7 @@ "integrity": "sha512-DRyW6baWPqKMa9CzeiBjHwjd8XeAyco2Vt8XbcLFjiwCOEKOvy82GJ8QQnJE9ofsxCMPjH4MfH8fCWIHHDKpAQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@bcoe/v8-coverage": "^0.2.3", "@jest/console": "30.2.0", @@ -2751,6 +2824,7 @@ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -2764,6 +2838,7 @@ "integrity": "sha512-y4DKFLZ2y6DxTWD4cDe07RglV88ZiNEdlRfGtqahfbIjfsw1nMCPx49Uev4IA/hWn3sDKyAnSPwoYSsAEdcimw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@jest/types": "30.2.0", @@ -2785,6 +2860,7 @@ "integrity": 
"sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -2803,6 +2879,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -2816,6 +2893,7 @@ "integrity": "sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/schemas": "30.0.5", "ansi-styles": "^5.2.0", @@ -2831,6 +2909,7 @@ "integrity": "sha512-DmdYgtezMkh3cpU8/1uyXakv3tJRcmcXxBOcO0tbaozPwpmh4YMsnWrQm9ZmZMfa5ocbxzbFk6O4bDPEc/iAnA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@sinclair/typebox": "^0.34.0" }, @@ -2844,6 +2923,7 @@ "integrity": "sha512-0aVxM3RH6DaiLcjj/b0KrIBZhSX1373Xci4l3cW5xiUWPctZ59zQ7jj4rqcJQ/Z8JuN/4wX3FpJSa3RssVvCug==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "chalk": "^4.1.2", @@ -2860,6 +2940,7 @@ "integrity": "sha512-MIRWMUUR3sdbP36oyNyhbThLHyJ2eEDClPCiHVbrYAe5g3CHRArIVpBw7cdSB5fr+ofSfIb2Tnsw8iEHL0PYQg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jridgewell/trace-mapping": "^0.3.25", "callsites": "^3.1.0", @@ -2875,6 +2956,7 @@ "integrity": "sha512-RF+Z+0CCHkARz5HT9mcQCBulb1wgCP3FBvl9VFokMX27acKphwyQsNuWH3c+ojd1LeWBLoTYoxF0zm6S/66mjg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/console": "30.2.0", "@jest/types": "30.2.0", @@ -2891,6 +2973,7 @@ "integrity": "sha512-wXKgU/lk8fKXMu/l5Hog1R61bL4q5GCdT6OJvdAFz1P+QrpoFuLU68eoKuVc4RbrTtNnTL5FByhWdLgOPSph+Q==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/test-result": "30.2.0", "graceful-fs": "^4.2.11", @@ -2907,6 +2990,7 @@ "integrity": 
"sha512-XsauDV82o5qXbhalKxD7p4TZYYdwcaEXC77PPD2HixEFF+6YGppjrAAQurTl2ECWcEomHBMMNS9AH3kcCFx8jA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/core": "^7.27.4", "@jest/types": "30.2.0", @@ -2934,6 +3018,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -2952,6 +3037,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -2965,6 +3051,7 @@ "integrity": "sha512-H9xg1/sfVvyfU7o3zMfBEjQ1gcsdeTMgqHoYdN79tuLqfTtuu7WckRA1R5whDwOzxaZAeMKTYWqP+WCAi0CHsg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/pattern": "30.0.1", "@jest/schemas": "30.0.5", @@ -2984,6 +3071,7 @@ "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0", "@jridgewell/trace-mapping": "^0.3.24" @@ -2995,6 +3083,7 @@ "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.24" @@ -3023,6 +3112,7 @@ "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" @@ -3035,6 +3125,7 @@ "dev": true, "license": "MIT", "optional": true, + "peer": true, "dependencies": { "@emnapi/core": "^1.4.3", "@emnapi/runtime": "^1.4.3", @@ -3057,7 +3148,6 @@ "integrity": 
"sha512-DhGl4xMVFGVIyMwswXeyzdL4uXD5OGILGX5N8Y+f6W7LhC1Ze2poSNrkF/fedpVDHEEZ+PHFW0vL14I+mm8K3Q==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@octokit/auth-token": "^6.0.0", "@octokit/graphql": "^9.0.3", @@ -3240,6 +3330,7 @@ "integrity": "sha512-QNqXyfVS2wm9hweSYD2O7F0G06uurj9kZ96TRQE5Y9hU7+tgdZwIkbAKc5Ocy1HxEY2kuDQa6cQ1WRs/O5LFKA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": "^12.20.0 || ^14.18.0 || >=16.0.0" }, @@ -4045,7 +4136,8 @@ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.34.41.tgz", "integrity": "sha512-6gS8pZzSXdyRHTIqoqSVknxolr1kzfy4/CeDnrzsVz8TTIWUbOBr6gnzOmTYJ3eXQNh4IYHIGi5aIL7sOZ2G/g==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/@sindresorhus/is": { "version": "4.6.0", @@ -4079,6 +4171,7 @@ "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==", "dev": true, "license": "BSD-3-Clause", + "peer": true, "dependencies": { "type-detect": "4.0.8" } @@ -4089,6 +4182,7 @@ "integrity": "sha512-36/hTbH2uaWuGVERyC6da9YwGWnzUZXuPro/F2LfsdOsLnCojz/iSH8MxUt/FD2S5XBSVPhmArFUXcpCQ2Hkiw==", "dev": true, "license": "BSD-3-Clause", + "peer": true, "dependencies": { "@sinonjs/commons": "^3.0.1" } @@ -4778,6 +4872,7 @@ "dev": true, "license": "MIT", "optional": true, + "peer": true, "dependencies": { "tslib": "^2.4.0" } @@ -4788,6 +4883,7 @@ "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/parser": "^7.20.7", "@babel/types": "^7.20.7", @@ -4802,6 +4898,7 @@ "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/types": "^7.0.0" } @@ -4812,6 +4909,7 @@ "integrity": 
"sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/parser": "^7.1.0", "@babel/types": "^7.0.0" @@ -4823,6 +4921,7 @@ "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/types": "^7.28.2" } @@ -4919,7 +5018,6 @@ "integrity": "sha512-GNWcUTRBgIRJD5zj+Tq0fKOJ5XZajIiBroOF0yvj2bSU1WvNdYS/dn9UxwsujGW4JX06dnHyjV2y9rRaybH0iQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "undici-types": "~7.16.0" } @@ -4960,7 +5058,8 @@ "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", "dev": true, - "license": "ISC" + "license": "ISC", + "peer": true }, "node_modules/@unrs/resolver-binding-android-arm-eabi": { "version": "1.11.1", @@ -4974,7 +5073,8 @@ "optional": true, "os": [ "android" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-android-arm64": { "version": "1.11.1", @@ -4988,7 +5088,8 @@ "optional": true, "os": [ "android" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-darwin-arm64": { "version": "1.11.1", @@ -5002,7 +5103,8 @@ "optional": true, "os": [ "darwin" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-darwin-x64": { "version": "1.11.1", @@ -5016,7 +5118,8 @@ "optional": true, "os": [ "darwin" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-freebsd-x64": { "version": "1.11.1", @@ -5030,7 +5133,8 @@ "optional": true, "os": [ "freebsd" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-linux-arm-gnueabihf": { "version": "1.11.1", @@ -5044,7 +5148,8 @@ "optional": true, "os": [ "linux" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-linux-arm-musleabihf": { 
"version": "1.11.1", @@ -5058,7 +5163,8 @@ "optional": true, "os": [ "linux" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-linux-arm64-gnu": { "version": "1.11.1", @@ -5072,7 +5178,8 @@ "optional": true, "os": [ "linux" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-linux-arm64-musl": { "version": "1.11.1", @@ -5086,7 +5193,8 @@ "optional": true, "os": [ "linux" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-linux-ppc64-gnu": { "version": "1.11.1", @@ -5100,7 +5208,8 @@ "optional": true, "os": [ "linux" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-linux-riscv64-gnu": { "version": "1.11.1", @@ -5114,7 +5223,8 @@ "optional": true, "os": [ "linux" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-linux-riscv64-musl": { "version": "1.11.1", @@ -5128,7 +5238,8 @@ "optional": true, "os": [ "linux" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-linux-s390x-gnu": { "version": "1.11.1", @@ -5142,7 +5253,8 @@ "optional": true, "os": [ "linux" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-linux-x64-gnu": { "version": "1.11.1", @@ -5156,7 +5268,8 @@ "optional": true, "os": [ "linux" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-linux-x64-musl": { "version": "1.11.1", @@ -5170,7 +5283,8 @@ "optional": true, "os": [ "linux" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-wasm32-wasi": { "version": "1.11.1", @@ -5182,6 +5296,7 @@ "dev": true, "license": "MIT", "optional": true, + "peer": true, "dependencies": { "@napi-rs/wasm-runtime": "^0.2.11" }, @@ -5201,7 +5316,8 @@ "optional": true, "os": [ "win32" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-win32-ia32-msvc": { "version": "1.11.1", @@ -5215,7 +5331,8 @@ "optional": true, "os": [ "win32" - ] + ], + "peer": true }, "node_modules/@unrs/resolver-binding-win32-x64-msvc": { "version": "1.11.1", @@ -5229,7 +5346,8 @@ "optional": true, "os": [ "win32" - ] + ], + 
"peer": true }, "node_modules/@vitest/expect": { "version": "3.2.4", @@ -5427,6 +5545,7 @@ "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "type-fest": "^0.21.3" }, @@ -5477,6 +5596,7 @@ "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", "dev": true, "license": "ISC", + "peer": true, "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" @@ -5498,6 +5618,7 @@ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "sprintf-js": "~1.0.2" } @@ -5532,6 +5653,7 @@ "integrity": "sha512-0YiBEOxWqKkSQWL9nNGGEgndoeL0ZpWrbLMNL5u/Kaxrli3Eaxlt3ZtIDktEvXt4L/R9r3ODr2zKwGM/2BjxVw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/transform": "30.2.0", "@types/babel__core": "^7.20.5", @@ -5554,6 +5676,7 @@ "integrity": "sha512-D8Z6Qm8jCvVXtIRkBnqNHX0zJ37rQcFJ9u8WOS6tkYOsRdHBzypCstaxWiu5ZIlqQtviRYbgnRLSoCEvjqcqbA==", "dev": true, "license": "BSD-3-Clause", + "peer": true, "workspaces": [ "test/babel-8" ], @@ -5574,6 +5697,7 @@ "integrity": "sha512-ftzhzSGMUnOzcCXd6WHdBGMyuwy15Wnn0iyyWGKgBDLxf9/s5ABuraCSpBX2uG0jUg4rqJnxsLc5+oYBqoxVaA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@types/babel__core": "^7.20.5" }, @@ -5587,6 +5711,7 @@ "integrity": "sha512-E/VlAEzRrsLEb2+dv8yp3bo4scof3l9nR4lrld+Iy5NyVqgVYUJnDAmunkhPMisRI32Qc4iRiz425d8vM++2fg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/plugin-syntax-async-generators": "^7.8.4", "@babel/plugin-syntax-bigint": "^7.8.3", @@ -5614,6 +5739,7 @@ "integrity": "sha512-US4Z3NOieAQumwFnYdUWKvUKh8+YSnS/gB3t6YBiz0bskpu7Pine8pPCheNxlPEW4wnUkma2a94YuW2q3guvCQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { 
"babel-plugin-jest-hoist": "30.2.0", "babel-preset-current-node-syntax": "^1.2.0" @@ -5657,6 +5783,7 @@ "integrity": "sha512-PxSsosKQjI38iXkmb3d0Y32efqyA0uW4s41u4IVBsLlWLhCiYNpH/AfNOVWRqCQBlD8TFJTz6OUWNd4DFJCnmw==", "dev": true, "license": "Apache-2.0", + "peer": true, "bin": { "baseline-browser-mapping": "dist/cli.js" } @@ -5766,6 +5893,7 @@ "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", "dev": true, "license": "Apache-2.0", + "peer": true, "dependencies": { "node-int64": "^0.4.0" } @@ -5781,7 +5909,8 @@ "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/cac": { "version": "6.7.14", @@ -5809,6 +5938,7 @@ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6" } @@ -5832,7 +5962,8 @@ "url": "https://github.com/sponsors/ai" } ], - "license": "CC-BY-4.0" + "license": "CC-BY-4.0", + "peer": true }, "node_modules/chai": { "version": "5.3.3", @@ -5910,7 +6041,8 @@ "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-2.1.1.tgz", "integrity": "sha512-+CmxIZ/L2vNcEfvNtLdU0ZQ6mbq3FZnwAP2PPTiKP+1QOoKwlKlPgb8UKV0Dds7QVaMnHm+FwSft2VB0s/SLjQ==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/clean-stack": { "version": "2.2.0", @@ -6193,6 +6325,7 @@ "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", "dev": true, "license": "MIT", + "peer": true, "engines": { "iojs": ">= 1.0.0", "node": ">= 0.12.0" @@ -6203,7 +6336,8 @@ "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.3.tgz", "integrity": 
"sha512-1L5aqIkwPfiodaMgQunkF1zRhNqifHBmtbbbxcr6yVxxBnliw4TDOW6NxpO8DJLgJ16OT+Y4ztZqP6p/FtXnAw==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/color-convert": { "version": "2.0.1", @@ -6239,7 +6373,8 @@ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/config-chain": { "version": "1.1.13", @@ -6328,7 +6463,8 @@ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/core-util-is": { "version": "1.0.3", @@ -6485,6 +6621,7 @@ "integrity": "sha512-HGFtf8yhuhGhqO07SV79tRp+br4MnbdjeVxotpn1QBl30pcLLCQjX5b2295ll0fv8RKDKsmWYrl05usHM9CewQ==", "dev": true, "license": "MIT", + "peer": true, "peerDependencies": { "babel-plugin-macros": "^3.1.0" }, @@ -6520,6 +6657,7 @@ "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=0.10.0" } @@ -6539,6 +6677,7 @@ "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -6637,7 +6776,8 @@ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.265.tgz", "integrity": "sha512-B7IkLR1/AE+9jR2LtVF/1/6PFhY5TlnEHnlrKmGk7PvkJibg5jr+mLXLLzq3QYl6PA1T/vLDthQPqIPAlS/PPA==", "dev": true, - "license": "ISC" + "license": "ISC", + "peer": true }, "node_modules/emittery": { "version": "0.13.1", @@ -6645,6 +6785,7 @@ "integrity": 
"sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -6931,6 +7072,7 @@ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", "dev": true, "license": "BSD-2-Clause", + "peer": true, "bin": { "esparse": "bin/esparse.js", "esvalidate": "bin/esvalidate.js" @@ -6992,6 +7134,7 @@ "integrity": "sha512-+I6B/IkJc1o/2tiURyz/ivu/O0nKNEArIUB5O7zBrlDVJr22SCLH3xTeEry428LvFhRzIA1g8izguxJ/gbNcVQ==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">= 0.8.0" } @@ -7077,6 +7220,7 @@ "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", "dev": true, "license": "Apache-2.0", + "peer": true, "dependencies": { "bser": "2.1.1" } @@ -7139,6 +7283,7 @@ "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "locate-path": "^5.0.0", "path-exists": "^4.0.0" @@ -7268,7 +7413,8 @@ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", "dev": true, - "license": "ISC" + "license": "ISC", + "peer": true }, "node_modules/fsevents": { "version": "2.3.3", @@ -7333,6 +7479,7 @@ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6.9.0" } @@ -7353,6 +7500,7 @@ "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8.0.0" } @@ -7532,7 +7680,8 @@ "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", "integrity": 
"sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/http-proxy-agent": { "version": "7.0.2", @@ -7617,6 +7766,7 @@ "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "pkg-dir": "^4.2.0", "resolve-cwd": "^3.0.0" @@ -7683,6 +7833,7 @@ "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", "dev": true, "license": "ISC", + "peer": true, "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -7741,6 +7892,7 @@ "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6" } @@ -7840,6 +7992,7 @@ "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", "dev": true, "license": "BSD-3-Clause", + "peer": true, "engines": { "node": ">=8" } @@ -7850,6 +8003,7 @@ "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", "dev": true, "license": "BSD-3-Clause", + "peer": true, "dependencies": { "@babel/core": "^7.23.9", "@babel/parser": "^7.23.9", @@ -7867,6 +8021,7 @@ "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", "dev": true, "license": "BSD-3-Clause", + "peer": true, "dependencies": { "istanbul-lib-coverage": "^3.0.0", "make-dir": "^4.0.0", @@ -7882,6 +8037,7 @@ "integrity": "sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A==", "dev": true, "license": "BSD-3-Clause", + "peer": true, "dependencies": { 
"@jridgewell/trace-mapping": "^0.3.23", "debug": "^4.1.1", @@ -7897,6 +8053,7 @@ "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", "dev": true, "license": "BSD-3-Clause", + "peer": true, "dependencies": { "html-escaper": "^2.0.0", "istanbul-lib-report": "^3.0.0" @@ -7964,6 +8121,7 @@ "integrity": "sha512-L8lR1ChrRnSdfeOvTrwZMlnWV8G/LLjQ0nG9MBclwWZidA2N5FviRki0Bvh20WRMOX31/JYvzdqTJrk5oBdydQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "execa": "^5.1.1", "jest-util": "30.2.0", @@ -7979,6 +8137,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -7997,6 +8156,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -8010,6 +8170,7 @@ "integrity": "sha512-Fh0096NC3ZkFx05EP2OXCxJAREVxj1BcW/i6EWqqymcgYKWjyyDpral3fMxVcHXg6oZM7iULer9wGRFvfpl+Tg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/environment": "30.2.0", "@jest/expect": "30.2.0", @@ -8042,6 +8203,7 @@ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -8055,6 +8217,7 @@ "integrity": "sha512-dQHFo3Pt4/NLlG5z4PxZ/3yZTZ1C7s9hveiOj+GCN+uT109NC2QgsoVZsVOAvbJ3RgKkvyLGXZV9+piDpWbm6A==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/diff-sequences": "30.0.1", "@jest/get-type": "30.1.0", @@ -8071,6 +8234,7 @@ "integrity": "sha512-dQ94Nq4dbzmUWkQ0ANAWS9tBRfqCrn0bV9AMYdOi/MHW726xn7eQmMeRTpX2ViC00bpNaWXq+7o4lIQ3AX13Hg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/get-type": "30.1.0", "chalk": "^4.1.2", @@ 
-8087,6 +8251,7 @@ "integrity": "sha512-y4DKFLZ2y6DxTWD4cDe07RglV88ZiNEdlRfGtqahfbIjfsw1nMCPx49Uev4IA/hWn3sDKyAnSPwoYSsAEdcimw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@jest/types": "30.2.0", @@ -8108,6 +8273,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -8126,6 +8292,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -8139,6 +8306,7 @@ "integrity": "sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/schemas": "30.0.5", "ansi-styles": "^5.2.0", @@ -8154,6 +8322,7 @@ "integrity": "sha512-Os9ukIvADX/A9sLt6Zse3+nmHtHaE6hqOsjQtNiugFTbKRHYIYtZXNGNK9NChseXy7djFPjndX1tL0sCTlfpAA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/core": "30.2.0", "@jest/test-result": "30.2.0", @@ -8187,6 +8356,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -8205,6 +8375,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -8218,6 +8389,7 @@ "integrity": "sha512-g4WkyzFQVWHtu6uqGmQR4CQxz/CH3yDSlhzXMWzNjDx843gYjReZnMRanjRCq5XZFuQrGDxgUaiYWE8BRfVckA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/core": "^7.27.4", "@jest/get-type": "30.1.0", @@ -8270,6 +8442,7 @@ "integrity": 
"sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -8283,6 +8456,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -8301,6 +8475,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -8314,6 +8489,7 @@ "integrity": "sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/schemas": "30.0.5", "ansi-styles": "^5.2.0", @@ -8345,6 +8521,7 @@ "integrity": "sha512-tR/FFgZKS1CXluOQzZvNH3+0z9jXr3ldGSD8bhyuxvlVUwbeLOGynkunvlTMxchC5urrKndYiwCFC0DLVjpOCA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "detect-newline": "^3.1.0" }, @@ -8358,6 +8535,7 @@ "integrity": "sha512-lpWlJlM7bCUf1mfmuqTA8+j2lNURW9eNafOy99knBM01i5CQeY5UH1vZjgT9071nDJac1M4XsbyI44oNOdhlDQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/get-type": "30.1.0", "@jest/types": "30.2.0", @@ -8375,6 +8553,7 @@ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -8388,6 +8567,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -8406,6 +8586,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": 
true, "engines": { "node": ">=12" }, @@ -8419,6 +8600,7 @@ "integrity": "sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/schemas": "30.0.5", "ansi-styles": "^5.2.0", @@ -8434,6 +8616,7 @@ "integrity": "sha512-ElU8v92QJ9UrYsKrxDIKCxu6PfNj4Hdcktcn0JX12zqNdqWHB0N+hwOnnBBXvjLd2vApZtuLUGs1QSY+MsXoNA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/environment": "30.2.0", "@jest/fake-timers": "30.2.0", @@ -8453,6 +8636,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -8471,6 +8655,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -8494,6 +8679,7 @@ "integrity": "sha512-sQA/jCb9kNt+neM0anSj6eZhLZUIhQgwDt7cPGjumgLM4rXsfb9kpnlacmvZz3Q5tb80nS+oG/if+NBKrHC+Xw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -8519,6 +8705,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -8537,6 +8724,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -8550,6 +8738,7 @@ "integrity": "sha512-M6jKAjyzjHG0SrQgwhgZGy9hFazcudwCNovY/9HPIicmNSBuockPSedAP9vlPK6ONFJ1zfyH/M2/YYJxOz5cdQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/get-type": "30.1.0", "pretty-format": "30.2.0" @@ -8564,6 +8753,7 @@ "integrity": 
"sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -8577,6 +8767,7 @@ "integrity": "sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/schemas": "30.0.5", "ansi-styles": "^5.2.0", @@ -8667,6 +8858,7 @@ "integrity": "sha512-JNNNl2rj4b5ICpmAcq+WbLH83XswjPbjH4T7yvGzfAGCPh1rw+xVNbtk+FnRslvt9lkCcdn9i1oAoKUuFsOxRw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -8682,6 +8874,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -8700,6 +8893,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -8713,6 +8907,7 @@ "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6" }, @@ -8731,6 +8926,7 @@ "integrity": "sha512-jHEQgBXAgc+Gh4g0p3bCevgRCVRkB4VB70zhoAE48gxeSr1hfUOsM/C2WoJgVL7Eyg//hudYENbm3Ne+/dRVVA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0" } @@ -8741,6 +8937,7 @@ "integrity": "sha512-TCrHSxPlx3tBY3hWNtRQKbtgLhsXa1WmbJEqBlTBrGafd5fiQFByy2GNCEoGR+Tns8d15GaL9cxEzKOO3GEb2A==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "chalk": "^4.1.2", "graceful-fs": "^4.2.11", @@ -8761,6 +8958,7 @@ "integrity": "sha512-xTOIGug/0RmIe3mmCqCT95yO0vj6JURrn1TKWlNbhiAefJRWINNPgwVkrVgt/YaerPzY3iItufd80v3lOrFJ2w==", "dev": true, "license": "MIT", + 
"peer": true, "dependencies": { "jest-regex-util": "30.0.1", "jest-snapshot": "30.2.0" @@ -8775,6 +8973,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -8793,6 +8992,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -8806,6 +9006,7 @@ "integrity": "sha512-PqvZ2B2XEyPEbclp+gV6KO/F1FIFSbIwewRgmROCMBo/aZ6J1w8Qypoj2pEOcg3G2HzLlaP6VUtvwCI8dM3oqQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/console": "30.2.0", "@jest/environment": "30.2.0", @@ -8840,6 +9041,7 @@ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -8853,6 +9055,7 @@ "integrity": "sha512-y4DKFLZ2y6DxTWD4cDe07RglV88ZiNEdlRfGtqahfbIjfsw1nMCPx49Uev4IA/hWn3sDKyAnSPwoYSsAEdcimw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@jest/types": "30.2.0", @@ -8874,6 +9077,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -8892,6 +9096,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -8905,6 +9110,7 @@ "integrity": "sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/schemas": "30.0.5", "ansi-styles": "^5.2.0", @@ -8920,6 +9126,7 @@ 
"integrity": "sha512-p1+GVX/PJqTucvsmERPMgCPvQJpFt4hFbM+VN3n8TMo47decMUcJbt+rgzwrEme0MQUA/R+1de2axftTHkKckg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/environment": "30.2.0", "@jest/fake-timers": "30.2.0", @@ -8954,6 +9161,7 @@ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -8967,6 +9175,7 @@ "integrity": "sha512-y4DKFLZ2y6DxTWD4cDe07RglV88ZiNEdlRfGtqahfbIjfsw1nMCPx49Uev4IA/hWn3sDKyAnSPwoYSsAEdcimw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@jest/types": "30.2.0", @@ -8988,6 +9197,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -9006,6 +9216,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -9019,6 +9230,7 @@ "integrity": "sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/schemas": "30.0.5", "ansi-styles": "^5.2.0", @@ -9034,6 +9246,7 @@ "integrity": "sha512-5WEtTy2jXPFypadKNpbNkZ72puZCa6UjSr/7djeecHWOu7iYhSXSnHScT8wBz3Rn8Ena5d5RYRcsyKIeqG1IyA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/core": "^7.27.4", "@babel/generator": "^7.27.5", @@ -9067,6 +9280,7 @@ "integrity": "sha512-1JnRfhqpD8HGpOmQp180Fo9Zt69zNtC+9lR+kT7NVL05tNXIi+QC8Csz7lfidMoVLPD3FnOtcmp0CEFnxExGEA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/get-type": "30.1.0" }, @@ -9080,6 +9294,7 @@ "integrity": 
"sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -9093,6 +9308,7 @@ "integrity": "sha512-u/feCi0GPsI+988gU2FLcsHyAHTU0MX1Wg68NhAnN7z/+C5wqG+CY8J53N9ioe8RXgaoz0nBR/TYMf3AycUuPw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/expect-utils": "30.2.0", "@jest/get-type": "30.1.0", @@ -9111,6 +9327,7 @@ "integrity": "sha512-dQHFo3Pt4/NLlG5z4PxZ/3yZTZ1C7s9hveiOj+GCN+uT109NC2QgsoVZsVOAvbJ3RgKkvyLGXZV9+piDpWbm6A==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/diff-sequences": "30.0.1", "@jest/get-type": "30.1.0", @@ -9127,6 +9344,7 @@ "integrity": "sha512-dQ94Nq4dbzmUWkQ0ANAWS9tBRfqCrn0bV9AMYdOi/MHW726xn7eQmMeRTpX2ViC00bpNaWXq+7o4lIQ3AX13Hg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/get-type": "30.1.0", "chalk": "^4.1.2", @@ -9143,6 +9361,7 @@ "integrity": "sha512-y4DKFLZ2y6DxTWD4cDe07RglV88ZiNEdlRfGtqahfbIjfsw1nMCPx49Uev4IA/hWn3sDKyAnSPwoYSsAEdcimw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@jest/types": "30.2.0", @@ -9164,6 +9383,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -9182,6 +9402,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -9195,6 +9416,7 @@ "integrity": "sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/schemas": "30.0.5", "ansi-styles": "^5.2.0", @@ -9282,6 +9504,7 @@ "integrity": 
"sha512-FBGWi7dP2hpdi8nBoWxSsLvBFewKAg0+uSQwBaof4Y4DPgBabXgpSYC5/lR7VmnIlSpASmCi/ntRWPbv7089Pw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/get-type": "30.1.0", "@jest/types": "30.2.0", @@ -9300,6 +9523,7 @@ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -9313,6 +9537,7 @@ "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -9326,6 +9551,7 @@ "integrity": "sha512-9uBdv/B4EefsuAL+pWqueZyZS2Ba+LxfFeQ9DN14HU4bN8bhaxKdkpjpB6fs9+pSjIBu+FXQHImEg8j/Lw0+vA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/schemas": "30.0.5", "ansi-styles": "^5.2.0", @@ -9341,6 +9567,7 @@ "integrity": "sha512-PYxa28dxJ9g777pGm/7PrbnMeA0Jr7osHP9bS7eJy9DuAjMgdGtxgf0uKMyoIsTWAkIbUW5hSDdJ3urmgXBqxg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/test-result": "30.2.0", "@jest/types": "30.2.0", @@ -9361,6 +9588,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -9379,6 +9607,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -9392,6 +9621,7 @@ "integrity": "sha512-0Q4Uk8WF7BUwqXHuAjc23vmopWJw5WH7w2tqBoUOZpOjW/ZnR44GXXd1r82RvnmI2GZge3ivrYXk/BE2+VtW2g==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@types/node": "*", "@ungap/structured-clone": "^1.3.0", @@ -9409,6 +9639,7 @@ "integrity": "sha512-QKNsM0o3Xe6ISQU869e+DhG+4CK/48aHYdJZGlFQVTjnbvgpcKyxpzk29fGiO7i/J8VENZ+d2iGnSsvmuHywlA==", "dev": 
true, "license": "MIT", + "peer": true, "dependencies": { "@jest/types": "30.2.0", "@types/node": "*", @@ -9427,6 +9658,7 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -9440,6 +9672,7 @@ "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "has-flag": "^4.0.0" }, @@ -9463,6 +9696,7 @@ "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -9477,6 +9711,7 @@ "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", "dev": true, "license": "MIT", + "peer": true, "bin": { "jsesc": "bin/jsesc" }, @@ -9608,6 +9843,7 @@ "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6" } @@ -9665,6 +9901,7 @@ "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "p-locate": "^4.1.0" }, @@ -9741,6 +9978,7 @@ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", "dev": true, "license": "ISC", + "peer": true, "dependencies": { "yallist": "^3.0.2" } @@ -9792,6 +10030,7 @@ "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "semver": "^7.5.3" }, @@ -9815,6 +10054,7 @@ "integrity": 
"sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", "dev": true, "license": "BSD-3-Clause", + "peer": true, "dependencies": { "tmpl": "1.0.5" } @@ -9825,7 +10065,6 @@ "integrity": "sha512-8dD6FusOQSrpv9Z1rdNMdlSgQOIP880DHqnohobOmYLElGEqAL/JvxvuxZO16r4HtjTlfPRDC1hbvxC9dPN2nA==", "dev": true, "license": "MIT", - "peer": true, "bin": { "marked": "bin/marked.js" }, @@ -10020,6 +10259,7 @@ "integrity": "sha512-PHI5f1O0EP5xJ9gQmFGMS6IZcrVvTjpXjz7Na41gTE7eE2hK11lg04CECCYEEjdc17EV4DO+fkGEtt7TpTaTiQ==", "dev": true, "license": "MIT", + "peer": true, "bin": { "napi-postinstall": "lib/cli.js" }, @@ -10035,7 +10275,8 @@ "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/neo-async": { "version": "2.6.2", @@ -10110,14 +10351,16 @@ "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/node-releases": { "version": "2.0.27", "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/normalize-package-data": { "version": "6.0.2", @@ -10140,6 +10383,7 @@ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=0.10.0" } @@ -12602,7 +12846,6 @@ "dev": true, "inBundle": true, "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -12870,6 +13113,7 
@@ "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", "dev": true, "license": "ISC", + "peer": true, "dependencies": { "wrappy": "1" } @@ -12972,6 +13216,7 @@ "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "yocto-queue": "^0.1.0" }, @@ -12988,6 +13233,7 @@ "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "p-limit": "^2.2.0" }, @@ -13001,6 +13247,7 @@ "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "p-try": "^2.0.0" }, @@ -13053,6 +13300,7 @@ "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6" } @@ -13138,6 +13386,7 @@ "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -13148,6 +13397,7 @@ "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=0.10.0" } @@ -13246,6 +13496,7 @@ "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">= 6" } @@ -13343,6 +13594,7 @@ "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "find-up": "^4.0.0" }, @@ -13472,7 +13724,8 @@ "url": 
"https://opencollective.com/fast-check" } ], - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/rc": { "version": "1.2.8", @@ -13651,6 +13904,7 @@ "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "resolve-from": "^5.0.0" }, @@ -13757,7 +14011,6 @@ "integrity": "sha512-phCkJ6pjDi9ANdhuF5ElS10GGdAKY6R1Pvt9lT3SFhOwM4T7QZE7MLpBDbNruUx/Q3gFD92/UOFringGipRqZA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@semantic-release/commit-analyzer": "^13.0.0-beta.1", "@semantic-release/error": "^4.0.0", @@ -14280,6 +14533,7 @@ "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" @@ -14343,7 +14597,8 @@ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", "dev": true, - "license": "BSD-3-Clause" + "license": "BSD-3-Clause", + "peer": true }, "node_modules/stack-utils": { "version": "2.0.6", @@ -14406,6 +14661,7 @@ "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "char-regex": "^1.0.2", "strip-ansi": "^6.0.0" @@ -14420,6 +14676,7 @@ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -14430,6 +14687,7 @@ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "ansi-regex": "^5.0.1" }, @@ -14539,6 +14797,7 @@ "integrity": 
"sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -14559,6 +14818,7 @@ "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" }, @@ -14665,6 +14925,7 @@ "integrity": "sha512-MeQTA1r0litLUf0Rp/iisCaL8761lKAZHaimlbGK4j0HysC4PLfqygQj9srcs0m2RdtDYnF8UuYyKpbjHYp7Jw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@pkgr/core": "^0.2.9" }, @@ -14736,6 +14997,7 @@ "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", "dev": true, "license": "ISC", + "peer": true, "dependencies": { "@istanbuljs/schema": "^0.1.2", "glob": "^7.1.4", @@ -14751,6 +15013,7 @@ "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -14763,6 +15026,7 @@ "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, "license": "ISC", + "peer": true, "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -14784,6 +15048,7 @@ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, "license": "ISC", + "peer": true, "dependencies": { "brace-expansion": "^1.1.7" }, @@ -14908,7 +15173,6 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -14951,7 +15215,8 @@ "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", "dev": true, - "license": 
"BSD-3-Clause" + "license": "BSD-3-Clause", + "peer": true }, "node_modules/to-regex-range": { "version": "5.0.1", @@ -15057,7 +15322,6 @@ "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@cspotcode/source-map-support": "^0.8.0", "@tsconfig/node10": "^1.0.7", @@ -15108,6 +15372,7 @@ "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=4" } @@ -15118,6 +15383,7 @@ "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", "dev": true, "license": "(MIT OR CC0-1.0)", + "peer": true, "engines": { "node": ">=10" }, @@ -15224,6 +15490,7 @@ "dev": true, "hasInstallScript": true, "license": "MIT", + "peer": true, "dependencies": { "napi-postinstall": "^0.3.0" }, @@ -15272,6 +15539,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "escalade": "^3.2.0", "picocolors": "^1.1.1" @@ -15335,6 +15603,7 @@ "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", "dev": true, "license": "ISC", + "peer": true, "dependencies": { "@jridgewell/trace-mapping": "^0.3.12", "@types/istanbul-lib-coverage": "^2.0.1", @@ -15361,7 +15630,6 @@ "integrity": "sha512-tI2l/nFHC5rLh7+5+o7QjKjSR04ivXDF4jcgV0f/bTQ+OJiITy5S6gaynVsEM+7RqzufMnVbIon6Sr5x1SDYaQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.5.0", @@ -15478,7 +15746,6 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -15578,6 +15845,7 @@ "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", "dev": true, "license": 
"Apache-2.0", + "peer": true, "dependencies": { "makeerror": "1.0.12" } @@ -15730,7 +15998,8 @@ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", "dev": true, - "license": "ISC" + "license": "ISC", + "peer": true }, "node_modules/write-file-atomic": { "version": "5.0.1", @@ -15807,7 +16076,8 @@ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", "dev": true, - "license": "ISC" + "license": "ISC", + "peer": true }, "node_modules/yaml": { "version": "2.8.2", @@ -15914,6 +16184,7 @@ "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -15939,7 +16210,6 @@ "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", "license": "MIT", - "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } diff --git a/packages/openai-adapters/src/convertToolsToVercel.ts b/packages/openai-adapters/src/convertToolsToVercel.ts index df0811ff042..c9363a87fcb 100644 --- a/packages/openai-adapters/src/convertToolsToVercel.ts +++ b/packages/openai-adapters/src/convertToolsToVercel.ts @@ -8,7 +8,7 @@ import type { ChatCompletionCreateParams } from "openai/resources/index.js"; * Converts OpenAI tool format to Vercel AI SDK format. 
* * OpenAI format: { type: "function", function: { name, description, parameters: JSONSchema } } - * Vercel format: { [toolName]: { description, parameters: aiJsonSchema(JSONSchema) } } + * Vercel format (AI SDK v5): { [toolName]: { description, inputSchema: aiJsonSchema(JSONSchema) } } * * @param openaiTools - Array of OpenAI tools or undefined * @returns Object with tool names as keys, or undefined if no tools @@ -27,7 +27,7 @@ export async function convertToolsToVercelFormat( if (tool.type === "function") { vercelTools[tool.function.name] = { description: tool.function.description, - parameters: aiJsonSchema( + inputSchema: aiJsonSchema( tool.function.parameters ?? { type: "object", properties: {} }, ), }; diff --git a/packages/openai-adapters/src/test/ai-sdk-v5-migration.test.ts b/packages/openai-adapters/src/test/ai-sdk-v5-migration.test.ts new file mode 100644 index 00000000000..0365315e01d --- /dev/null +++ b/packages/openai-adapters/src/test/ai-sdk-v5-migration.test.ts @@ -0,0 +1,593 @@ +import { describe, test, expect } from "vitest"; +import { convertToolsToVercelFormat } from "../convertToolsToVercel.js"; +import type { ChatCompletionCreateParams } from "openai/resources/index.js"; + +/** + * AI SDK v5 Migration Tests + * + * This test suite verifies that the migration from AI SDK v4 to v5 is correct. 
+ * Key changes in v5: + * - Tool parameters renamed to inputSchema + * - Usage fields: promptTokens β†’ inputTokens, completionTokens β†’ outputTokens + * - Tool call structure: args β†’ input + * - Model initialization: anthropic(model) β†’ anthropic.chat(model) + * - maxTokens β†’ maxOutputTokens + */ + +describe("AI SDK v5 Migration: Tool Conversion", () => { + test("uses inputSchema instead of parameters for tool definitions", async () => { + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "searchWeb", + description: "Search the web for information", + parameters: { + type: "object", + properties: { + query: { + type: "string", + description: "Search query", + }, + limit: { + type: "number", + description: "Maximum results", + default: 10, + }, + }, + required: ["query"], + }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.searchWeb).toHaveProperty("inputSchema"); + expect(result?.searchWeb).not.toHaveProperty("parameters"); + expect(result?.searchWeb.description).toBe( + "Search the web for information", + ); + }); + + test("handles empty parameters object correctly", async () => { + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "getCurrentTime", + description: "Get current time", + parameters: undefined, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.getCurrentTime).toHaveProperty("inputSchema"); + // Should default to empty object schema + expect(result?.getCurrentTime.inputSchema).toBeDefined(); + }); + + test("converts multiple tools with various parameter types", async () => { + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "calculateSum", + description: "Calculate sum of numbers", + parameters: { + type: "object", + properties: { + numbers: { + 
type: "array", + items: { type: "number" }, + }, + }, + }, + }, + }, + { + type: "function", + function: { + name: "getUserInfo", + description: "Get user information", + parameters: { + type: "object", + properties: { + userId: { type: "string" }, + includeDetails: { type: "boolean" }, + }, + required: ["userId"], + }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(Object.keys(result!)).toHaveLength(2); + + // Both should use inputSchema + expect(result?.calculateSum).toHaveProperty("inputSchema"); + expect(result?.calculateSum).not.toHaveProperty("parameters"); + + expect(result?.getUserInfo).toHaveProperty("inputSchema"); + expect(result?.getUserInfo).not.toHaveProperty("parameters"); + }); + + test("handles nested object schemas in inputSchema", async () => { + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "createUser", + description: "Create a new user", + parameters: { + type: "object", + properties: { + user: { + type: "object", + properties: { + name: { type: "string" }, + email: { type: "string" }, + address: { + type: "object", + properties: { + street: { type: "string" }, + city: { type: "string" }, + zipCode: { type: "string" }, + }, + }, + }, + required: ["name", "email"], + }, + }, + required: ["user"], + }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.createUser).toHaveProperty("inputSchema"); + expect(result?.createUser.inputSchema).toBeDefined(); + }); + + test("handles array parameters in inputSchema", async () => { + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "processItems", + description: "Process multiple items", + parameters: { + type: "object", + properties: { + items: { + type: "array", + items: { + type: "object", + properties: { + id: { type: "string" }, + value: { type: "number" }, 
+ }, + }, + }, + }, + }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.processItems).toHaveProperty("inputSchema"); + }); + + test("maintains backward compatibility with existing tool formats", async () => { + // This test ensures that the conversion still works with older tool definitions + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "legacyTool", + description: "A legacy tool", + parameters: { + type: "object", + properties: { + input: { type: "string" }, + }, + }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.legacyTool).toHaveProperty("description"); + expect(result?.legacyTool).toHaveProperty("inputSchema"); + }); + + test("handles tools with enum parameters", async () => { + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "setStatus", + description: "Set status", + parameters: { + type: "object", + properties: { + status: { + type: "string", + enum: ["active", "inactive", "pending"], + }, + }, + }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.setStatus).toHaveProperty("inputSchema"); + }); + + test("handles tools with pattern constraints", async () => { + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "validateEmail", + description: "Validate email format", + parameters: { + type: "object", + properties: { + email: { + type: "string", + pattern: "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$", + }, + }, + }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.validateEmail).toHaveProperty("inputSchema"); + }); + + test("handles tools with numeric constraints", async () => { + const tools: 
ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "setTemperature", + description: "Set temperature value", + parameters: { + type: "object", + properties: { + value: { + type: "number", + minimum: -273.15, + maximum: 1000, + multipleOf: 0.1, + }, + }, + }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.setTemperature).toHaveProperty("inputSchema"); + }); + + test("handles tools with string length constraints", async () => { + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "setUsername", + description: "Set username", + parameters: { + type: "object", + properties: { + username: { + type: "string", + minLength: 3, + maxLength: 20, + }, + }, + }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.setUsername).toHaveProperty("inputSchema"); + }); + + test("handles tools with array constraints", async () => { + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "setTags", + description: "Set tags", + parameters: { + type: "object", + properties: { + tags: { + type: "array", + items: { type: "string" }, + minItems: 1, + maxItems: 10, + uniqueItems: true, + }, + }, + }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.setTags).toHaveProperty("inputSchema"); + }); + + test("handles tools with oneOf/anyOf/allOf schemas", async () => { + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "processData", + description: "Process data with flexible schema", + parameters: { + type: "object", + properties: { + data: { + oneOf: [ + { type: "string" }, + { type: "number" }, + { type: "object", properties: { value: { type: "string" } } }, + ], + }, + }, + }, + }, + }, + ]; + 
+ const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.processData).toHaveProperty("inputSchema"); + }); + + test("handles tools with additionalProperties", async () => { + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "createConfig", + description: "Create configuration", + parameters: { + type: "object", + properties: { + name: { type: "string" }, + }, + additionalProperties: true, + }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.createConfig).toHaveProperty("inputSchema"); + }); +}); + +describe("AI SDK v5 Migration: Edge Cases", () => { + test("handles tool with empty string name gracefully", async () => { + const tools: any[] = [ + { + type: "function", + function: { + name: "", + description: "Tool with empty name", + parameters: { type: "object" }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.[""]).toBeDefined(); + }); + + test("handles tool with special characters in name", async () => { + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "tool_with_underscores", + description: "Tool name with underscores", + parameters: { type: "object" }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.tool_with_underscores).toHaveProperty("inputSchema"); + }); + + test("handles duplicate tool names (last one wins)", async () => { + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "duplicateTool", + description: "First tool", + parameters: { type: "object" }, + }, + }, + { + type: "function", + function: { + name: "duplicateTool", + description: "Second tool", + parameters: { type: "object" }, + }, + }, + ]; + + const result = await 
convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(Object.keys(result!)).toHaveLength(1); + expect(result?.duplicateTool.description).toBe("Second tool"); + }); + + test("handles tool with null description", async () => { + const tools: any[] = [ + { + type: "function", + function: { + name: "nullDescTool", + description: null, + parameters: { type: "object" }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.nullDescTool.description).toBeNull(); + }); + + test("handles tool with very long description", async () => { + const longDescription = "A".repeat(10000); + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "longDescTool", + description: longDescription, + parameters: { type: "object" }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.longDescTool.description).toBe(longDescription); + expect(result?.longDescTool).toHaveProperty("inputSchema"); + }); + + test("handles tool with complex nested schemas", async () => { + const tools: ChatCompletionCreateParams["tools"] = [ + { + type: "function", + function: { + name: "complexNested", + description: "Complex nested structure", + parameters: { + type: "object", + properties: { + level1: { + type: "object", + properties: { + level2: { + type: "object", + properties: { + level3: { + type: "object", + properties: { + level4: { + type: "array", + items: { + type: "object", + properties: { + value: { type: "string" }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(result?.complexNested).toHaveProperty("inputSchema"); + }); + + test("handles mixed valid and invalid tools", async () => { + const tools: any[] = [ + { + type: "function", + function: { + name: "validTool", + 
description: "Valid tool", + parameters: { type: "object" }, + }, + }, + { + type: "invalid_type", + function: { + name: "invalidTool", + description: "Invalid tool type", + parameters: { type: "object" }, + }, + }, + { + type: "function", + function: { + name: "anotherValidTool", + description: "Another valid tool", + parameters: { type: "object" }, + }, + }, + ]; + + const result = await convertToolsToVercelFormat(tools); + + expect(result).toBeDefined(); + expect(Object.keys(result!)).toHaveLength(2); + expect(result?.validTool).toBeDefined(); + expect(result?.anotherValidTool).toBeDefined(); + expect(result?.invalidTool).toBeUndefined(); + }); +}); diff --git a/packages/openai-adapters/src/test/ai-sdk-v5-model-init.test.ts b/packages/openai-adapters/src/test/ai-sdk-v5-model-init.test.ts new file mode 100644 index 00000000000..49dba7906be --- /dev/null +++ b/packages/openai-adapters/src/test/ai-sdk-v5-model-init.test.ts @@ -0,0 +1,413 @@ +import { describe, test, expect } from "vitest"; + +/** + * AI SDK v5 Model Initialization Tests + * + * This test suite documents the change in model initialization between v4 and v5: + * - v4: const model = provider(modelName) + * - v5: const model = provider.chat(modelName) + * + * This is a breaking change that requires updating all provider usages. 
+ */ + +describe("AI SDK v5 Migration: Model Initialization", () => { + describe("Model initialization pattern changes", () => { + test("demonstrates v4 pattern (function call)", () => { + // v4 pattern: provider is a function + // const anthropic = createAnthropic({ apiKey }); + // const model = anthropic(modelName); + + // Mock v4 provider + const mockV4Provider = (modelName: string) => ({ + modelName, + type: "v4-model", + }); + + const model = mockV4Provider("claude-3-5-sonnet-20241022"); + + expect(model.modelName).toBe("claude-3-5-sonnet-20241022"); + expect(model.type).toBe("v4-model"); + }); + + test("demonstrates v5 pattern (method call)", () => { + // v5 pattern: provider is an object with methods + // const anthropic = createAnthropic({ apiKey }); + // const model = anthropic.chat(modelName); + + // Mock v5 provider + const mockV5Provider = { + chat: (modelName: string) => ({ + modelName, + type: "v5-chat-model", + }), + languageModel: (modelName: string) => ({ + modelName, + type: "v5-language-model", + }), + }; + + const model = mockV5Provider.chat("claude-3-5-sonnet-20241022"); + + expect(model.modelName).toBe("claude-3-5-sonnet-20241022"); + expect(model.type).toBe("v5-chat-model"); + }); + + test("v5 provider.chat() vs provider.languageModel()", () => { + // v5 provides multiple model types + const mockV5Provider = { + chat: (modelName: string) => ({ + modelName, + type: "chat", + }), + languageModel: (modelName: string) => ({ + modelName, + type: "language", + }), + }; + + const chatModel = mockV5Provider.chat("model-name"); + const languageModel = mockV5Provider.languageModel("model-name"); + + expect(chatModel.type).toBe("chat"); + expect(languageModel.type).toBe("language"); + + // For most use cases, we should use .chat() + expect(chatModel.modelName).toBe(languageModel.modelName); + }); + }); + + describe("OpenAI provider model initialization", () => { + test("v4: openai(model)", () => { + // Mock v4 OpenAI provider + const mockOpenAIv4 = 
(modelName: string) => ({ + provider: "openai", + model: modelName, + version: "v4", + }); + + const model = mockOpenAIv4("gpt-4o"); + + expect(model.provider).toBe("openai"); + expect(model.model).toBe("gpt-4o"); + expect(model.version).toBe("v4"); + }); + + test("v5: openai.chat(model)", () => { + // Mock v5 OpenAI provider + const mockOpenAIv5 = { + chat: (modelName: string) => ({ + provider: "openai", + model: modelName, + version: "v5", + }), + }; + + const model = mockOpenAIv5.chat("gpt-4o"); + + expect(model.provider).toBe("openai"); + expect(model.model).toBe("gpt-4o"); + expect(model.version).toBe("v5"); + }); + + test("v5 supports additional OpenAI model types", () => { + const mockOpenAIv5 = { + chat: (model: string) => ({ type: "chat", model }), + completion: (model: string) => ({ type: "completion", model }), + embedding: (model: string) => ({ type: "embedding", model }), + }; + + const chatModel = mockOpenAIv5.chat("gpt-4o"); + const embeddingModel = mockOpenAIv5.embedding("text-embedding-3-small"); + + expect(chatModel.type).toBe("chat"); + expect(embeddingModel.type).toBe("embedding"); + }); + }); + + describe("Anthropic provider model initialization", () => { + test("v4: anthropic(model)", () => { + // Mock v4 Anthropic provider + const mockAnthropicv4 = (modelName: string) => ({ + provider: "anthropic", + model: modelName, + version: "v4", + }); + + const model = mockAnthropicv4("claude-3-5-sonnet-20241022"); + + expect(model.provider).toBe("anthropic"); + expect(model.model).toBe("claude-3-5-sonnet-20241022"); + expect(model.version).toBe("v4"); + }); + + test("v5: anthropic.chat(model)", () => { + // Mock v5 Anthropic provider + const mockAnthropicv5 = { + chat: (modelName: string) => ({ + provider: "anthropic", + model: modelName, + version: "v5", + }), + }; + + const model = mockAnthropicv5.chat("claude-3-5-sonnet-20241022"); + + expect(model.provider).toBe("anthropic"); + expect(model.model).toBe("claude-3-5-sonnet-20241022"); + 
expect(model.version).toBe("v5"); + }); + + test("v5 anthropic.chat() handles different model variants", () => { + const mockAnthropicv5 = { + chat: (modelName: string) => ({ + provider: "anthropic", + model: modelName, + }), + }; + + const sonnet = mockAnthropicv5.chat("claude-3-5-sonnet-20241022"); + const haiku = mockAnthropicv5.chat("claude-3-5-haiku-20241022"); + const opus = mockAnthropicv5.chat("claude-3-opus-20240229"); + + expect(sonnet.model).toContain("sonnet"); + expect(haiku.model).toContain("haiku"); + expect(opus.model).toContain("opus"); + }); + }); + + describe("Provider initialization with options", () => { + test("v4 provider with options", () => { + interface V4ProviderOptions { + apiKey: string; + baseURL?: string; + } + + const createV4Provider = (options: V4ProviderOptions) => { + return (modelName: string) => ({ + ...options, + model: modelName, + version: "v4", + }); + }; + + const provider = createV4Provider({ + apiKey: "test-key", + baseURL: "https://api.example.com", + }); + + const model = provider("test-model"); + + expect(model.apiKey).toBe("test-key"); + expect(model.baseURL).toBe("https://api.example.com"); + expect(model.model).toBe("test-model"); + }); + + test("v5 provider with options", () => { + interface V5ProviderOptions { + apiKey: string; + baseURL?: string; + } + + const createV5Provider = (options: V5ProviderOptions) => { + return { + chat: (modelName: string) => ({ + ...options, + model: modelName, + version: "v5", + }), + }; + }; + + const provider = createV5Provider({ + apiKey: "test-key", + baseURL: "https://api.example.com", + }); + + const model = provider.chat("test-model"); + + expect(model.apiKey).toBe("test-key"); + expect(model.baseURL).toBe("https://api.example.com"); + expect(model.model).toBe("test-model"); + }); + }); + + describe("Error handling patterns", () => { + test("v4 error: calling non-function provider", () => { + const notAFunction = { chat: () => ({}) }; + + // In v4, this would cause an error + 
expect(() => { + (notAFunction as any)("model-name"); + }).toThrow(); + }); + + test("v5 error: calling provider as function", () => { + const v5Provider = { + chat: (model: string) => ({ model }), + }; + + // In v5, this would cause an error + expect(() => { + (v5Provider as any)("model-name"); + }).toThrow(); + }); + + test("v5 error: missing chat method", () => { + const incompleteProvider = { + languageModel: (model: string) => ({ model }), + // missing chat method + }; + + expect((incompleteProvider as any).chat).toBeUndefined(); + }); + + test("v5 handles null/undefined model name", () => { + const mockV5Provider = { + chat: (modelName: string | null | undefined) => { + if (!modelName) { + throw new Error("Model name is required"); + } + return { model: modelName }; + }, + }; + + expect(() => mockV5Provider.chat(null as any)).toThrow( + "Model name is required", + ); + expect(() => mockV5Provider.chat(undefined as any)).toThrow( + "Model name is required", + ); + }); + }); + + describe("Migration path", () => { + test("adapter pattern for v4 to v5 migration", () => { + // Create an adapter that makes v5 provider work like v4 + const createV4CompatibleProvider = (v5Provider: any) => { + return (modelName: string) => v5Provider.chat(modelName); + }; + + const v5Provider = { + chat: (model: string) => ({ model, version: "v5" }), + }; + + const v4Compatible = createV4CompatibleProvider(v5Provider); + const model = v4Compatible("test-model"); + + expect(model.model).toBe("test-model"); + expect(model.version).toBe("v5"); + }); + + test("checking provider version", () => { + const detectProviderVersion = ( + provider: any, + ): "v4" | "v5" | "unknown" => { + if (typeof provider === "function") { + return "v4"; + } else if ( + typeof provider === "object" && + typeof provider.chat === "function" + ) { + return "v5"; + } + return "unknown"; + }; + + const v4Provider = () => ({}); + const v5Provider = { chat: () => ({}) }; + const unknownProvider = "not a provider"; + 
+ expect(detectProviderVersion(v4Provider)).toBe("v4"); + expect(detectProviderVersion(v5Provider)).toBe("v5"); + expect(detectProviderVersion(unknownProvider as any)).toBe("unknown"); + }); + }); + + describe("Parameter name changes", () => { + test("v4 uses maxTokens", () => { + // v4 parameter name + const v4Params = { + model: "gpt-4o", + messages: [], + maxTokens: 1000, // v4 name + temperature: 0.7, + }; + + expect(v4Params).toHaveProperty("maxTokens"); + expect(v4Params).not.toHaveProperty("maxOutputTokens"); + }); + + test("v5 uses maxOutputTokens", () => { + // v5 parameter name + const v5Params = { + model: "gpt-4o", + messages: [], + maxOutputTokens: 1000, // v5 name + temperature: 0.7, + }; + + expect(v5Params).toHaveProperty("maxOutputTokens"); + expect(v5Params).not.toHaveProperty("maxTokens"); + }); + + test("migration helper for parameter names", () => { + const migrateParams = (v4Params: any) => { + const v5Params = { ...v4Params }; + + // Rename maxTokens to maxOutputTokens + if ("maxTokens" in v5Params) { + v5Params.maxOutputTokens = v5Params.maxTokens; + delete v5Params.maxTokens; + } + + return v5Params; + }; + + const v4Params = { + model: "gpt-4o", + maxTokens: 1000, + temperature: 0.7, + }; + + const v5Params = migrateParams(v4Params); + + expect(v5Params).toHaveProperty("maxOutputTokens", 1000); + expect(v5Params).not.toHaveProperty("maxTokens"); + expect(v5Params.temperature).toBe(0.7); + }); + }); + + describe("Type safety improvements in v5", () => { + test("v5 provides better type definitions", () => { + // v5 has more specific types for different model types + interface ChatModel { + type: "chat"; + model: string; + generate: () => string; + } + + interface EmbeddingModel { + type: "embedding"; + model: string; + embed: () => number[]; + } + + type V5Model = ChatModel | EmbeddingModel; + + const chatModel: V5Model = { + type: "chat", + model: "gpt-4o", + generate: () => "response", + }; + + const embeddingModel: V5Model = { + type: 
"embedding", + model: "text-embedding-3-small", + embed: () => [0.1, 0.2, 0.3], + }; + + expect(chatModel.type).toBe("chat"); + expect(embeddingModel.type).toBe("embedding"); + }); + }); +}); diff --git a/packages/openai-adapters/src/test/ai-sdk-v5-tool-calls.test.ts b/packages/openai-adapters/src/test/ai-sdk-v5-tool-calls.test.ts new file mode 100644 index 00000000000..1cdcfbb9536 --- /dev/null +++ b/packages/openai-adapters/src/test/ai-sdk-v5-tool-calls.test.ts @@ -0,0 +1,556 @@ +import { describe, test, expect } from "vitest"; + +/** + * AI SDK v5 Tool Call Migration Tests + * + * This test suite verifies correct handling of tool call changes in AI SDK v5: + * - v4: toolCall.args β†’ v5: toolCall.input + * - Tool call structure changes in responses + * + * The adapters need to map these fields correctly when converting from + * Vercel AI SDK responses back to OpenAI-compatible format. + */ + +describe("AI SDK v5 Migration: Tool Call Fields", () => { + describe("Tool call input field mapping", () => { + test("maps tool call input to OpenAI arguments format", () => { + // Simulating v5 tool call format + const v5ToolCall = { + toolName: "readFile", + input: { + filepath: "/path/to/file.txt", + encoding: "utf-8", + }, + }; + + // Expected OpenAI format + const openAIFormat = { + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; + + expect(openAIFormat.type).toBe("function"); + expect(openAIFormat.function.name).toBe("readFile"); + expect(openAIFormat.function.arguments).toBe( + JSON.stringify({ filepath: "/path/to/file.txt", encoding: "utf-8" }), + ); + }); + + test("handles tool call with empty input", () => { + const v5ToolCall = { + toolName: "getCurrentTime", + input: {}, + }; + + const openAIFormat = { + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; + + expect(openAIFormat.function.arguments).toBe("{}"); 
+ }); + + test("handles tool call with null input", () => { + const v5ToolCall = { + toolName: "ping", + input: null, + }; + + const openAIFormat = { + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; + + expect(openAIFormat.function.arguments).toBe("null"); + }); + + test("handles tool call with undefined input", () => { + const v5ToolCall = { + toolName: "status", + input: undefined, + }; + + const openAIFormat = { + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input ?? {}), + }, + }; + + expect(openAIFormat.function.arguments).toBe("{}"); + }); + + test("handles tool call with complex nested input", () => { + const v5ToolCall = { + toolName: "createUser", + input: { + user: { + name: "John Doe", + email: "john@example.com", + address: { + street: "123 Main St", + city: "Springfield", + zipCode: "12345", + }, + }, + options: { + sendEmail: true, + validateAddress: false, + }, + }, + }; + + const openAIFormat = { + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; + + const parsedArgs = JSON.parse(openAIFormat.function.arguments); + expect(parsedArgs.user.name).toBe("John Doe"); + expect(parsedArgs.user.address.city).toBe("Springfield"); + expect(parsedArgs.options.sendEmail).toBe(true); + }); + + test("handles tool call with array input", () => { + const v5ToolCall = { + toolName: "processItems", + input: { + items: [ + { id: "1", value: 100 }, + { id: "2", value: 200 }, + { id: "3", value: 300 }, + ], + }, + }; + + const openAIFormat = { + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; + + const parsedArgs = JSON.parse(openAIFormat.function.arguments); + expect(parsedArgs.items).toHaveLength(3); + expect(parsedArgs.items[1].value).toBe(200); + }); + + test("handles 
tool call with boolean input", () => { + const v5ToolCall = { + toolName: "toggleFeature", + input: { + enabled: true, + }, + }; + + const openAIFormat = { + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; + + const parsedArgs = JSON.parse(openAIFormat.function.arguments); + expect(parsedArgs.enabled).toBe(true); + }); + + test("handles tool call with numeric input", () => { + const v5ToolCall = { + toolName: "setTemperature", + input: { + value: 23.5, + unit: "celsius", + }, + }; + + const openAIFormat = { + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; + + const parsedArgs = JSON.parse(openAIFormat.function.arguments); + expect(parsedArgs.value).toBe(23.5); + expect(parsedArgs.unit).toBe("celsius"); + }); + + test("handles tool call with special characters in input", () => { + const v5ToolCall = { + toolName: "sendMessage", + input: { + message: 'Hello "World"\nNew line\tTab', + recipient: "user@example.com", + }, + }; + + const openAIFormat = { + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; + + const parsedArgs = JSON.parse(openAIFormat.function.arguments); + expect(parsedArgs.message).toContain("Hello"); + expect(parsedArgs.message).toContain("World"); + expect(parsedArgs.message).toContain("\n"); + }); + }); + + describe("Multiple tool calls", () => { + test("handles multiple tool calls in single response", () => { + const v5ToolCalls = [ + { + toolName: "readFile", + input: { filepath: "/file1.txt" }, + }, + { + toolName: "readFile", + input: { filepath: "/file2.txt" }, + }, + { + toolName: "writeFile", + input: { filepath: "/output.txt", content: "data" }, + }, + ]; + + const openAIToolCalls = v5ToolCalls.map((tc) => ({ + type: "function" as const, + function: { + name: tc.toolName, + arguments: JSON.stringify(tc.input), 
+ }, + })); + + expect(openAIToolCalls).toHaveLength(3); + expect(openAIToolCalls[0].function.name).toBe("readFile"); + expect(openAIToolCalls[2].function.name).toBe("writeFile"); + }); + + test("handles empty tool calls array", () => { + const v5ToolCalls: any[] = []; + + const openAIToolCalls = v5ToolCalls.map((tc) => ({ + type: "function" as const, + function: { + name: tc.toolName, + arguments: JSON.stringify(tc.input), + }, + })); + + expect(openAIToolCalls).toHaveLength(0); + }); + + test("handles tool calls with mixed input types", () => { + const v5ToolCalls = [ + { + toolName: "tool1", + input: { key: "value" }, + }, + { + toolName: "tool2", + input: {}, + }, + { + toolName: "tool3", + input: { nested: { data: [1, 2, 3] } }, + }, + ]; + + const openAIToolCalls = v5ToolCalls.map((tc) => ({ + type: "function" as const, + function: { + name: tc.toolName, + arguments: JSON.stringify(tc.input), + }, + })); + + expect(openAIToolCalls).toHaveLength(3); + expect(JSON.parse(openAIToolCalls[0].function.arguments)).toEqual({ + key: "value", + }); + expect(JSON.parse(openAIToolCalls[1].function.arguments)).toEqual({}); + expect( + JSON.parse(openAIToolCalls[2].function.arguments).nested.data, + ).toEqual([1, 2, 3]); + }); + }); + + describe("Tool call edge cases", () => { + test("handles tool call with very large input", () => { + const largeInput = { + data: "x".repeat(100000), + }; + + const v5ToolCall = { + toolName: "processLargeData", + input: largeInput, + }; + + const openAIFormat = { + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; + + const parsedArgs = JSON.parse(openAIFormat.function.arguments); + expect(parsedArgs.data.length).toBe(100000); + }); + + test("handles tool call with unicode characters", () => { + const v5ToolCall = { + toolName: "processText", + input: { + text: "Hello δΈ–η•Œ 🌍 ΠŸΡ€ΠΈΠ²Π΅Ρ‚", + }, + }; + + const openAIFormat = { + type: "function" as const, + 
function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; + + const parsedArgs = JSON.parse(openAIFormat.function.arguments); + expect(parsedArgs.text).toBe("Hello δΈ–η•Œ 🌍 ΠŸΡ€ΠΈΠ²Π΅Ρ‚"); + }); + + test("handles tool call with circular reference (should throw)", () => { + const circularInput: any = { key: "value" }; + circularInput.self = circularInput; + + const v5ToolCall = { + toolName: "failTool", + input: circularInput, + }; + + expect(() => { + JSON.stringify(v5ToolCall.input); + }).toThrow(); + }); + + test("handles tool call with Date objects", () => { + const date = new Date("2024-01-01T00:00:00Z"); + const v5ToolCall = { + toolName: "scheduleTask", + input: { + scheduledTime: date.toISOString(), + }, + }; + + const openAIFormat = { + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; + + const parsedArgs = JSON.parse(openAIFormat.function.arguments); + expect(parsedArgs.scheduledTime).toBe("2024-01-01T00:00:00.000Z"); + }); + + test("handles tool call with function values (should serialize)", () => { + const v5ToolCall = { + toolName: "testTool", + input: { + callback: undefined, // Functions become undefined in JSON + data: "value", + }, + }; + + const openAIFormat = { + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; + + const parsedArgs = JSON.parse(openAIFormat.function.arguments); + expect(parsedArgs.callback).toBeUndefined(); + expect(parsedArgs.data).toBe("value"); + }); + }); + + describe("Tool call ID handling", () => { + test("preserves tool call ID when present", () => { + const v5ToolCall = { + id: "call_abc123", + toolName: "readFile", + input: { filepath: "/file.txt" }, + }; + + const openAIFormat = { + id: v5ToolCall.id, + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; 
+ + expect(openAIFormat.id).toBe("call_abc123"); + expect(openAIFormat.function.name).toBe("readFile"); + }); + + test("handles missing tool call ID", () => { + const v5ToolCall = { + toolName: "readFile", + input: { filepath: "/file.txt" }, + }; + + const openAIFormat = { + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; + + expect(openAIFormat).not.toHaveProperty("id"); + }); + + test("handles tool call with empty ID", () => { + const v5ToolCall = { + id: "", + toolName: "readFile", + input: { filepath: "/file.txt" }, + }; + + const openAIFormat = { + id: v5ToolCall.id, + type: "function" as const, + function: { + name: v5ToolCall.toolName, + arguments: JSON.stringify(v5ToolCall.input), + }, + }; + + expect(openAIFormat.id).toBe(""); + }); + }); + + describe("Comparison: v4 args vs v5 input", () => { + test("demonstrates the field name change", () => { + // v4 format (old) + const v4ToolCall = { + toolName: "readFile", + args: { + filepath: "/file.txt", + }, + }; + + // v5 format (new) + const v5ToolCall = { + toolName: "readFile", + input: { + filepath: "/file.txt", + }, + }; + + // Verify the structural difference + expect(v4ToolCall).toHaveProperty("args"); + expect(v4ToolCall).not.toHaveProperty("input"); + + expect(v5ToolCall).toHaveProperty("input"); + expect(v5ToolCall).not.toHaveProperty("args"); + + // Content should be the same + expect(v5ToolCall.input).toEqual(v4ToolCall.args); + }); + + test("both v4 and v5 convert to same OpenAI format", () => { + const input = { filepath: "/file.txt" }; + + // v4 conversion + const v4OpenAI = { + type: "function" as const, + function: { + name: "readFile", + arguments: JSON.stringify(input), + }, + }; + + // v5 conversion + const v5OpenAI = { + type: "function" as const, + function: { + name: "readFile", + arguments: JSON.stringify(input), + }, + }; + + // Final OpenAI format should be identical + expect(v4OpenAI).toEqual(v5OpenAI); + 
}); + }); + + describe("Streaming tool calls", () => { + test("handles partial tool call streaming", () => { + // In streaming, tool calls might arrive in chunks + const streamedChunks = [ + { toolName: "readFile", input: undefined }, + { toolName: "readFile", input: { filepath: undefined } }, + { toolName: "readFile", input: { filepath: "/file" } }, + { toolName: "readFile", input: { filepath: "/file.txt" } }, + ]; + + // Final complete chunk + const finalChunk = streamedChunks[streamedChunks.length - 1]; + + const openAIFormat = { + type: "function" as const, + function: { + name: finalChunk.toolName, + arguments: JSON.stringify(finalChunk.input), + }, + }; + + expect(openAIFormat.function.arguments).toBe( + JSON.stringify({ filepath: "/file.txt" }), + ); + }); + + test("handles incomplete tool call in stream", () => { + const incompleteToolCall = { + toolName: "writeFile", + input: { filepath: "/out.txt" }, // missing 'content' + }; + + const openAIFormat = { + type: "function" as const, + function: { + name: incompleteToolCall.toolName, + arguments: JSON.stringify(incompleteToolCall.input), + }, + }; + + const parsedArgs = JSON.parse(openAIFormat.function.arguments); + expect(parsedArgs.filepath).toBe("/out.txt"); + expect(parsedArgs.content).toBeUndefined(); + }); + }); +}); diff --git a/packages/openai-adapters/src/test/ai-sdk-v5-usage.test.ts b/packages/openai-adapters/src/test/ai-sdk-v5-usage.test.ts new file mode 100644 index 00000000000..87b6d574901 --- /dev/null +++ b/packages/openai-adapters/src/test/ai-sdk-v5-usage.test.ts @@ -0,0 +1,368 @@ +import { describe, test, expect } from "vitest"; + +/** + * AI SDK v5 Usage Field Migration Tests + * + * This test suite verifies correct handling of usage field changes in AI SDK v5: + * - v4: usage.promptTokens β†’ v5: usage.inputTokens + * - v4: usage.completionTokens β†’ v5: usage.outputTokens + * - v4: usage.totalTokens β†’ v5: usage.totalTokens (unchanged) + * + * The adapters need to map these fields 
correctly when converting from + * Vercel AI SDK responses back to OpenAI-compatible format. + */ + +describe("AI SDK v5 Migration: Usage Fields", () => { + describe("Usage field mapping", () => { + test("maps inputTokens to prompt_tokens", () => { + // Simulating the mapping that should happen in the adapter + const vercelUsage = { + inputTokens: 100, + outputTokens: 50, + totalTokens: 150, + }; + + // Expected OpenAI format + const expectedUsage = { + prompt_tokens: vercelUsage.inputTokens ?? 0, + completion_tokens: vercelUsage.outputTokens ?? 0, + total_tokens: vercelUsage.totalTokens ?? 0, + }; + + expect(expectedUsage.prompt_tokens).toBe(100); + expect(expectedUsage.completion_tokens).toBe(50); + expect(expectedUsage.total_tokens).toBe(150); + }); + + test("handles missing inputTokens gracefully", () => { + // AI SDK v5 might not always provide all fields + const vercelUsage: any = { + outputTokens: 50, + totalTokens: 150, + }; + + const expectedUsage = { + prompt_tokens: vercelUsage.inputTokens ?? 0, + completion_tokens: vercelUsage.outputTokens ?? 0, + total_tokens: vercelUsage.totalTokens ?? 0, + }; + + expect(expectedUsage.prompt_tokens).toBe(0); + expect(expectedUsage.completion_tokens).toBe(50); + expect(expectedUsage.total_tokens).toBe(150); + }); + + test("handles missing outputTokens gracefully", () => { + const vercelUsage: any = { + inputTokens: 100, + totalTokens: 150, + }; + + const expectedUsage = { + prompt_tokens: vercelUsage.inputTokens ?? 0, + completion_tokens: vercelUsage.outputTokens ?? 0, + total_tokens: vercelUsage.totalTokens ?? 0, + }; + + expect(expectedUsage.prompt_tokens).toBe(100); + expect(expectedUsage.completion_tokens).toBe(0); + expect(expectedUsage.total_tokens).toBe(150); + }); + + test("handles missing totalTokens gracefully", () => { + const vercelUsage: any = { + inputTokens: 100, + outputTokens: 50, + }; + + const expectedUsage = { + prompt_tokens: vercelUsage.inputTokens ?? 
0, + completion_tokens: vercelUsage.outputTokens ?? 0, + total_tokens: vercelUsage.totalTokens ?? 0, + }; + + expect(expectedUsage.prompt_tokens).toBe(100); + expect(expectedUsage.completion_tokens).toBe(50); + expect(expectedUsage.total_tokens).toBe(0); + }); + + test("handles completely missing usage object", () => { + const vercelUsage: any = undefined; + + const expectedUsage = { + prompt_tokens: vercelUsage?.inputTokens ?? 0, + completion_tokens: vercelUsage?.outputTokens ?? 0, + total_tokens: vercelUsage?.totalTokens ?? 0, + }; + + expect(expectedUsage.prompt_tokens).toBe(0); + expect(expectedUsage.completion_tokens).toBe(0); + expect(expectedUsage.total_tokens).toBe(0); + }); + + test("handles zero values correctly", () => { + const vercelUsage = { + inputTokens: 0, + outputTokens: 0, + totalTokens: 0, + }; + + const expectedUsage = { + prompt_tokens: vercelUsage.inputTokens ?? 0, + completion_tokens: vercelUsage.outputTokens ?? 0, + total_tokens: vercelUsage.totalTokens ?? 0, + }; + + expect(expectedUsage.prompt_tokens).toBe(0); + expect(expectedUsage.completion_tokens).toBe(0); + expect(expectedUsage.total_tokens).toBe(0); + }); + + test("handles large token counts", () => { + const vercelUsage = { + inputTokens: 100000, + outputTokens: 50000, + totalTokens: 150000, + }; + + const expectedUsage = { + prompt_tokens: vercelUsage.inputTokens ?? 0, + completion_tokens: vercelUsage.outputTokens ?? 0, + total_tokens: vercelUsage.totalTokens ?? 
0, + }; + + expect(expectedUsage.prompt_tokens).toBe(100000); + expect(expectedUsage.completion_tokens).toBe(50000); + expect(expectedUsage.total_tokens).toBe(150000); + }); + }); + + describe("Anthropic-specific usage fields", () => { + test("maps inputTokensDetails for Anthropic caching", () => { + // Anthropic has additional caching details in v5 + const vercelUsage: any = { + inputTokens: 100, + outputTokens: 50, + totalTokens: 150, + inputTokensDetails: { + cachedTokens: 30, + }, + }; + + const expectedUsage = { + prompt_tokens: vercelUsage.inputTokens ?? 0, + completion_tokens: vercelUsage.outputTokens ?? 0, + total_tokens: vercelUsage.totalTokens ?? 0, + prompt_tokens_details: { + cached_tokens: vercelUsage.inputTokensDetails?.cachedTokens ?? 0, + cache_read_tokens: vercelUsage.inputTokensDetails?.cachedTokens ?? 0, + cache_write_tokens: 0, + }, + }; + + expect(expectedUsage.prompt_tokens).toBe(100); + expect(expectedUsage.prompt_tokens_details.cached_tokens).toBe(30); + expect(expectedUsage.prompt_tokens_details.cache_read_tokens).toBe(30); + }); + + test("handles missing inputTokensDetails", () => { + const vercelUsage: any = { + inputTokens: 100, + outputTokens: 50, + totalTokens: 150, + }; + + const expectedUsage = { + prompt_tokens: vercelUsage.inputTokens ?? 0, + completion_tokens: vercelUsage.outputTokens ?? 0, + total_tokens: vercelUsage.totalTokens ?? 0, + prompt_tokens_details: { + cached_tokens: vercelUsage.inputTokensDetails?.cachedTokens ?? 0, + cache_read_tokens: vercelUsage.inputTokensDetails?.cachedTokens ?? 
0, + cache_write_tokens: 0, + }, + }; + + expect(expectedUsage.prompt_tokens_details.cached_tokens).toBe(0); + expect(expectedUsage.prompt_tokens_details.cache_read_tokens).toBe(0); + }); + + test("correctly renames promptTokensDetails to inputTokensDetails", () => { + // In v4, it was promptTokensDetails, in v5 it's inputTokensDetails + const v4Usage: any = { + promptTokens: 100, + completionTokens: 50, + totalTokens: 150, + promptTokensDetails: { + cachedTokens: 30, + }, + }; + + const v5Usage: any = { + inputTokens: 100, + outputTokens: 50, + totalTokens: 150, + inputTokensDetails: { + cachedTokens: 30, + }, + }; + + // Verify v4 format is different from v5 + expect(v4Usage).toHaveProperty("promptTokens"); + expect(v4Usage).toHaveProperty("promptTokensDetails"); + expect(v5Usage).toHaveProperty("inputTokens"); + expect(v5Usage).toHaveProperty("inputTokensDetails"); + + // Verify the correct mapping + expect(v5Usage.inputTokens).toBe(v4Usage.promptTokens); + expect(v5Usage.inputTokensDetails.cachedTokens).toBe( + v4Usage.promptTokensDetails.cachedTokens, + ); + }); + }); + + describe("Edge cases in usage field handling", () => { + test("handles negative values (invalid but defensive)", () => { + const vercelUsage: any = { + inputTokens: -10, + outputTokens: -5, + totalTokens: -15, + }; + + const expectedUsage = { + prompt_tokens: Math.max(0, vercelUsage.inputTokens ?? 0), + completion_tokens: Math.max(0, vercelUsage.outputTokens ?? 0), + total_tokens: Math.max(0, vercelUsage.totalTokens ?? 0), + }; + + expect(expectedUsage.prompt_tokens).toBe(0); + expect(expectedUsage.completion_tokens).toBe(0); + expect(expectedUsage.total_tokens).toBe(0); + }); + + test("handles non-numeric values (type safety)", () => { + const vercelUsage: any = { + inputTokens: "not a number", + outputTokens: null, + totalTokens: undefined, + }; + + const safeNumber = (val: any): number => { + const num = Number(val); + return isNaN(num) ? 
0 : num; + }; + + const expectedUsage = { + prompt_tokens: safeNumber(vercelUsage.inputTokens), + completion_tokens: safeNumber(vercelUsage.outputTokens), + total_tokens: safeNumber(vercelUsage.totalTokens), + }; + + expect(expectedUsage.prompt_tokens).toBe(0); + expect(expectedUsage.completion_tokens).toBe(0); + expect(expectedUsage.total_tokens).toBe(0); + }); + + test("handles floating point token counts", () => { + const vercelUsage = { + inputTokens: 100.5, + outputTokens: 50.7, + totalTokens: 151.2, + }; + + const expectedUsage = { + prompt_tokens: Math.floor(vercelUsage.inputTokens ?? 0), + completion_tokens: Math.floor(vercelUsage.outputTokens ?? 0), + total_tokens: Math.floor(vercelUsage.totalTokens ?? 0), + }; + + expect(expectedUsage.prompt_tokens).toBe(100); + expect(expectedUsage.completion_tokens).toBe(50); + expect(expectedUsage.total_tokens).toBe(151); + }); + + test("verifies totalTokens consistency", () => { + const vercelUsage = { + inputTokens: 100, + outputTokens: 50, + totalTokens: 150, + }; + + const calculatedTotal = + vercelUsage.inputTokens + vercelUsage.outputTokens; + const providedTotal = vercelUsage.totalTokens; + + // Total tokens should match sum of input and output + expect(calculatedTotal).toBe(providedTotal); + }); + + test("handles inconsistent totalTokens", () => { + const vercelUsage = { + inputTokens: 100, + outputTokens: 50, + totalTokens: 200, // Inconsistent! + }; + + const calculatedTotal = + vercelUsage.inputTokens + vercelUsage.outputTokens; + const providedTotal = vercelUsage.totalTokens; + + // This demonstrates the inconsistency + expect(calculatedTotal).not.toBe(providedTotal); + expect(calculatedTotal).toBe(150); + expect(providedTotal).toBe(200); + + // In practice, we should use the provided totalTokens + const expectedUsage = { + prompt_tokens: vercelUsage.inputTokens ?? 0, + completion_tokens: vercelUsage.outputTokens ?? 0, + total_tokens: vercelUsage.totalTokens ?? 
0, + }; + + expect(expectedUsage.total_tokens).toBe(200); + }); + }); + + describe("Streaming vs non-streaming usage differences", () => { + test("handles streaming usage accumulation", () => { + // In streaming, usage might be accumulated over multiple chunks + const streamChunks = [ + { inputTokens: 10, outputTokens: 0, totalTokens: 10 }, + { inputTokens: 0, outputTokens: 5, totalTokens: 15 }, + { inputTokens: 0, outputTokens: 5, totalTokens: 20 }, + { inputTokens: 0, outputTokens: 5, totalTokens: 25 }, + ]; + + // The final usage should be from the last chunk + const finalUsage = streamChunks[streamChunks.length - 1]; + + const expectedUsage = { + prompt_tokens: finalUsage.inputTokens ?? 0, + completion_tokens: finalUsage.outputTokens ?? 0, + total_tokens: finalUsage.totalTokens ?? 0, + }; + + expect(expectedUsage.prompt_tokens).toBe(0); + expect(expectedUsage.completion_tokens).toBe(5); + expect(expectedUsage.total_tokens).toBe(25); + }); + + test("non-streaming provides complete usage immediately", () => { + const nonStreamUsage = { + inputTokens: 100, + outputTokens: 50, + totalTokens: 150, + }; + + const expectedUsage = { + prompt_tokens: nonStreamUsage.inputTokens ?? 0, + completion_tokens: nonStreamUsage.outputTokens ?? 0, + total_tokens: nonStreamUsage.totalTokens ?? 
0, + }; + + expect(expectedUsage.prompt_tokens).toBe(100); + expect(expectedUsage.completion_tokens).toBe(50); + expect(expectedUsage.total_tokens).toBe(150); + }); + }); +}); diff --git a/packages/openai-adapters/src/test/convertToolsToVercel.test.ts b/packages/openai-adapters/src/test/convertToolsToVercel.test.ts index cfb5b02ceb9..436f16a3008 100644 --- a/packages/openai-adapters/src/test/convertToolsToVercel.test.ts +++ b/packages/openai-adapters/src/test/convertToolsToVercel.test.ts @@ -2,6 +2,15 @@ import { describe, test, expect } from "vitest"; import { convertToolsToVercelFormat } from "../convertToolsToVercel.js"; import type { ChatCompletionCreateParams } from "openai/resources/index.js"; +/** + * Tests for convertToolsToVercelFormat function + * + * This function converts OpenAI tool format to Vercel AI SDK v5 format. + * Key changes in v5: + * - `parameters` renamed to `inputSchema` + * - Tool format: { [toolName]: { description, inputSchema: aiJsonSchema(JSONSchema) } } + */ + describe("convertToolsToVercelFormat", () => { test("returns undefined for undefined tools", async () => { const result = await convertToolsToVercelFormat(undefined); @@ -42,9 +51,9 @@ describe("convertToolsToVercelFormat", () => { "description", "Read a file from disk", ); - expect(result?.readFile).toHaveProperty("parameters"); - // Check that parameters were wrapped with aiJsonSchema - expect(result?.readFile.parameters).toBeDefined(); + // AI SDK v5: uses inputSchema instead of parameters + expect(result?.readFile).toHaveProperty("inputSchema"); + expect(result?.readFile.inputSchema).toBeDefined(); }); test("converts multiple function tools", async () => { @@ -146,7 +155,7 @@ describe("convertToolsToVercelFormat", () => { expect(result).toBeUndefined(); }); - test("preserves parameter schema structure", async () => { + test("preserves parameter schema structure (AI SDK v5: inputSchema)", async () => { const tools: ChatCompletionCreateParams["tools"] = [ { type: "function", 
@@ -181,9 +190,9 @@ describe("convertToolsToVercelFormat", () => {
     const result = await convertToolsToVercelFormat(tools);
 
     expect(result).toBeDefined();
-    expect(result?.complexTool.parameters).toBeDefined();
-    // The parameters should be wrapped but still contain the original schema structure
-    // We can't easily test the internal structure of aiJsonSchema, but we can verify it exists
-    expect(result?.complexTool.parameters).toBeTruthy();
+    // AI SDK v5: uses inputSchema instead of parameters
+    expect(result?.complexTool.inputSchema).toBeDefined();
+    // The inputSchema should be wrapped with aiJsonSchema
+    expect(result?.complexTool.inputSchema).toBeTruthy();
   });
 });
diff --git a/worktree-config.yaml b/worktree-config.yaml
new file mode 100644
index 00000000000..0177915fbe8
--- /dev/null
+++ b/worktree-config.yaml
@@ -0,0 +1,24 @@
+# Worktree Copy Configuration
+# Edit this file to customize what gets copied when creating new worktrees
+#
+# Supports glob patterns:
+# - packages/*/node_modules (matches all package node_modules)
+# - **/.next (matches .next in any subdirectory)
+# - services/**/*.env (matches all .env files in services)
+
+# Large directories - copied using Copy-on-Write (fast, space-efficient)
+cowCopyTargets:
+  - node_modules
+  - app/node_modules
+  - core/node_modules
+  - core/dist
+  - extensions/*/node_modules # Glob: all extension packages
+  - packages/*/node_modules # Glob: all packages
+  - packages/*/dist # Glob: all package dist builds
+  - packages/*/out # Glob: all package out builds
+
+# Small files - copied with regular cp (fast for individual files)
+regularCopyTargets:
+  - .env
+  - core/.env
+